// SPDX-License-Identifier: GPL-2.0-or-later

 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range:
 *
 * |    +       +-----------+      |
 * +----+-------+-----------+------+

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	rgn_end = rgn->base + rgn->size;

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

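/*
 * Sketch of the allocation step elided from this excerpt (not verbatim
 * from the file): the test is assumed to boil down to a plain
 * memblock_alloc_try_nid() call with no NUMA constraint, whose result
 * is checked before the region asserts above.
 */
	void *allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     NUMA_NO_NODE);
	ASSERT_NE(allocated_ptr, NULL);
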
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range, where the end address is misaligned:
 *
 * |      +       +---------+  +    |
 * +------+-------+---------+--+----+

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	rgn_end = rgn->base + rgn->size;

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size - misalign);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A simple test that tries to allocate a memory region, which spans over the
 * min_addr and max_addr range:
 *
 * |      +---------------+       |
 * +------+---------------+-------+

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	rgn_end = rgn->base + rgn->size;

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

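/*
 * Sketch of the elided range setup (the concrete values are
 * assumptions): the [min_addr, max_addr) window is sized exactly to
 * the request, so the allocation has no freedom and must land on
 * min_addr.
 */
	size = SZ_1K;
	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
	max_addr = min_addr + size;
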
 * A test that tries to allocate a memory region, which can't fit into
 * min_addr and max_addr range:
 *
 * |        +----------+-----+    |
 * +--------+----------+-----+----+
 *
 * Expect to drop the lower limit and allocate a memory region which
 * ends at max_addr (if the address is aligned).

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

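/*
 * Sketch of a window too narrow for the request (the values are
 * assumptions): the range holds fewer bytes than `size`, so the
 * allocator is expected to drop min_addr and place the region right
 * below max_addr instead of failing.
 */
	size = SZ_256;
	min_addr = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SMP_CACHE_BYTES;
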
 * A test that tries to allocate a memory region, which can't fit into
 * min_addr and max_addr range, with the latter being too close to the beginning
 * of the available memory:
 *
 *         +-------------+
 *         |     new     |
 *         +-------------+

 * +-------+--------------+

 * A test that tries to allocate a memory region within min_addr and max_addr
 * range, when there is a reserved region that ends at min_addr:
 *
 * |       +--------+---------------|
 * +-------+--------+---------------+

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	min_addr = max_addr - r2_size;
	reserved_base = min_addr - r1_size;

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, reserved_base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

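/*
 * Sketch of the elided reserve-then-allocate steps: r1 is reserved so
 * that it ends exactly at min_addr, and the r2-sized allocation that
 * follows is expected to merge with it into the single region checked
 * above (the allocation call shape is an assumption, as before).
 */
	memblock_reserve(reserved_base, r1_size);
	allocated_ptr = memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
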
 * A test that tries to allocate a memory region within min_addr and max_addr,
 * when there is a reserved region that starts at max_addr:
 *
 * |          +-------------+--------|
 * +----------+-------------+--------+

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	max_addr = memblock_end_of_DRAM() - r1_size;
	min_addr = max_addr - r2_size;

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap big enough to fit
 * a new region:
 *
 * |    +--------+   +-------+------+  |
 * +----+--------+---+-------+------+--+

	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;

	r2.base = r1.base - (r3_size + gap_size + r2.size);

	ASSERT_EQ(rgn1->size, r1.size + r3_size);
	ASSERT_EQ(rgn1->base, max_addr - r3_size);

	ASSERT_EQ(rgn2->size, r2.size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

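/*
 * Sketch of the elided setup: both border regions are reserved first,
 * then the r3-sized request is issued; allocating top-down, it is
 * expected to land right below r1 and merge with it (allocation call
 * assumed, as above).
 */
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
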
 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap of a size equal to
 * the size of the new region:
 *
 * |     +--------+--------+--------+     |
 * +-----+--------+--------+--------+-----+

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;

	r2.base = r1.base - (r3_size + r2.size);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap that can't fit
 * a new region:
 *
 * |  +----------+------+    +------+   |
 * +--+----------+------+----+------+---+

	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;

	r2.base = r1.base - (r2.size + gap_size);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	ASSERT_EQ(rgn2->size, r2.size + r3_size);
	ASSERT_EQ(rgn2->base, r2.base - r3_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

 * A test that tries to allocate memory within min_addr and max_addr range, but
 * it's too narrow and everything else is reserved:
 *
 *             +-----------+
 *             |    new    |
 *             +-----------+
 * |--------------+      +----------|
 * +--------------+------+----------+

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;

	r2.size = MEM_SIZE - (r1.size + gap_size);

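/*
 * Sketch of the expected outcome: with every usable byte reserved, the
 * request should fail and return NULL (the size and the call shape are
 * assumptions in the style of the rest of the suite).
 */
	allocated_ptr = memblock_alloc_try_nid(SZ_256, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	ASSERT_EQ(allocated_ptr, NULL);
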
 * A test that tries to allocate a memory region, where max_addr is
 * bigger than the end address of the available memory. Expect to allocate
 * a region that ends at the end of the available memory.

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	min_addr = memblock_end_of_DRAM() - SZ_1K;

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate a memory region, where min_addr is
 * smaller than the start address of the available memory. Expect to allocate
 * a region that ends at the end of the available memory.

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	min_addr = memblock_start_of_DRAM() - SZ_256;

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

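/*
 * Sketch of the clamping behaviour under test: min_addr lies below the
 * first usable address, and the allocator is expected to cap it at
 * memblock_start_of_DRAM() rather than fail (call shape assumed).
 */
	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr,
					       memblock_end_of_DRAM(),
					       NUMA_NO_NODE);
	ASSERT_NE(allocated_ptr, NULL);
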
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range:
 *
 * |    +-----------+           |      |
 * +----+-----------+-----------+------+

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	rgn_end = rgn->base + rgn->size;

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range, where the start address is misaligned:
 *
 * |     +   +-----------+     +     |
 * +-----+---+-----------+-----+-----+
 *       ^   ^----.            ^

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	rgn_end = rgn->base + rgn->size;

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate a memory region, which can't fit into min_addr
 * and max_addr range:
 *
 * |---------+         +    +      |
 * +---------+---------+----+------+
 *
 * Expect to drop the lower limit and allocate a memory region which
 * starts at the beginning of the available memory.

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap big enough to fit
 * a new region:
 *
 * |    +--------+-------+   +------+  |
 * +----+--------+-------+---+------+--+

	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;

	r2.base = r1.base - (r3_size + gap_size + r2.size);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, max_addr);

	ASSERT_EQ(rgn2->size, r2.size + r3_size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap of a size equal to
 * half the size of the new region:
 *
 * |----------+    +------+   +----+  |
 * +----------+----+------+---+----+--+
 *
 * Expect to drop the lower limit and allocate memory at the beginning of the
 * available memory. The region counter and total size fields get updated.

	struct memblock_region *rgn1 = &memblock.reserved.regions[2];
	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
	struct memblock_region *rgn3 = &memblock.reserved.regions[0];

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;

	r2.base = r1.base - (r2.size + gap_size);

	ASSERT_EQ(rgn3->size, r3_size);
	ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());

	ASSERT_EQ(rgn2->size, r2.size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	ASSERT_EQ(memblock.reserved.cnt, 3);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

 * A test that tries to allocate a memory region, where max_addr is
 * bigger than the end address of the available memory. Expect to allocate
 * a region that starts at min_addr.

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate a memory region, where min_addr is
 * smaller than the start address of the available memory. Expect to allocate
 * a region at the beginning of the available memory.

	struct memblock_region *rgn = &memblock.reserved.regions[0];

	max_addr = memblock_end_of_DRAM() - SZ_256;

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_4;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

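/*
 * Sketch of the elided NUMA setup: the emulated memory is first split
 * across the suite's NUMA nodes, then the request is bound to nid_req.
 * setup_numa_memblock() and node_fractions are assumed to match the
 * helpers used elsewhere in the memblock test suite.
 */
	setup_numa_memblock(node_fractions);
	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);
	ASSERT_NE(allocated_ptr, NULL);
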
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size:
 *
 * |   +-----+          +------------------+     |
 * +---+-----+----------+------------------+-----+
 *
 * |                             +---------+     |
 * +-----------------------------+---------+-----+
 *
 * Expect to allocate an aligned region at the end of the last node that has
 * enough memory (in this case, nid = 6) after falling back to NUMA_NO_NODE.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];

	size = SZ_2 * req_node->size;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate a memory region in a specific NUMA node that
 * is fully reserved:
 *
 * |              +---------+            +------------------+     |
 * +--------------+---------+------------+------------------+-----+
 *
 * |              +---------+                     +---------+     |
 * |              | reserved|                     |   new   |     |
 * +--------------+---------+---------------------+---------+-----+
 *
 * Expect to allocate an aligned region at the end of the last node that is
 * large enough and has enough unreserved memory (in this case, nid = 6) after
 * falling back to NUMA_NO_NODE. The region counter and total size get updated.

	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];

	size = req_node->size;

	memblock_reserve(req_node->base, req_node->size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);

 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 * |           +---------------------------------------+          |
 * +-----------+---------------------------------------+----------+
 *
 * |           +------------------+              +-----+          |
 * |           |     reserved     |              | new |          |
 * +-----------+------------------+--------------+-----+----------+

	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];

	ASSERT_LE(SZ_8, req_node->size);
	r1.base = req_node->base;
	r1.size = req_node->size / SZ_2;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved and does not have enough contiguous memory for the
 * allocated region:
 *
 * |           +-----------------------+         +----------------------|
 * +-----------+-----------------------+---------+----------------------+
 *
 * |                 +----------+                           +-----------|
 * |                 | reserved |                           |    new    |
 * +-----------------+----------+---------------------------+-----------+
 *
 * Expect to allocate an aligned region at the end of the last node that is
 * large enough and has enough unreserved memory (in this case,
 * nid = NUMA_NODES - 1) after falling back to NUMA_NO_NODE. The region count
 * and total size fields get updated.

	int nid_exp = NUMA_NODES - 1;
	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_2;
	r1.base = req_node->base + (size / SZ_2);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps two adjacent nodes, where the first
 * node is the requested node:
 *
 * |           +-----------------------+-----------+              |
 * +-----------+-----------------------+-----------+--------------+
 *
 * |                        +-----------+                         |
 * +-----------------------+-----------+--------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];

	min_addr = req_node_end - SZ_256;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node_end - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

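/*
 * Sketch of how the straddling window is derived (mirrors the fragment
 * above; the max_addr line is an assumption): min_addr sits SZ_256
 * below the end of the requested node, so a larger request must cross
 * the node boundary.
 */
	req_node_end = region_end(req_node);
	min_addr = req_node_end - SZ_256;
	max_addr = min_addr + size;
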
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps two adjacent nodes, where the second
 * node is the requested node:
 *
 * |      +--------------------------+---------+                |
 * +------+--------------------------+---------+----------------+
 *
 * |                        +---------+                          |
 * +-----------------------+---------+--------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that
 * ends at the end of the first node that overlaps with the range.

	int nid_exp = nid_req - 1;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];

	min_addr = exp_node_end - SZ_256;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node_end - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range, where the requested node ends before min_addr:
 *
 * |    +---------------+        +-------------+---------+          |
 * +----+---------------+--------+-------------+---------+----------+
 *
 * |          +---------+                                           |
 * +----------+---------+-------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *node2 = &memblock.memory.regions[6];

	min_addr = node2->base - SZ_256;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate memory within min_addr and max_addr range when
 * the requested node and the range do not overlap, and the requested node
 * ends before min_addr:
 *
 * |-----------+           +----------+----...----+----------+      |
 * +-----------+-----------+----------+----...----+----------+------+
 *
 * |                                                    +-----+      |
 * +---------------------------------------------------+-----+------+
 *
 * Expect to allocate a memory region at the end of the final node in
 * the requested range.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];

	min_addr = min_node->base;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, max_addr - size);
	ASSERT_LE(max_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate memory within min_addr and max_addr range when
 * the requested node and the range do not overlap, and the requested node
 * starts after max_addr:
 *
 * |     +----------+----...----+----------+        +-----------+   |
 * +-----+----------+----...----+----------+--------+-----------+---+
 *
 * |                                  +-----+                        |
 * +---------------------------------+-----+------------------------+
 *
 * Expect to allocate a memory region at the end of the final node in
 * the requested range.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];

	min_addr = min_node->base;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, max_addr - size);
	ASSERT_LE(max_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_4;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size:
 *
 * |----------------------+-----+                |
 * +----------------------+-----+----------------+
 *
 * |---------+                                   |
 * +---------+-----------------------------------+
 *
 * Expect to allocate an aligned region at the beginning of the first node that
 * has enough memory (in this case, nid = 0) after falling back to NUMA_NO_NODE.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];

	size = SZ_2 * req_node->size;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate a memory region in a specific NUMA node that
 * is fully reserved:
 *
 * |----------------------+     +-----------+                    |
 * +----------------------+-----+-----------+--------------------+
 *
 * |-----------+                +-----------+                    |
 * |    new    |                |  reserved |                    |
 * +-----------+----------------+-----------+--------------------+
 *
 * Expect to allocate an aligned region at the beginning of the first node that
 * is large enough and has enough unreserved memory (in this case, nid = 0)
 * after falling back to NUMA_NO_NODE.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];

	size = req_node->size;

	memblock_reserve(req_node->base, req_node->size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);

 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 * |           +---------------------------------------+         |
 * +-----------+---------------------------------------+---------+
 *
 * |           +------------------+-----+                        |
 * |           |     reserved     | new |                        |
 * +-----------+------------------+-----+------------------------+
 *
 * Expect the new region to be merged with
 * the existing reserved region. The total size gets updated.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];

	ASSERT_LE(SZ_8, req_node->size);
	r1.base = req_node->base;
	r1.size = req_node->size / SZ_2;

	ASSERT_EQ(new_rgn->size, total_size);
	ASSERT_EQ(new_rgn->base, req_node->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved and does not have enough contiguous memory for the
 * allocated region:
 *
 * |----------------------+       +-----------------------+        |
 * +----------------------+-------+-----------------------+---------+
 *
 * |-----------+                        +----------+                |
 * |    new    |                        | reserved |                |
 * +-----------+------------------------+----------+----------------+
 *
 * Expect to allocate an aligned region at the beginning of the first
 * node that is large enough and has enough unreserved memory (in this case,

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_2;
	r1.base = req_node->base + (size / SZ_2);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps two adjacent nodes, where the first
 * node is the requested node:
 *
 * |           +-----------------------+-----------+              |
 * +-----------+-----------------------+-----------+--------------+
 *
 * |           +-----------+                                      |
 * +-----------+-----------+--------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region at the beginning
 * of the requested node.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];

	min_addr = req_node_end - SZ_256;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps two adjacent nodes, where the second
 * node is the requested node:
 *
 * |------------------+        +----------------------+---------+      |
 * +------------------+--------+----------------------+---------+------+
 *
 * |---------+                                                         |
 * +---------+---------------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region at the beginning
 * of the first node that has enough memory.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];

	min_addr = req_node->base - SZ_256;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range, where the requested node ends before min_addr:
 *
 * |    +---------------+        +-------------+---------+         |
 * +----+---------------+--------+-------------+---------+---------+
 *
 * |    +---------+                                                |
 * +----+---------+------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that starts at
 * the beginning of the requested node.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *node2 = &memblock.memory.regions[6];

	min_addr = node2->base - SZ_256;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate memory within min_addr and max_addr range when
 * the requested node and the range do not overlap, and the requested node
 * ends before min_addr:
 *
 * |-----------+           +----------+----...----+----------+      |
 * +-----------+-----------+----------+----...----+----------+------+
 *
 * |                        +-----+                                  |
 * +-----------------------+-----+----------------------------------+
 *
 * Expect to allocate a memory region at the beginning of the first node
 * in the requested range.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];

	min_addr = min_node->base;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate memory within min_addr and max_addr range when
 * the requested node and the range do not overlap, and the requested node
 * starts after max_addr:
 *
 * |     +----------+----...----+----------+         +---------+   |
 * +-----+----------+----...----+----------+---------+---------+---+
 *
 * |     +-----+                                                    |
 * +-----+-----+---------------------------------------------------+
 *
 * Expect to allocate a memory region at the beginning of the first node
 * in the requested range.

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];

	min_addr = min_node->base;

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size.
 * Additionally, none of the nodes have enough memory to allocate the region:
 *
 * +-----------------------------------+
 * |                new                |
 * +-----------------------------------+
 * |-------+-------+-------+-------+-------+-------+-------+-------|
 * +-------+-------+-------+-------+-------+-------+-------+-------+

 * A test that tries to allocate memory within min_addr and max_addr range when
 * there are two reserved regions at the borders. The requested node starts at
 * min_addr and ends at max_addr:
 *
 * |      +-----------+-----------------------+-----------------------|
 * +------+-----------+-----------------------+-----------------------+
 *
 * |             +----+-----------------------+----+                  |
 * +-------------+----+-----------------------+----+------------------+

	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *next_node = &memblock.memory.regions[nid_next];

	phys_addr_t size = req_node->size;

	r1.base = next_node->base;

	r2.base = r1.base - (size + r2.size);

	ASSERT_EQ(new_rgn->size, total_size);
	ASSERT_EQ(new_rgn->base, r2.base);

	ASSERT_LE(new_rgn->base, req_node->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

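/*
 * Sketch of the elided setup: r1 is reserved starting at the base of
 * the node after the requested one, r2 immediately below the requested
 * node, so the node-sized request fills the gap and all three merge
 * into the single region checked above (the order of the calls is an
 * assumption).
 */
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);
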
 * A test that tries to allocate memory within min_addr and max_addr range,
 * where the requested region has to span over two adjacent nodes,
 * and everything else is reserved. Additionally, nid is set to NUMA_NO_NODE
 * instead of requesting a specific node:
 *
 *                        +-----------+
 *                        |    new    |
 *                        +-----------+
 * |      +---------------------+-----------|
 * +------+---------------------+-----------+
 *
 * |----------------------+           +-----|
 * +----------------------+-----------+-----+

	struct memblock_region *next_node = &memblock.memory.regions[7];

	r2.base = next_node->base + SZ_128;
	r2.size = memblock_end_of_DRAM() - r2.base;

	r1.size = MEM_SIZE - (r2.size + size);

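/*
 * Sketch of the elided steps: r1 and r2 are reserved so that the only
 * remaining free bytes straddle the boundary into node 7, and the
 * request is then issued with NUMA_NO_NODE (the expected outcome is
 * asserted in lines not captured by this excerpt).
 */
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
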
 * A simple test that tries to allocate a memory region through the
 * memblock_alloc_node() API and checks that it lands on the correct
 * NUMA node.

	struct memblock_region *req_node = &memblock.memory.regions[nid_req];

	ASSERT_EQ(nid_req, req_node->nid);

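/*
 * Sketch of how such a check is driven in both allocation directions;
 * memblock_set_bottom_up() is the real memblock toggle, while the
 * runner shape is an assumption based on the suite's style.
 */
static int alloc_node_on_correct_nid_test(void)
{
	/* Default top-down allocation direction. */
	memblock_set_bottom_up(false);
	alloc_node_on_correct_nid();

	/* Repeat the same check bottom-up. */
	memblock_set_bottom_up(true);
	alloc_node_on_correct_nid();

	return 0;
}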