1 // SPDX-License-Identifier: GPL-2.0-or-later
2 #include "alloc_nid_api.h"
/* TEST_F_* flags for the current pass; checked to pick the _raw allocator variant. */
4 static int alloc_nid_test_flags = TEST_F_NONE;
7 * contains the fraction of MEM_SIZE contained in each node in basis point
8 * units (one hundredth of 1% or 1/10000)
10 static const unsigned int node_fractions[] = {
/* Return the human-readable name of the allocator under test, for log output. */
21 static inline const char * const get_memblock_alloc_try_nid_name(int flags)
23 	if (flags & TEST_F_RAW)
24 		return "memblock_alloc_try_nid_raw";
25 	return "memblock_alloc_try_nid";
/*
 * Dispatch a single allocation to memblock_alloc_try_nid_raw() or
 * memblock_alloc_try_nid(), depending on the global test flags.
 */
28 static inline void *run_memblock_alloc_try_nid(phys_addr_t size,
31 					       phys_addr_t max_addr, int nid)
33 	if (alloc_nid_test_flags & TEST_F_RAW)
34 		return memblock_alloc_try_nid_raw(size, align, min_addr,
36 	return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid);
40 * A simple test that tries to allocate a memory region within min_addr and
46 * +----+-------+-----------+------+
51 * Expect to allocate a region that ends at max_addr.
/* Top-down allocation within an aligned [min_addr, max_addr] range. */
53 static int alloc_try_nid_top_down_simple_check(void)
55 	struct memblock_region *rgn = &memblock.reserved.regions[0];
56 	void *allocated_ptr = NULL;
57 	phys_addr_t size = SZ_128;
65 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
66 	max_addr = min_addr + SZ_512;
68 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
71 	rgn_end = rgn->base + rgn->size;
73 	ASSERT_NE(allocated_ptr, NULL);
74 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* exactly one reserved region of the requested size, ending flush at max_addr */
76 	ASSERT_EQ(rgn->size, size);
77 	ASSERT_EQ(rgn->base, max_addr - size);
78 	ASSERT_EQ(rgn_end, max_addr);
80 	ASSERT_EQ(memblock.reserved.cnt, 1);
81 	ASSERT_EQ(memblock.reserved.total_size, size);
89 * A simple test that tries to allocate a memory region within min_addr and
90 * max_addr range, where the end address is misaligned:
95 * +------+-------+---------+--+----+
103 * Expect to allocate an aligned region that ends before max_addr.
/* Top-down allocation where max_addr is off SMP_CACHE_BYTES alignment by SZ_2. */
105 static int alloc_try_nid_top_down_end_misaligned_check(void)
107 	struct memblock_region *rgn = &memblock.reserved.regions[0];
108 	void *allocated_ptr = NULL;
109 	phys_addr_t size = SZ_128;
110 	phys_addr_t misalign = SZ_2;
111 	phys_addr_t min_addr;
112 	phys_addr_t max_addr;
118 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
119 	max_addr = min_addr + SZ_512 + misalign;
121 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
124 	rgn_end = rgn->base + rgn->size;
126 	ASSERT_NE(allocated_ptr, NULL);
127 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* the region is pulled back by 'misalign' so its end stays aligned, below max_addr */
129 	ASSERT_EQ(rgn->size, size);
130 	ASSERT_EQ(rgn->base, max_addr - size - misalign);
131 	ASSERT_LT(rgn_end, max_addr);
133 	ASSERT_EQ(memblock.reserved.cnt, 1);
134 	ASSERT_EQ(memblock.reserved.total_size, size);
142 * A simple test that tries to allocate a memory region, which spans over the
143 * min_addr and max_addr range:
146 * | +---------------+ |
148 * +------+---------------+-------+
153 * Expect to allocate a region that starts at min_addr and ends at
154 * max_addr, given that min_addr is aligned.
/* Range exactly as large as the request: region must span [min_addr, max_addr). */
156 static int alloc_try_nid_exact_address_generic_check(void)
158 	struct memblock_region *rgn = &memblock.reserved.regions[0];
159 	void *allocated_ptr = NULL;
160 	phys_addr_t size = SZ_1K;
161 	phys_addr_t min_addr;
162 	phys_addr_t max_addr;
168 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
	/* max_addr - min_addr == size, so there is only one possible placement */
169 	max_addr = min_addr + size;
171 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
174 	rgn_end = rgn->base + rgn->size;
176 	ASSERT_NE(allocated_ptr, NULL);
177 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
179 	ASSERT_EQ(rgn->size, size);
180 	ASSERT_EQ(rgn->base, min_addr);
181 	ASSERT_EQ(rgn_end, max_addr);
183 	ASSERT_EQ(memblock.reserved.cnt, 1);
184 	ASSERT_EQ(memblock.reserved.total_size, size);
192 * A test that tries to allocate a memory region, which can't fit into
193 * min_addr and max_addr range:
196 * | +----------+-----+ |
198 * +--------+----------+-----+----+
205 * Expect to drop the lower limit and allocate a memory region which
206 * ends at max_addr (if the address is aligned).
/* Range too small for the request: the lower limit is dropped, top-down placement. */
208 static int alloc_try_nid_top_down_narrow_range_check(void)
210 	struct memblock_region *rgn = &memblock.reserved.regions[0];
211 	void *allocated_ptr = NULL;
212 	phys_addr_t size = SZ_256;
213 	phys_addr_t min_addr;
214 	phys_addr_t max_addr;
	/* SZ_256 request into a window of only SMP_CACHE_BYTES */
219 	min_addr = memblock_start_of_DRAM() + SZ_512;
220 	max_addr = min_addr + SMP_CACHE_BYTES;
222 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
226 	ASSERT_NE(allocated_ptr, NULL);
227 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* region still ends at max_addr but extends below the original min_addr */
229 	ASSERT_EQ(rgn->size, size);
230 	ASSERT_EQ(rgn->base, max_addr - size);
232 	ASSERT_EQ(memblock.reserved.cnt, 1);
233 	ASSERT_EQ(memblock.reserved.total_size, size);
241 * A test that tries to allocate a memory region, which can't fit into
242 * min_addr and max_addr range, with the latter being too close to the beginning
243 * of the available memory:
251 * +-------+--------------+
258 * Expect no allocation to happen.
/* max_addr too close to the start of memory for the request: allocation must fail. */
260 static int alloc_try_nid_low_max_generic_check(void)
262 	void *allocated_ptr = NULL;
263 	phys_addr_t size = SZ_1K;
264 	phys_addr_t min_addr;
265 	phys_addr_t max_addr;
270 	min_addr = memblock_start_of_DRAM();
271 	max_addr = min_addr + SMP_CACHE_BYTES;
273 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
277 	ASSERT_EQ(allocated_ptr, NULL);
285 * A test that tries to allocate a memory region within min_addr and max_addr range,
286 * with min_addr being so close that it's next to an allocated region:
289 * | +--------+---------------|
291 * +-------+--------+---------------+
296 * Expect a merge of both regions. Only the region size gets updated.
/* New region abuts a reservation just below min_addr: the two must merge. */
298 static int alloc_try_nid_min_reserved_generic_check(void)
300 	struct memblock_region *rgn = &memblock.reserved.regions[0];
301 	void *allocated_ptr = NULL;
302 	phys_addr_t r1_size = SZ_128;
303 	phys_addr_t r2_size = SZ_64;
304 	phys_addr_t total_size = r1_size + r2_size;
305 	phys_addr_t min_addr;
306 	phys_addr_t max_addr;
307 	phys_addr_t reserved_base;
312 	max_addr = memblock_end_of_DRAM();
313 	min_addr = max_addr - r2_size;
314 	reserved_base = min_addr - r1_size;
	/* reserve r1 immediately below min_addr so the new r2 allocation merges with it */
316 	memblock_reserve(reserved_base, r1_size);
318 	allocated_ptr = run_memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
322 	ASSERT_NE(allocated_ptr, NULL);
323 	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);
	/* single merged region: base unchanged, size grew to r1 + r2 */
325 	ASSERT_EQ(rgn->size, total_size);
326 	ASSERT_EQ(rgn->base, reserved_base);
328 	ASSERT_EQ(memblock.reserved.cnt, 1);
329 	ASSERT_EQ(memblock.reserved.total_size, total_size);
337 * A test that tries to allocate a memory region within min_addr and max_addr,
338 * with max_addr being so close that it's next to an allocated region:
341 * | +-------------+--------|
343 * +----------+-------------+--------+
348 * Expect a merge of regions. Only the region size gets updated.
/* New region abuts a reservation starting at max_addr: the two must merge. */
350 static int alloc_try_nid_max_reserved_generic_check(void)
352 	struct memblock_region *rgn = &memblock.reserved.regions[0];
353 	void *allocated_ptr = NULL;
354 	phys_addr_t r1_size = SZ_64;
355 	phys_addr_t r2_size = SZ_128;
356 	phys_addr_t total_size = r1_size + r2_size;
357 	phys_addr_t min_addr;
358 	phys_addr_t max_addr;
363 	max_addr = memblock_end_of_DRAM() - r1_size;
364 	min_addr = max_addr - r2_size;
	/* reserve r1 right at max_addr; allocating r2 in [min_addr, max_addr] merges below it */
366 	memblock_reserve(max_addr, r1_size);
368 	allocated_ptr = run_memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
372 	ASSERT_NE(allocated_ptr, NULL);
373 	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);
	/* single merged region starting at min_addr covering r1 + r2 */
375 	ASSERT_EQ(rgn->size, total_size);
376 	ASSERT_EQ(rgn->base, min_addr);
378 	ASSERT_EQ(memblock.reserved.cnt, 1);
379 	ASSERT_EQ(memblock.reserved.total_size, total_size);
387 * A test that tries to allocate memory within min_addr and max_addr range, when
388 * there are two reserved regions at the borders, with a gap big enough to fit
392 * | +--------+ +-------+------+ |
393 * | | r2 | | rgn | r1 | |
394 * +----+--------+---+-------+------+--+
399 * Expect to merge the new region with r1. The second region does not get
400 * updated. The total size field gets updated.
/* Gap between two reservations fits r3 plus slack: top-down, merge with r1. */
403 static int alloc_try_nid_top_down_reserved_with_space_check(void)
405 	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
406 	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
407 	void *allocated_ptr = NULL;
408 	struct region r1, r2;
409 	phys_addr_t r3_size = SZ_64;
410 	phys_addr_t gap_size = SMP_CACHE_BYTES;
411 	phys_addr_t total_size;
412 	phys_addr_t max_addr;
413 	phys_addr_t min_addr;
	/* r1 sits near the end of DRAM; r2 sits below it, leaving r3 + gap room between */
418 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
419 	r1.size = SMP_CACHE_BYTES;
422 	r2.base = r1.base - (r3_size + gap_size + r2.size);
424 	total_size = r1.size + r2.size + r3_size;
425 	min_addr = r2.base + r2.size;
428 	memblock_reserve(r1.base, r1.size);
429 	memblock_reserve(r2.base, r2.size);
431 	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
435 	ASSERT_NE(allocated_ptr, NULL);
436 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
	/* r3 merges into the upper region (r1); r2 is untouched */
438 	ASSERT_EQ(rgn1->size, r1.size + r3_size);
439 	ASSERT_EQ(rgn1->base, max_addr - r3_size);
441 	ASSERT_EQ(rgn2->size, r2.size);
442 	ASSERT_EQ(rgn2->base, r2.base);
444 	ASSERT_EQ(memblock.reserved.cnt, 2);
445 	ASSERT_EQ(memblock.reserved.total_size, total_size);
453 * A test that tries to allocate memory within min_addr and max_addr range, when
454 * there are two reserved regions at the borders, with a gap of a size equal to
455 * the size of the new region:
458 * | +--------+--------+--------+ |
459 * | | r2 | r3 | r1 | |
460 * +-----+--------+--------+--------+-----+
465 * Expect to merge all of the regions into one. The region counter and total
466 * size fields get updated.
/* Gap between the two reservations is exactly r3_size: all three regions merge. */
468 static int alloc_try_nid_reserved_full_merge_generic_check(void)
470 	struct memblock_region *rgn = &memblock.reserved.regions[0];
471 	void *allocated_ptr = NULL;
472 	struct region r1, r2;
473 	phys_addr_t r3_size = SZ_64;
474 	phys_addr_t total_size;
475 	phys_addr_t max_addr;
476 	phys_addr_t min_addr;
481 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
482 	r1.size = SMP_CACHE_BYTES;
	/* r2 placed so the hole between r2 and r1 is exactly r3_size wide */
485 	r2.base = r1.base - (r3_size + r2.size);
487 	total_size = r1.size + r2.size + r3_size;
488 	min_addr = r2.base + r2.size;
491 	memblock_reserve(r1.base, r1.size);
492 	memblock_reserve(r2.base, r2.size);
494 	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
498 	ASSERT_NE(allocated_ptr, NULL);
499 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
	/* one contiguous region from r2.base covering r1 + r2 + r3 */
501 	ASSERT_EQ(rgn->size, total_size);
502 	ASSERT_EQ(rgn->base, r2.base);
504 	ASSERT_EQ(memblock.reserved.cnt, 1);
505 	ASSERT_EQ(memblock.reserved.total_size, total_size);
513 * A test that tries to allocate memory within min_addr and max_addr range, when
514 * there are two reserved regions at the borders, with a gap that can't fit
518 * | +----------+------+ +------+ |
519 * | | r3 | r2 | | r1 | |
520 * +--+----------+------+----+------+---+
527 * Expect to merge the new region with r2. The second region does not get
528 * updated. The total size counter gets updated.
/* Gap between reservations too small for r3: top-down fallback merges r3 below r2. */
530 static int alloc_try_nid_top_down_reserved_no_space_check(void)
532 	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
533 	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
534 	void *allocated_ptr = NULL;
535 	struct region r1, r2;
536 	phys_addr_t r3_size = SZ_256;
537 	phys_addr_t gap_size = SMP_CACHE_BYTES;
538 	phys_addr_t total_size;
539 	phys_addr_t max_addr;
540 	phys_addr_t min_addr;
545 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
546 	r1.size = SMP_CACHE_BYTES;
	/* only gap_size of room between r2 and r1 — not enough for r3_size */
549 	r2.base = r1.base - (r2.size + gap_size);
551 	total_size = r1.size + r2.size + r3_size;
552 	min_addr = r2.base + r2.size;
555 	memblock_reserve(r1.base, r1.size);
556 	memblock_reserve(r2.base, r2.size);
558 	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
562 	ASSERT_NE(allocated_ptr, NULL);
563 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
	/* r1 untouched; r3 is placed directly below r2 and merges with it */
565 	ASSERT_EQ(rgn1->size, r1.size);
566 	ASSERT_EQ(rgn1->base, r1.base);
568 	ASSERT_EQ(rgn2->size, r2.size + r3_size);
569 	ASSERT_EQ(rgn2->base, r2.base - r3_size);
571 	ASSERT_EQ(memblock.reserved.cnt, 2);
572 	ASSERT_EQ(memblock.reserved.total_size, total_size);
580 * A test that tries to allocate memory within min_addr and max_addr range, but
581 * it's too narrow and everything else is reserved:
587 * |--------------+ +----------|
589 * +--------------+------+----------+
596 * Expect no allocation to happen.
/* Everything but a too-small gap is reserved: allocation must fail. */
599 static int alloc_try_nid_reserved_all_generic_check(void)
601 	void *allocated_ptr = NULL;
602 	struct region r1, r2;
603 	phys_addr_t r3_size = SZ_256;
604 	phys_addr_t gap_size = SMP_CACHE_BYTES;
605 	phys_addr_t max_addr;
606 	phys_addr_t min_addr;
611 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
612 	r1.size = SMP_CACHE_BYTES;
	/* r2 covers all remaining memory except a gap_size hole before r1 */
614 	r2.size = MEM_SIZE - (r1.size + gap_size);
615 	r2.base = memblock_start_of_DRAM();
617 	min_addr = r2.base + r2.size;
620 	memblock_reserve(r1.base, r1.size);
621 	memblock_reserve(r2.base, r2.size);
623 	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
627 	ASSERT_EQ(allocated_ptr, NULL);
635 * A test that tries to allocate a memory region, where max_addr is
636 * bigger than the end address of the available memory. Expect to allocate
637 * a region that ends before the end of the memory.
/* max_addr beyond end of DRAM: it is capped and the region ends at end of memory. */
639 static int alloc_try_nid_top_down_cap_max_check(void)
641 	struct memblock_region *rgn = &memblock.reserved.regions[0];
642 	void *allocated_ptr = NULL;
643 	phys_addr_t size = SZ_256;
644 	phys_addr_t min_addr;
645 	phys_addr_t max_addr;
650 	min_addr = memblock_end_of_DRAM() - SZ_1K;
	/* deliberately past the end of available memory */
651 	max_addr = memblock_end_of_DRAM() + SZ_256;
653 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
657 	ASSERT_NE(allocated_ptr, NULL);
658 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
660 	ASSERT_EQ(rgn->size, size);
661 	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
663 	ASSERT_EQ(memblock.reserved.cnt, 1);
664 	ASSERT_EQ(memblock.reserved.total_size, size);
672 * A test that tries to allocate a memory region, where min_addr is
673 * smaller than the start address of the available memory. Expect to allocate
674 * a region that ends before the end of the memory.
/* min_addr below start of DRAM: it is capped; top-down still allocates at the end. */
676 static int alloc_try_nid_top_down_cap_min_check(void)
678 	struct memblock_region *rgn = &memblock.reserved.regions[0];
679 	void *allocated_ptr = NULL;
680 	phys_addr_t size = SZ_1K;
681 	phys_addr_t min_addr;
682 	phys_addr_t max_addr;
	/* deliberately below the start of available memory */
687 	min_addr = memblock_start_of_DRAM() - SZ_256;
688 	max_addr = memblock_end_of_DRAM();
690 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
694 	ASSERT_NE(allocated_ptr, NULL);
695 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
697 	ASSERT_EQ(rgn->size, size);
698 	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
700 	ASSERT_EQ(memblock.reserved.cnt, 1);
701 	ASSERT_EQ(memblock.reserved.total_size, size);
709 * A simple test that tries to allocate a memory region within min_addr and
713 * | +-----------+ | |
715 * +----+-----------+-----------+------+
720 * Expect to allocate a region that ends before max_addr.
/* Bottom-up allocation within an aligned range: region starts at min_addr. */
722 static int alloc_try_nid_bottom_up_simple_check(void)
724 	struct memblock_region *rgn = &memblock.reserved.regions[0];
725 	void *allocated_ptr = NULL;
726 	phys_addr_t size = SZ_128;
727 	phys_addr_t min_addr;
728 	phys_addr_t max_addr;
734 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
735 	max_addr = min_addr + SZ_512;
737 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
740 	rgn_end = rgn->base + rgn->size;
742 	ASSERT_NE(allocated_ptr, NULL);
743 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* region anchored at min_addr, ending well before max_addr */
745 	ASSERT_EQ(rgn->size, size);
746 	ASSERT_EQ(rgn->base, min_addr);
747 	ASSERT_LT(rgn_end, max_addr);
749 	ASSERT_EQ(memblock.reserved.cnt, 1);
750 	ASSERT_EQ(memblock.reserved.total_size, size);
758 * A simple test that tries to allocate a memory region within min_addr and
759 * max_addr range, where the start address is misaligned:
762 * | + +-----------+ + |
764 * +-----+---+-----------+-----+-----+
772 * Expect to allocate an aligned region that ends before max_addr.
/* Bottom-up allocation where min_addr is misaligned by SZ_2. */
774 static int alloc_try_nid_bottom_up_start_misaligned_check(void)
776 	struct memblock_region *rgn = &memblock.reserved.regions[0];
777 	void *allocated_ptr = NULL;
778 	phys_addr_t size = SZ_128;
779 	phys_addr_t misalign = SZ_2;
780 	phys_addr_t min_addr;
781 	phys_addr_t max_addr;
787 	min_addr = memblock_start_of_DRAM() + misalign;
788 	max_addr = min_addr + SZ_512;
790 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
793 	rgn_end = rgn->base + rgn->size;
795 	ASSERT_NE(allocated_ptr, NULL);
796 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* base rounded up from min_addr to the next SMP_CACHE_BYTES boundary */
798 	ASSERT_EQ(rgn->size, size);
799 	ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
800 	ASSERT_LT(rgn_end, max_addr);
802 	ASSERT_EQ(memblock.reserved.cnt, 1);
803 	ASSERT_EQ(memblock.reserved.total_size, size);
811 * A test that tries to allocate a memory region, which can't fit into min_addr
812 * and max_addr range:
817 * +---------+---------+----+------+
824 * Expect to drop the lower limit and allocate a memory region which
825 * starts at the beginning of the available memory.
/* Range too small for the request: lower limit dropped, bottom-up places at DRAM start. */
827 static int alloc_try_nid_bottom_up_narrow_range_check(void)
829 	struct memblock_region *rgn = &memblock.reserved.regions[0];
830 	void *allocated_ptr = NULL;
831 	phys_addr_t size = SZ_256;
832 	phys_addr_t min_addr;
833 	phys_addr_t max_addr;
	/* SZ_256 request into a window of only SMP_CACHE_BYTES */
838 	min_addr = memblock_start_of_DRAM() + SZ_512;
839 	max_addr = min_addr + SMP_CACHE_BYTES;
841 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
845 	ASSERT_NE(allocated_ptr, NULL);
846 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
848 	ASSERT_EQ(rgn->size, size);
849 	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
851 	ASSERT_EQ(memblock.reserved.cnt, 1);
852 	ASSERT_EQ(memblock.reserved.total_size, size);
860 * A test that tries to allocate memory within min_addr and max_addr range, when
861 * there are two reserved regions at the borders, with a gap big enough to fit
865 * | +--------+-------+ +------+ |
866 * | | r2 | rgn | | r1 | |
867 * +----+--------+-------+---+------+--+
872 * Expect to merge the new region with r2. The second region does not get
873 * updated. The total size field gets updated.
/* Gap between two reservations fits r3 plus slack: bottom-up, merge with r2. */
876 static int alloc_try_nid_bottom_up_reserved_with_space_check(void)
878 	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
879 	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
880 	void *allocated_ptr = NULL;
881 	struct region r1, r2;
882 	phys_addr_t r3_size = SZ_64;
883 	phys_addr_t gap_size = SMP_CACHE_BYTES;
884 	phys_addr_t total_size;
885 	phys_addr_t max_addr;
886 	phys_addr_t min_addr;
	/* r1 near the end of DRAM; r2 below it with r3 + gap room between */
891 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
892 	r1.size = SMP_CACHE_BYTES;
895 	r2.base = r1.base - (r3_size + gap_size + r2.size);
897 	total_size = r1.size + r2.size + r3_size;
898 	min_addr = r2.base + r2.size;
901 	memblock_reserve(r1.base, r1.size);
902 	memblock_reserve(r2.base, r2.size);
904 	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
908 	ASSERT_NE(allocated_ptr, NULL);
909 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
	/* r1 untouched; r3 merges upward into r2 (base unchanged, size grows) */
911 	ASSERT_EQ(rgn1->size, r1.size);
912 	ASSERT_EQ(rgn1->base, max_addr);
914 	ASSERT_EQ(rgn2->size, r2.size + r3_size);
915 	ASSERT_EQ(rgn2->base, r2.base);
917 	ASSERT_EQ(memblock.reserved.cnt, 2);
918 	ASSERT_EQ(memblock.reserved.total_size, total_size);
926 * A test that tries to allocate memory within min_addr and max_addr range, when
927 * there are two reserved regions at the borders, with a gap of a size equal to
928 * the size of the new region:
931 * |----------+ +------+ +----+ |
932 * | r3 | | r2 | | r1 | |
933 * +----------+----+------+---+----+--+
940 * Expect to drop the lower limit and allocate memory at the beginning of the
941 * available memory. The region counter and total size fields get updated.
942 * Other regions are not modified.
/*
 * Gap between reservations too small for r3: bottom-up drops the lower limit
 * and places r3 at the start of memory as a third, separate region.
 */
945 static int alloc_try_nid_bottom_up_reserved_no_space_check(void)
947 	struct memblock_region *rgn1 = &memblock.reserved.regions[2];
948 	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
949 	struct memblock_region *rgn3 = &memblock.reserved.regions[0];
950 	void *allocated_ptr = NULL;
951 	struct region r1, r2;
952 	phys_addr_t r3_size = SZ_256;
953 	phys_addr_t gap_size = SMP_CACHE_BYTES;
954 	phys_addr_t total_size;
955 	phys_addr_t max_addr;
956 	phys_addr_t min_addr;
961 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
962 	r1.size = SMP_CACHE_BYTES;
	/* only gap_size of room between r2 and r1 — not enough for r3_size */
965 	r2.base = r1.base - (r2.size + gap_size);
967 	total_size = r1.size + r2.size + r3_size;
968 	min_addr = r2.base + r2.size;
971 	memblock_reserve(r1.base, r1.size);
972 	memblock_reserve(r2.base, r2.size);
974 	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
978 	ASSERT_NE(allocated_ptr, NULL);
979 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
	/* r3 lands at the very start of DRAM; r1 and r2 are unchanged */
981 	ASSERT_EQ(rgn3->size, r3_size);
982 	ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());
984 	ASSERT_EQ(rgn2->size, r2.size);
985 	ASSERT_EQ(rgn2->base, r2.base);
987 	ASSERT_EQ(rgn1->size, r1.size);
988 	ASSERT_EQ(rgn1->base, r1.base);
990 	ASSERT_EQ(memblock.reserved.cnt, 3);
991 	ASSERT_EQ(memblock.reserved.total_size, total_size);
999 * A test that tries to allocate a memory region, where max_addr is
1000 * bigger than the end address of the available memory. Expect to allocate
1001 * a region that starts at the min_addr.
/* max_addr beyond end of DRAM: bottom-up still anchors the region at min_addr. */
1003 static int alloc_try_nid_bottom_up_cap_max_check(void)
1005 	struct memblock_region *rgn = &memblock.reserved.regions[0];
1006 	void *allocated_ptr = NULL;
1007 	phys_addr_t size = SZ_256;
1008 	phys_addr_t min_addr;
1009 	phys_addr_t max_addr;
1014 	min_addr = memblock_start_of_DRAM() + SZ_1K;
	/* deliberately past the end of available memory */
1015 	max_addr = memblock_end_of_DRAM() + SZ_256;
1017 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1021 	ASSERT_NE(allocated_ptr, NULL);
1022 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1024 	ASSERT_EQ(rgn->size, size);
1025 	ASSERT_EQ(rgn->base, min_addr);
1027 	ASSERT_EQ(memblock.reserved.cnt, 1);
1028 	ASSERT_EQ(memblock.reserved.total_size, size);
1036 * A test that tries to allocate a memory region, where min_addr is
1037 * smaller than the start address of the available memory. Expect to allocate
1038 * a region at the beginning of the available memory.
/* min_addr at start of DRAM, max_addr pulled in: region lands at start of memory. */
1040 static int alloc_try_nid_bottom_up_cap_min_check(void)
1042 	struct memblock_region *rgn = &memblock.reserved.regions[0];
1043 	void *allocated_ptr = NULL;
1044 	phys_addr_t size = SZ_1K;
1045 	phys_addr_t min_addr;
1046 	phys_addr_t max_addr;
1051 	min_addr = memblock_start_of_DRAM();
1052 	max_addr = memblock_end_of_DRAM() - SZ_256;
1054 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1058 	ASSERT_NE(allocated_ptr, NULL);
1059 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1061 	ASSERT_EQ(rgn->size, size);
1062 	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
1064 	ASSERT_EQ(memblock.reserved.cnt, 1);
1065 	ASSERT_EQ(memblock.reserved.total_size, size);
1072 /* Test case wrappers for range tests */
/* Run the simple-range test in both top-down and bottom-up allocation modes. */
1073 static int alloc_try_nid_simple_check(void)
1075 	test_print("\tRunning %s...\n", __func__);
1076 	memblock_set_bottom_up(false);
1077 	alloc_try_nid_top_down_simple_check();
1078 	memblock_set_bottom_up(true);
1079 	alloc_try_nid_bottom_up_simple_check();
/* Run the misaligned-boundary tests in both allocation directions. */
1084 static int alloc_try_nid_misaligned_check(void)
1086 	test_print("\tRunning %s...\n", __func__);
1087 	memblock_set_bottom_up(false);
1088 	alloc_try_nid_top_down_end_misaligned_check();
1089 	memblock_set_bottom_up(true);
1090 	alloc_try_nid_bottom_up_start_misaligned_check();
/* Run the narrow-range tests in both allocation directions. */
1095 static int alloc_try_nid_narrow_range_check(void)
1097 	test_print("\tRunning %s...\n", __func__);
1098 	memblock_set_bottom_up(false);
1099 	alloc_try_nid_top_down_narrow_range_check();
1100 	memblock_set_bottom_up(true);
1101 	alloc_try_nid_bottom_up_narrow_range_check();
/* Run the reserved-borders-with-space tests in both allocation directions. */
1106 static int alloc_try_nid_reserved_with_space_check(void)
1108 	test_print("\tRunning %s...\n", __func__);
1109 	memblock_set_bottom_up(false);
1110 	alloc_try_nid_top_down_reserved_with_space_check();
1111 	memblock_set_bottom_up(true);
1112 	alloc_try_nid_bottom_up_reserved_with_space_check();
/* Run the reserved-borders-without-space tests in both allocation directions. */
1117 static int alloc_try_nid_reserved_no_space_check(void)
1119 	test_print("\tRunning %s...\n", __func__);
1120 	memblock_set_bottom_up(false);
1121 	alloc_try_nid_top_down_reserved_no_space_check();
1122 	memblock_set_bottom_up(true);
1123 	alloc_try_nid_bottom_up_reserved_no_space_check();
/* Run the capped-max_addr tests in both allocation directions. */
1128 static int alloc_try_nid_cap_max_check(void)
1130 	test_print("\tRunning %s...\n", __func__);
1131 	memblock_set_bottom_up(false);
1132 	alloc_try_nid_top_down_cap_max_check();
1133 	memblock_set_bottom_up(true);
1134 	alloc_try_nid_bottom_up_cap_max_check();
/* Run the capped-min_addr tests in both allocation directions. */
1139 static int alloc_try_nid_cap_min_check(void)
1141 	test_print("\tRunning %s...\n", __func__);
1142 	memblock_set_bottom_up(false);
1143 	alloc_try_nid_top_down_cap_min_check();
1144 	memblock_set_bottom_up(true);
1145 	alloc_try_nid_bottom_up_cap_min_check();
/* Run the direction-independent min-reserved test via both run helpers. */
1150 static int alloc_try_nid_min_reserved_check(void)
1152 	test_print("\tRunning %s...\n", __func__);
1153 	run_top_down(alloc_try_nid_min_reserved_generic_check);
1154 	run_bottom_up(alloc_try_nid_min_reserved_generic_check);
/* Run the direction-independent max-reserved test via both run helpers. */
1159 static int alloc_try_nid_max_reserved_check(void)
1161 	test_print("\tRunning %s...\n", __func__);
1162 	run_top_down(alloc_try_nid_max_reserved_generic_check);
1163 	run_bottom_up(alloc_try_nid_max_reserved_generic_check);
/* Run the direction-independent exact-address test via both run helpers. */
1168 static int alloc_try_nid_exact_address_check(void)
1170 	test_print("\tRunning %s...\n", __func__);
1171 	run_top_down(alloc_try_nid_exact_address_generic_check);
1172 	run_bottom_up(alloc_try_nid_exact_address_generic_check);
/* Run the direction-independent full-merge test via both run helpers. */
1177 static int alloc_try_nid_reserved_full_merge_check(void)
1179 	test_print("\tRunning %s...\n", __func__);
1180 	run_top_down(alloc_try_nid_reserved_full_merge_generic_check);
1181 	run_bottom_up(alloc_try_nid_reserved_full_merge_generic_check);
/* Run the direction-independent all-reserved test via both run helpers. */
1186 static int alloc_try_nid_reserved_all_check(void)
1188 	test_print("\tRunning %s...\n", __func__);
1189 	run_top_down(alloc_try_nid_reserved_all_generic_check);
1190 	run_bottom_up(alloc_try_nid_reserved_all_generic_check);
/* Run the direction-independent low-max_addr test via both run helpers. */
1195 static int alloc_try_nid_low_max_check(void)
1197 	test_print("\tRunning %s...\n", __func__);
1198 	run_top_down(alloc_try_nid_low_max_generic_check);
1199 	run_bottom_up(alloc_try_nid_low_max_generic_check);
/* Entry point for all range-restricted allocation tests (no NUMA involvement). */
1204 static int memblock_alloc_nid_range_checks(void)
1206 	test_print("Running %s range tests...\n",
1207 		   get_memblock_alloc_try_nid_name(alloc_nid_test_flags));
	/* tests with a dedicated top-down/bottom-up variant */
1209 	alloc_try_nid_simple_check();
1210 	alloc_try_nid_misaligned_check();
1211 	alloc_try_nid_narrow_range_check();
1212 	alloc_try_nid_reserved_with_space_check();
1213 	alloc_try_nid_reserved_no_space_check();
1214 	alloc_try_nid_cap_max_check();
1215 	alloc_try_nid_cap_min_check();
	/* direction-independent (generic) tests */
1217 	alloc_try_nid_min_reserved_check();
1218 	alloc_try_nid_max_reserved_check();
1219 	alloc_try_nid_exact_address_check();
1220 	alloc_try_nid_reserved_full_merge_check();
1221 	alloc_try_nid_reserved_all_check();
1222 	alloc_try_nid_low_max_check();
1228 * A test that tries to allocate a memory region in a specific NUMA node that
1229 * has enough memory to allocate a region of the requested size.
1230 * Expect to allocate an aligned region at the end of the requested node.
/* NUMA: requested node has room; region goes at the end of that node (top-down). */
1232 static int alloc_try_nid_top_down_numa_simple_check(void)
1235 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1236 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1237 	void *allocated_ptr = NULL;
1239 	phys_addr_t min_addr;
1240 	phys_addr_t max_addr;
1243 	setup_numa_memblock(node_fractions);
	/* request a quarter of the node so it is guaranteed to fit */
1245 	ASSERT_LE(SZ_4, req_node->size);
1246 	size = req_node->size / SZ_4;
1247 	min_addr = memblock_start_of_DRAM();
1248 	max_addr = memblock_end_of_DRAM();
1250 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1251 						   min_addr, max_addr, nid_req);
1253 	ASSERT_NE(allocated_ptr, NULL);
1254 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* region ends flush with the requested node and stays inside it */
1256 	ASSERT_EQ(new_rgn->size, size);
1257 	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
1258 	ASSERT_LE(req_node->base, new_rgn->base);
1260 	ASSERT_EQ(memblock.reserved.cnt, 1);
1261 	ASSERT_EQ(memblock.reserved.total_size, size);
1269 * A test that tries to allocate a memory region in a specific NUMA node that
1270 * does not have enough memory to allocate a region of the requested size:
1272 * | +-----+ +------------------+ |
1273 * | | req | | expected | |
1274 * +---+-----+----------+------------------+-----+
1278 * +-----------------------------+---------+-----+
1280 * Expect to allocate an aligned region at the end of the last node that has
1281 * enough memory (in this case, nid = 6) after falling back to NUMA_NO_NODE.
/* NUMA: requested node too small; falls back to a larger node (NUMA_NO_NODE path). */
1283 static int alloc_try_nid_top_down_numa_small_node_check(void)
1287 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1288 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1289 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1290 	void *allocated_ptr = NULL;
1292 	phys_addr_t min_addr;
1293 	phys_addr_t max_addr;
1296 	setup_numa_memblock(node_fractions);
	/* ask for twice the requested node's size so it cannot be satisfied there */
1298 	size = SZ_2 * req_node->size;
1299 	min_addr = memblock_start_of_DRAM();
1300 	max_addr = memblock_end_of_DRAM();
1302 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1303 						   min_addr, max_addr, nid_req);
1305 	ASSERT_NE(allocated_ptr, NULL);
1306 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* region ends flush with the expected fallback node and stays inside it */
1308 	ASSERT_EQ(new_rgn->size, size);
1309 	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
1310 	ASSERT_LE(exp_node->base, new_rgn->base);
1312 	ASSERT_EQ(memblock.reserved.cnt, 1);
1313 	ASSERT_EQ(memblock.reserved.total_size, size);
1321 * A test that tries to allocate a memory region in a specific NUMA node that
1322 * is fully reserved:
1324 * | +---------+ +------------------+ |
1325 * | |requested| | expected | |
1326 * +--------------+---------+------------+------------------+-----+
1328 * | +---------+ +---------+ |
1329 * | | reserved| | new | |
1330 * +--------------+---------+---------------------+---------+-----+
1332 * Expect to allocate an aligned region at the end of the last node that is
1333 * large enough and has enough unreserved memory (in this case, nid = 6) after
1334 * falling back to NUMA_NO_NODE. The region count and total size get updated.
/* NUMA: requested node fully reserved; falls back to another node. */
1336 static int alloc_try_nid_top_down_numa_node_reserved_check(void)
1340 	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
1341 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1342 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1343 	void *allocated_ptr = NULL;
1345 	phys_addr_t min_addr;
1346 	phys_addr_t max_addr;
1349 	setup_numa_memblock(node_fractions);
1351 	size = req_node->size;
1352 	min_addr = memblock_start_of_DRAM();
1353 	max_addr = memblock_end_of_DRAM();
	/* reserve the whole requested node up front */
1355 	memblock_reserve(req_node->base, req_node->size);
1356 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1357 						   min_addr, max_addr, nid_req);
1359 	ASSERT_NE(allocated_ptr, NULL);
1360 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* new region ends flush with the expected fallback node */
1362 	ASSERT_EQ(new_rgn->size, size);
1363 	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
1364 	ASSERT_LE(exp_node->base, new_rgn->base);
	/* two reserved regions: the up-front reservation plus the new allocation */
1366 	ASSERT_EQ(memblock.reserved.cnt, 2);
1367 	ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);
1375 * A test that tries to allocate a memory region in a specific NUMA node that
1376 * is partially reserved but has enough memory for the allocated region:
1378 * | +---------------------------------------+ |
1380 * +-----------+---------------------------------------+----------+
1382 * | +------------------+ +-----+ |
1383 * | | reserved | | new | |
1384 * +-----------+------------------+--------------+-----+----------+
1386 * Expect to allocate an aligned region at the end of the requested node. The
1387 * region count and total size get updated.
/* NUMA: node partially reserved but still roomy; allocate at the end of the node. */
1389 static int alloc_try_nid_top_down_numa_part_reserved_check(void)
1392 	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
1393 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1394 	void *allocated_ptr = NULL;
1397 	phys_addr_t min_addr;
1398 	phys_addr_t max_addr;
1401 	setup_numa_memblock(node_fractions);
	/* reserve the first half of the node; request a quarter of that half */
1403 	ASSERT_LE(SZ_8, req_node->size);
1404 	r1.base = req_node->base;
1405 	r1.size = req_node->size / SZ_2;
1406 	size = r1.size / SZ_4;
1407 	min_addr = memblock_start_of_DRAM();
1408 	max_addr = memblock_end_of_DRAM();
1410 	memblock_reserve(r1.base, r1.size);
1411 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1412 						   min_addr, max_addr, nid_req);
1414 	ASSERT_NE(allocated_ptr, NULL);
1415 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* new region ends flush with the requested node, inside its bounds */
1417 	ASSERT_EQ(new_rgn->size, size);
1418 	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
1419 	ASSERT_LE(req_node->base, new_rgn->base);
1421 	ASSERT_EQ(memblock.reserved.cnt, 2);
1422 	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
1430 * A test that tries to allocate a memory region in a specific NUMA node that
1431 * is partially reserved and does not have enough contiguous memory for the
1434 * | +-----------------------+ +----------------------|
1435 * | | requested | | expected |
1436 * +-----------+-----------------------+---------+----------------------+
1438 * | +----------+ +-----------|
1439 * | | reserved | | new |
1440 * +-----------------+----------+---------------------------+-----------+
1442 * Expect to allocate an aligned region at the end of the last node that is
1443 * large enough and has enough unreserved memory (in this case,
1444 * nid = NUMA_NODES - 1) after falling back to NUMA_NO_NODE. The region count
1445 * and total size get updated.
1447 static int alloc_try_nid_top_down_numa_part_reserved_fallback_check(void)
/*
 * Requested node is partially reserved such that no contiguous run of
 * 'size' bytes is left in it; expect a fallback to the last node that is
 * large enough (nid_exp = NUMA_NODES - 1).
 */
1450 	int nid_exp = NUMA_NODES - 1;
1451 	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
1452 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1453 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1454 	void *allocated_ptr = NULL;
1457 	phys_addr_t min_addr;
1458 	phys_addr_t max_addr;
1461 	setup_numa_memblock(node_fractions);
1463 	ASSERT_LE(SZ_4, req_node->size);
1464 	size = req_node->size / SZ_2;
	/* Reservation starts mid-node, splitting the free space into
	 * two pieces that are each smaller than 'size'. */
1465 	r1.base = req_node->base + (size / SZ_2);
1468 	min_addr = memblock_start_of_DRAM();
1469 	max_addr = memblock_end_of_DRAM();
1471 	memblock_reserve(r1.base, r1.size);
1472 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1473 					min_addr, max_addr, nid_req);
1475 	ASSERT_NE(allocated_ptr, NULL);
1476 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* Top-down: region ends at the end of the fallback node. */
1478 	ASSERT_EQ(new_rgn->size, size);
1479 	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
1480 	ASSERT_LE(exp_node->base, new_rgn->base);
1482 	ASSERT_EQ(memblock.reserved.cnt, 2);
1483 	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
1491 * A test that tries to allocate a memory region that spans over the min_addr
1492 * and max_addr range and overlaps with two different nodes, where the first
1493 * node is the requested node:
1499 * | +-----------------------+-----------+ |
1500 * | | requested | node3 | |
1501 * +-----------+-----------------------+-----------+--------------+
1505 * +-----------------------+-----------+--------------------------+
1507 * Expect to drop the lower limit and allocate a memory region that ends at
1508 * the end of the requested node.
1510 static int alloc_try_nid_top_down_numa_split_range_low_check(void)
/*
 * The [min_addr, max_addr) window straddles the boundary between the
 * requested node and the next one; only SZ_256 of the window lies inside
 * the requested node, so the lower limit must be dropped and the region
 * placed flush against the requested node's end.
 */
1513 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1514 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1515 	void *allocated_ptr = NULL;
1516 	phys_addr_t size = SZ_512;
1517 	phys_addr_t min_addr;
1518 	phys_addr_t max_addr;
1519 	phys_addr_t req_node_end;
1522 	setup_numa_memblock(node_fractions);
1524 	req_node_end = region_end(req_node);
	/* Window: last SZ_256 of the requested node + SZ_256 of the next. */
1525 	min_addr = req_node_end - SZ_256;
1526 	max_addr = min_addr + size;
1528 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1529 					min_addr, max_addr, nid_req);
1531 	ASSERT_NE(allocated_ptr, NULL);
1532 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1534 	ASSERT_EQ(new_rgn->size, size);
1535 	ASSERT_EQ(new_rgn->base, req_node_end - size);
1536 	ASSERT_LE(req_node->base, new_rgn->base);
1538 	ASSERT_EQ(memblock.reserved.cnt, 1);
1539 	ASSERT_EQ(memblock.reserved.total_size, size);
1547 * A test that tries to allocate a memory region that spans over the min_addr
1548 * and max_addr range and overlaps with two different nodes, where the second
1549 * node is the requested node:
1555 * | +--------------------------+---------+ |
1556 * | | expected |requested| |
1557 * +------+--------------------------+---------+----------------+
1561 * +-----------------------+---------+--------------------------+
1563 * Expect to drop the lower limit and allocate a memory region that
1564 * ends at the end of the first node that overlaps with the range.
1566 static int alloc_try_nid_top_down_numa_split_range_high_check(void)
/*
 * The window straddles the boundary between the node preceding the
 * requested one (nid_exp = nid_req - 1) and the requested node; the
 * allocation is expected to land at the end of the preceding node.
 */
1569 	int nid_exp = nid_req - 1;
1570 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1571 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1572 	void *allocated_ptr = NULL;
1573 	phys_addr_t size = SZ_512;
1574 	phys_addr_t min_addr;
1575 	phys_addr_t max_addr;
1576 	phys_addr_t exp_node_end;
1579 	setup_numa_memblock(node_fractions);
1581 	exp_node_end = region_end(exp_node);
	/* Window: last SZ_256 of the expected node + SZ_256 of the requested. */
1582 	min_addr = exp_node_end - SZ_256;
1583 	max_addr = min_addr + size;
1585 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1586 					min_addr, max_addr, nid_req);
1588 	ASSERT_NE(allocated_ptr, NULL);
1589 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1591 	ASSERT_EQ(new_rgn->size, size);
1592 	ASSERT_EQ(new_rgn->base, exp_node_end - size);
1593 	ASSERT_LE(exp_node->base, new_rgn->base);
1595 	ASSERT_EQ(memblock.reserved.cnt, 1);
1596 	ASSERT_EQ(memblock.reserved.total_size, size);
1604 * A test that tries to allocate a memory region that spans over the min_addr
1605 * and max_addr range and overlaps with two different nodes, where the requested
1606 * node ends before min_addr:
1612 * | +---------------+ +-------------+---------+ |
1613 * | | requested | | node1 | node2 | |
1614 * +----+---------------+--------+-------------+---------+----------+
1618 * +----------+---------+-------------------------------------------+
1620 * Expect to drop the lower limit and allocate a memory region that ends at
1621 * the end of the requested node.
1623 static int alloc_try_nid_top_down_numa_no_overlap_split_check(void)
/*
 * The requested node ends before min_addr; the window itself straddles
 * node1/node2. Expect the lower limit to be dropped so the region can
 * end at the end of the requested node.
 */
1626 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1627 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	/* "node2" in the diagram above is memory region index 6. */
1628 	struct memblock_region *node2 = &memblock.memory.regions[6];
1629 	void *allocated_ptr = NULL;
1631 	phys_addr_t min_addr;
1632 	phys_addr_t max_addr;
1635 	setup_numa_memblock(node_fractions);
	/* Window starts SZ_256 before node2's base, past the requested node. */
1638 	min_addr = node2->base - SZ_256;
1639 	max_addr = min_addr + size;
1641 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1642 					min_addr, max_addr, nid_req);
1644 	ASSERT_NE(allocated_ptr, NULL);
1645 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1647 	ASSERT_EQ(new_rgn->size, size);
1648 	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
1649 	ASSERT_LE(req_node->base, new_rgn->base);
1651 	ASSERT_EQ(memblock.reserved.cnt, 1);
1652 	ASSERT_EQ(memblock.reserved.total_size, size);
1660 * A test that tries to allocate memory within min_addr and max_addr range when
1661 * the requested node and the range do not overlap, and requested node ends
1662 * before min_addr. The range overlaps with multiple nodes along node
1669 * |-----------+ +----------+----...----+----------+ |
1670 * | requested | | min node | ... | max node | |
1671 * +-----------+-----------+----------+----...----+----------+------+
1675 * +---------------------------------------------------+-----+------+
1677 * Expect to allocate a memory region at the end of the final node in
1678 * the range after falling back to NUMA_NO_NODE.
1680 static int alloc_try_nid_top_down_numa_no_overlap_low_check(void)
/*
 * Requested node lies entirely below the [min_addr, max_addr) window
 * (which spans nodes 2..5). After falling back to NUMA_NO_NODE the
 * top-down allocator places the region at the end of the window.
 */
1683 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1684 	struct memblock_region *min_node = &memblock.memory.regions[2];
1685 	struct memblock_region *max_node = &memblock.memory.regions[5];
1686 	void *allocated_ptr = NULL;
1687 	phys_addr_t size = SZ_64;
1688 	phys_addr_t max_addr;
1689 	phys_addr_t min_addr;
1692 	setup_numa_memblock(node_fractions);
1694 	min_addr = min_node->base;
1695 	max_addr = region_end(max_node);
1697 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1698 					min_addr, max_addr, nid_req);
1700 	ASSERT_NE(allocated_ptr, NULL);
1701 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* Region ends exactly at max_addr, inside the final (max) node. */
1703 	ASSERT_EQ(new_rgn->size, size);
1704 	ASSERT_EQ(new_rgn->base, max_addr - size);
1705 	ASSERT_LE(max_node->base, new_rgn->base);
1707 	ASSERT_EQ(memblock.reserved.cnt, 1);
1708 	ASSERT_EQ(memblock.reserved.total_size, size);
1716 * A test that tries to allocate memory within min_addr and max_addr range when
1717 * the requested node and the range do not overlap, and requested node starts
1718 * after max_addr. The range overlaps with multiple nodes along node
1725 * | +----------+----...----+----------+ +-----------+ |
1726 * | | min node | ... | max node | | requested | |
1727 * +-----+----------+----...----+----------+--------+-----------+---+
1731 * +---------------------------------+-----+------------------------+
1733 * Expect to allocate a memory region at the end of the final node in
1734 * the range after falling back to NUMA_NO_NODE.
1736 static int alloc_try_nid_top_down_numa_no_overlap_high_check(void)
/*
 * Mirror of the no_overlap_low case: the requested node lies entirely
 * above the window (nodes 2..5). Same expected placement — end of the
 * window — after falling back to NUMA_NO_NODE.
 */
1739 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1740 	struct memblock_region *min_node = &memblock.memory.regions[2];
1741 	struct memblock_region *max_node = &memblock.memory.regions[5];
1742 	void *allocated_ptr = NULL;
1743 	phys_addr_t size = SZ_64;
1744 	phys_addr_t max_addr;
1745 	phys_addr_t min_addr;
1748 	setup_numa_memblock(node_fractions);
1750 	min_addr = min_node->base;
1751 	max_addr = region_end(max_node);
1753 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1754 					min_addr, max_addr, nid_req);
1756 	ASSERT_NE(allocated_ptr, NULL);
1757 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1759 	ASSERT_EQ(new_rgn->size, size);
1760 	ASSERT_EQ(new_rgn->base, max_addr - size);
1761 	ASSERT_LE(max_node->base, new_rgn->base);
1763 	ASSERT_EQ(memblock.reserved.cnt, 1);
1764 	ASSERT_EQ(memblock.reserved.total_size, size);
1772 * A test that tries to allocate a memory region in a specific NUMA node that
1773 * has enough memory to allocate a region of the requested size.
1774 * Expect to allocate an aligned region at the beginning of the requested node.
1776 static int alloc_try_nid_bottom_up_numa_simple_check(void)
/*
 * Bottom-up twin of the simple NUMA test: the requested node has room,
 * so the region lands at the node's base.
 */
1779 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1780 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1781 	void *allocated_ptr = NULL;
1783 	phys_addr_t min_addr;
1784 	phys_addr_t max_addr;
1787 	setup_numa_memblock(node_fractions);
	/* Node must hold at least SZ_4 so size = node/4 is non-zero. */
1789 	ASSERT_LE(SZ_4, req_node->size);
1790 	size = req_node->size / SZ_4;
1791 	min_addr = memblock_start_of_DRAM();
1792 	max_addr = memblock_end_of_DRAM();
1794 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1795 					min_addr, max_addr, nid_req);
1797 	ASSERT_NE(allocated_ptr, NULL);
1798 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* Bottom-up: region starts at the requested node's base. */
1800 	ASSERT_EQ(new_rgn->size, size);
1801 	ASSERT_EQ(new_rgn->base, req_node->base);
1802 	ASSERT_LE(region_end(new_rgn), region_end(req_node));
1804 	ASSERT_EQ(memblock.reserved.cnt, 1);
1805 	ASSERT_EQ(memblock.reserved.total_size, size);
1813 * A test that tries to allocate a memory region in a specific NUMA node that
1814 * does not have enough memory to allocate a region of the requested size:
1816 * |----------------------+-----+ |
1817 * | expected | req | |
1818 * +----------------------+-----+----------------+
1822 * +---------+-----------------------------------+
1824 * Expect to allocate an aligned region at the beginning of the first node that
1825 * has enough memory (in this case, nid = 0) after falling back to NUMA_NO_NODE.
1827 static int alloc_try_nid_bottom_up_numa_small_node_check(void)
/*
 * Requested node is too small (we ask for twice its size); expect a
 * fallback to the first node that can fit the region (nid_exp).
 */
1831 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1832 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1833 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1834 	void *allocated_ptr = NULL;
1836 	phys_addr_t min_addr;
1837 	phys_addr_t max_addr;
1840 	setup_numa_memblock(node_fractions);
	/* Deliberately larger than the requested node can hold. */
1842 	size = SZ_2 * req_node->size;
1843 	min_addr = memblock_start_of_DRAM();
1844 	max_addr = memblock_end_of_DRAM();
1846 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1847 					min_addr, max_addr, nid_req);
1849 	ASSERT_NE(allocated_ptr, NULL);
1850 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* Bottom-up: region starts at the fallback node's base. */
1852 	ASSERT_EQ(new_rgn->size, size);
1853 	ASSERT_EQ(new_rgn->base, exp_node->base);
1854 	ASSERT_LE(region_end(new_rgn), region_end(exp_node));
1856 	ASSERT_EQ(memblock.reserved.cnt, 1);
1857 	ASSERT_EQ(memblock.reserved.total_size, size);
1865 * A test that tries to allocate a memory region in a specific NUMA node that
1866 * is fully reserved:
1868 * |----------------------+ +-----------+ |
1869 * | expected | | requested | |
1870 * +----------------------+-----+-----------+--------------------+
1872 * |-----------+ +-----------+ |
1873 * | new | | reserved | |
1874 * +-----------+----------------+-----------+--------------------+
1876 * Expect to allocate an aligned region at the beginning of the first node that
1877 * is large enough and has enough unreserved memory (in this case, nid = 0)
1878 * after falling back to NUMA_NO_NODE. The region count and total size get
1881 static int alloc_try_nid_bottom_up_numa_node_reserved_check(void)
/*
 * Bottom-up twin of the node_reserved test: the requested node is fully
 * reserved, so allocation falls back and lands at the expected node's base.
 */
1885 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1886 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1887 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1888 	void *allocated_ptr = NULL;
1890 	phys_addr_t min_addr;
1891 	phys_addr_t max_addr;
1894 	setup_numa_memblock(node_fractions);
1896 	size = req_node->size;
1897 	min_addr = memblock_start_of_DRAM();
1898 	max_addr = memblock_end_of_DRAM();
	/* Fully reserve the requested node to force the fallback path. */
1900 	memblock_reserve(req_node->base, req_node->size);
1901 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1902 					min_addr, max_addr, nid_req);
1904 	ASSERT_NE(allocated_ptr, NULL);
1905 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1907 	ASSERT_EQ(new_rgn->size, size);
1908 	ASSERT_EQ(new_rgn->base, exp_node->base);
1909 	ASSERT_LE(region_end(new_rgn), region_end(exp_node));
1911 	ASSERT_EQ(memblock.reserved.cnt, 2);
1912 	ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);
1920 * A test that tries to allocate a memory region in a specific NUMA node that
1921 * is partially reserved but has enough memory for the allocated region:
1923 * | +---------------------------------------+ |
1925 * +-----------+---------------------------------------+---------+
1927 * | +------------------+-----+ |
1928 * | | reserved | new | |
1929 * +-----------+------------------+-----+------------------------+
1931 * Expect to allocate an aligned region in the requested node that merges with
1932 * the existing reserved region. The total size gets updated.
1934 static int alloc_try_nid_bottom_up_numa_part_reserved_check(void)
/*
 * Bottom-up twin of the part_reserved test. Because the allocation is
 * placed directly after the existing reservation, the two merge into a
 * single reserved region (cnt stays 1, sizes add up).
 */
1937 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1938 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1939 	void *allocated_ptr = NULL;
1942 	phys_addr_t min_addr;
1943 	phys_addr_t max_addr;
1944 	phys_addr_t total_size;
1947 	setup_numa_memblock(node_fractions);
1949 	ASSERT_LE(SZ_8, req_node->size);
1950 	r1.base = req_node->base;
1951 	r1.size = req_node->size / SZ_2;
1952 	size = r1.size / SZ_4;
1953 	min_addr = memblock_start_of_DRAM();
1954 	max_addr = memblock_end_of_DRAM();
1955 	total_size = size + r1.size;
	/* Reserve the lower half of the requested node. */
1957 	memblock_reserve(r1.base, r1.size);
1958 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1959 					min_addr, max_addr, nid_req);
1961 	ASSERT_NE(allocated_ptr, NULL);
1962 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* Merged region covers reservation + new allocation contiguously. */
1964 	ASSERT_EQ(new_rgn->size, total_size);
1965 	ASSERT_EQ(new_rgn->base, req_node->base);
1966 	ASSERT_LE(region_end(new_rgn), region_end(req_node));
1968 	ASSERT_EQ(memblock.reserved.cnt, 1);
1969 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1977 * A test that tries to allocate a memory region in a specific NUMA node that
1978 * is partially reserved and does not have enough contiguous memory for the
1981 * |----------------------+ +-----------------------+ |
1982 * | expected | | requested | |
1983 * +----------------------+-------+-----------------------+---------+
1985 * |-----------+ +----------+ |
1986 * | new | | reserved | |
1987 * +-----------+------------------------+----------+----------------+
1989 * Expect to allocate an aligned region at the beginning of the first
1990 * node that is large enough and has enough unreserved memory (in this case,
1991 * nid = 0) after falling back to NUMA_NO_NODE. The region count and total size
1994 static int alloc_try_nid_bottom_up_numa_part_reserved_fallback_check(void)
/*
 * Bottom-up twin of the part_reserved_fallback test: the mid-node
 * reservation leaves no contiguous run of 'size' bytes in the requested
 * node, so the region goes to the first large-enough node (nid_exp).
 */
1998 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1999 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2000 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
2001 	void *allocated_ptr = NULL;
2004 	phys_addr_t min_addr;
2005 	phys_addr_t max_addr;
2008 	setup_numa_memblock(node_fractions);
2010 	ASSERT_LE(SZ_4, req_node->size);
2011 	size = req_node->size / SZ_2;
	/* Reservation is offset into the node so the free space is split. */
2012 	r1.base = req_node->base + (size / SZ_2);
2015 	min_addr = memblock_start_of_DRAM();
2016 	max_addr = memblock_end_of_DRAM();
2018 	memblock_reserve(r1.base, r1.size);
2019 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2020 					min_addr, max_addr, nid_req);
2022 	ASSERT_NE(allocated_ptr, NULL);
2023 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2025 	ASSERT_EQ(new_rgn->size, size);
2026 	ASSERT_EQ(new_rgn->base, exp_node->base);
2027 	ASSERT_LE(region_end(new_rgn), region_end(exp_node));
2029 	ASSERT_EQ(memblock.reserved.cnt, 2);
2030 	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
2038 * A test that tries to allocate a memory region that spans over the min_addr
2039 * and max_addr range and overlaps with two different nodes, where the first
2040 * node is the requested node:
2046 * | +-----------------------+-----------+ |
2047 * | | requested | node3 | |
2048 * +-----------+-----------------------+-----------+--------------+
2052 * +-----------+-----------+--------------------------------------+
2054 * Expect to drop the lower limit and allocate a memory region at the beginning
2055 * of the requested node.
2057 static int alloc_try_nid_bottom_up_numa_split_range_low_check(void)
/*
 * Bottom-up twin of split_range_low: the window straddles the boundary
 * with the next node; after dropping the lower limit the region is
 * placed at the requested node's base.
 */
2060 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2061 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2062 	void *allocated_ptr = NULL;
2063 	phys_addr_t size = SZ_512;
2064 	phys_addr_t min_addr;
2065 	phys_addr_t max_addr;
2066 	phys_addr_t req_node_end;
2069 	setup_numa_memblock(node_fractions);
2071 	req_node_end = region_end(req_node);
	/* Window: last SZ_256 of the requested node + SZ_256 of the next. */
2072 	min_addr = req_node_end - SZ_256;
2073 	max_addr = min_addr + size;
2075 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2076 					min_addr, max_addr, nid_req);
2078 	ASSERT_NE(allocated_ptr, NULL);
2079 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2081 	ASSERT_EQ(new_rgn->size, size);
2082 	ASSERT_EQ(new_rgn->base, req_node->base);
2083 	ASSERT_LE(region_end(new_rgn), req_node_end);
2085 	ASSERT_EQ(memblock.reserved.cnt, 1);
2086 	ASSERT_EQ(memblock.reserved.total_size, size);
2094 * A test that tries to allocate a memory region that spans over the min_addr
2095 * and max_addr range and overlaps with two different nodes, where the second
2096 * node is the requested node:
2102 * |------------------+ +----------------------+---------+ |
2103 * | expected | | previous |requested| |
2104 * +------------------+--------+----------------------+---------+------+
2108 * +---------+---------------------------------------------------------+
2110 * Expect to drop the lower limit and allocate a memory region at the beginning
2111 * of the first node that has enough memory.
2113 static int alloc_try_nid_bottom_up_numa_split_range_high_check(void)
/*
 * Bottom-up twin of split_range_high: the window ends inside the
 * requested node; after dropping the lower limit the region is placed
 * at the base of the first node with enough memory (nid_exp).
 */
2117 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2118 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2119 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
2120 	void *allocated_ptr = NULL;
2121 	phys_addr_t size = SZ_512;
2122 	phys_addr_t min_addr;
2123 	phys_addr_t max_addr;
2124 	phys_addr_t exp_node_end;
2127 	setup_numa_memblock(node_fractions);
	/* NOTE(review): the bound is taken from req_node here, while the
	 * top-down twin uses region_end(exp_node); with nid_exp below
	 * nid_req the ASSERT_LE below is therefore loose — confirm this
	 * is intentional. */
2129 	exp_node_end = region_end(req_node);
2130 	min_addr = req_node->base - SZ_256;
2131 	max_addr = min_addr + size;
2133 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2134 					min_addr, max_addr, nid_req);
2136 	ASSERT_NE(allocated_ptr, NULL);
2137 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2139 	ASSERT_EQ(new_rgn->size, size);
2140 	ASSERT_EQ(new_rgn->base, exp_node->base);
2141 	ASSERT_LE(region_end(new_rgn), exp_node_end);
2143 	ASSERT_EQ(memblock.reserved.cnt, 1);
2144 	ASSERT_EQ(memblock.reserved.total_size, size);
2152 * A test that tries to allocate a memory region that spans over the min_addr
2153 * and max_addr range and overlaps with two different nodes, where the requested
2154 * node ends before min_addr:
2160 * | +---------------+ +-------------+---------+ |
2161 * | | requested | | node1 | node2 | |
2162 * +----+---------------+--------+-------------+---------+---------+
2166 * +----+---------+------------------------------------------------+
2168 * Expect to drop the lower limit and allocate a memory region that starts at
2169 * the beginning of the requested node.
2171 static int alloc_try_nid_bottom_up_numa_no_overlap_split_check(void)
/*
 * Bottom-up twin of no_overlap_split: requested node ends before
 * min_addr; expect the lower limit to be dropped and the region to
 * start at the requested node's base.
 */
2174 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2175 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	/* "node2" in the diagram above is memory region index 6. */
2176 	struct memblock_region *node2 = &memblock.memory.regions[6];
2177 	void *allocated_ptr = NULL;
2179 	phys_addr_t min_addr;
2180 	phys_addr_t max_addr;
2183 	setup_numa_memblock(node_fractions);
2186 	min_addr = node2->base - SZ_256;
2187 	max_addr = min_addr + size;
2189 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2190 					min_addr, max_addr, nid_req);
2192 	ASSERT_NE(allocated_ptr, NULL);
2193 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2195 	ASSERT_EQ(new_rgn->size, size);
2196 	ASSERT_EQ(new_rgn->base, req_node->base);
2197 	ASSERT_LE(region_end(new_rgn), region_end(req_node));
2199 	ASSERT_EQ(memblock.reserved.cnt, 1);
2200 	ASSERT_EQ(memblock.reserved.total_size, size);
2208 * A test that tries to allocate memory within min_addr and max_addr range when
2209 * the requested node and the range do not overlap, and requested node ends
2210 * before min_addr. The range overlaps with multiple nodes along node
2217 * |-----------+ +----------+----...----+----------+ |
2218 * | requested | | min node | ... | max node | |
2219 * +-----------+-----------+----------+----...----+----------+------+
2223 * +-----------------------+-----+----------------------------------+
2225 * Expect to allocate a memory region at the beginning of the first node
2226 * in the range after falling back to NUMA_NO_NODE.
2228 static int alloc_try_nid_bottom_up_numa_no_overlap_low_check(void)
/*
 * Bottom-up twin of no_overlap_low: requested node lies below the
 * window (nodes 2..5); after falling back to NUMA_NO_NODE the region
 * starts at min_addr, inside the first node of the window.
 */
2231 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2232 	struct memblock_region *min_node = &memblock.memory.regions[2];
2233 	struct memblock_region *max_node = &memblock.memory.regions[5];
2234 	void *allocated_ptr = NULL;
2235 	phys_addr_t size = SZ_64;
2236 	phys_addr_t max_addr;
2237 	phys_addr_t min_addr;
2240 	setup_numa_memblock(node_fractions);
2242 	min_addr = min_node->base;
2243 	max_addr = region_end(max_node);
2245 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2246 					min_addr, max_addr, nid_req);
2248 	ASSERT_NE(allocated_ptr, NULL);
2249 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2251 	ASSERT_EQ(new_rgn->size, size);
2252 	ASSERT_EQ(new_rgn->base, min_addr);
2253 	ASSERT_LE(region_end(new_rgn), region_end(min_node));
2255 	ASSERT_EQ(memblock.reserved.cnt, 1);
2256 	ASSERT_EQ(memblock.reserved.total_size, size);
2264 * A test that tries to allocate memory within min_addr and max_addr range when
2265 * the requested node and the range do not overlap, and requested node starts
2266 * after max_addr. The range overlaps with multiple nodes along node
2273 * | +----------+----...----+----------+ +---------+ |
2274 * | | min node | ... | max node | |requested| |
2275 * +-----+----------+----...----+----------+---------+---------+---+
2279 * +-----+-----+---------------------------------------------------+
2281 * Expect to allocate a memory region at the beginning of the first node
2282 * in the range after falling back to NUMA_NO_NODE.
2284 static int alloc_try_nid_bottom_up_numa_no_overlap_high_check(void)
/*
 * Bottom-up twin of no_overlap_high: requested node lies above the
 * window (nodes 2..5); same expected placement as no_overlap_low —
 * the region starts at min_addr after falling back to NUMA_NO_NODE.
 */
2287 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2288 	struct memblock_region *min_node = &memblock.memory.regions[2];
2289 	struct memblock_region *max_node = &memblock.memory.regions[5];
2290 	void *allocated_ptr = NULL;
2291 	phys_addr_t size = SZ_64;
2292 	phys_addr_t max_addr;
2293 	phys_addr_t min_addr;
2296 	setup_numa_memblock(node_fractions);
2298 	min_addr = min_node->base;
2299 	max_addr = region_end(max_node);
2301 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2302 					min_addr, max_addr, nid_req);
2304 	ASSERT_NE(allocated_ptr, NULL);
2305 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2307 	ASSERT_EQ(new_rgn->size, size);
2308 	ASSERT_EQ(new_rgn->base, min_addr);
2309 	ASSERT_LE(region_end(new_rgn), region_end(min_node));
2311 	ASSERT_EQ(memblock.reserved.cnt, 1);
2312 	ASSERT_EQ(memblock.reserved.total_size, size);
2320 * A test that tries to allocate a memory region in a specific NUMA node that
2321 * does not have enough memory to allocate a region of the requested size.
2322 * Additionally, none of the nodes have enough memory to allocate the region:
2324 * +-----------------------------------+
2326 * +-----------------------------------+
2327 * |-------+-------+-------+-------+-------+-------+-------+-------|
2328 * | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 |
2329 * +-------+-------+-------+-------+-------+-------+-------+-------+
2331 * Expect no allocation to happen.
2333 static int alloc_try_nid_numa_large_region_generic_check(void)
/*
 * Negative test: MEM_SIZE/2 is larger than any single node (per
 * node_fractions), so the allocation must fail in both directions.
 */
2336 	void *allocated_ptr = NULL;
2337 	phys_addr_t size = MEM_SIZE / SZ_2;
2338 	phys_addr_t min_addr;
2339 	phys_addr_t max_addr;
2342 	setup_numa_memblock(node_fractions);
2344 	min_addr = memblock_start_of_DRAM();
2345 	max_addr = memblock_end_of_DRAM();
2347 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2348 					min_addr, max_addr, nid_req);
	/* No node can fit the region, so no allocation should happen. */
2349 	ASSERT_EQ(allocated_ptr, NULL);
2357 * A test that tries to allocate memory within min_addr and max_addr range when
2358 * there are two reserved regions at the borders. The requested node starts at
2359 * min_addr and ends at max_addr and is the same size as the region to be
2366 * | +-----------+-----------------------+-----------------------|
2367 * | | node5 | requested | node7 |
2368 * +------+-----------+-----------------------+-----------------------+
2370 * | +----+-----------------------+----+ |
2371 * | | r2 | new | r1 | |
2372 * +-------------+----+-----------------------+----+------------------+
2374 * Expect to merge all of the regions into one. The region counter and total
2375 * size fields get updated.
2377 static int alloc_try_nid_numa_reserved_full_merge_generic_check(void)
/*
 * Two reservations (r1 at the next node's base, r2 just below the
 * requested node) bracket a gap exactly 'size' bytes wide; the new
 * allocation fills the gap so all three merge into one reserved region.
 */
2380 	int nid_next = nid_req + 1;
2381 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2382 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2383 	struct memblock_region *next_node = &memblock.memory.regions[nid_next];
2384 	void *allocated_ptr = NULL;
2385 	struct region r1, r2;
2386 	phys_addr_t size = req_node->size;
2387 	phys_addr_t total_size;
2388 	phys_addr_t max_addr;
2389 	phys_addr_t min_addr;
2392 	setup_numa_memblock(node_fractions);
2394 	r1.base = next_node->base;
	/* r2 sits exactly 'size' bytes below r1, leaving a gap that the
	 * allocation must fill completely. */
2398 	r2.base = r1.base - (size + r2.size);
2400 	total_size = r1.size + r2.size + size;
2401 	min_addr = r2.base + r2.size;
2404 	memblock_reserve(r1.base, r1.size);
2405 	memblock_reserve(r2.base, r2.size);
2407 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2408 					min_addr, max_addr, nid_req);
2410 	ASSERT_NE(allocated_ptr, NULL);
2411 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
	/* r2 + new + r1 merged into a single region starting at r2.base. */
2413 	ASSERT_EQ(new_rgn->size, total_size);
2414 	ASSERT_EQ(new_rgn->base, r2.base);
2416 	ASSERT_LE(new_rgn->base, req_node->base);
2417 	ASSERT_LE(region_end(req_node), region_end(new_rgn));
2419 	ASSERT_EQ(memblock.reserved.cnt, 1);
2420 	ASSERT_EQ(memblock.reserved.total_size, total_size);
2428 * A test that tries to allocate memory within min_addr and max_addr range,
2429 * where the total range can fit the region, but it is split between two nodes
2430 * and everything else is reserved. Additionally, nid is set to NUMA_NO_NODE
2431 * instead of requesting a specific node:
2436 * | +---------------------+-----------|
2437 * | | prev node | next node |
2438 * +------+---------------------+-----------+
2440 * |----------------------+ +-----|
2442 * +----------------------+-----------+-----+
2449 * Expect no allocation to happen.
2451 static int alloc_try_nid_numa_split_all_reserved_generic_check(void)
/*
 * Negative test with nid = NUMA_NO_NODE: the only free gap of 'size'
 * bytes is split across the node boundary, and everything else is
 * reserved, so the allocation must fail.
 */
2453 	void *allocated_ptr = NULL;
2454 	struct memblock_region *next_node = &memblock.memory.regions[7];
2455 	struct region r1, r2;
2456 	phys_addr_t size = SZ_256;
2457 	phys_addr_t max_addr;
2458 	phys_addr_t min_addr;
2461 	setup_numa_memblock(node_fractions);
	/* r2 covers everything from SZ_128 into node 7 up to end of DRAM. */
2463 	r2.base = next_node->base + SZ_128;
2464 	r2.size = memblock_end_of_DRAM() - r2.base;
	/* r1 covers everything below, leaving only a 'size'-byte gap that
	 * straddles the node boundary. */
2466 	r1.size = MEM_SIZE - (r2.size + size);
2467 	r1.base = memblock_start_of_DRAM();
2469 	min_addr = r1.base + r1.size;
2472 	memblock_reserve(r1.base, r1.size);
2473 	memblock_reserve(r2.base, r2.size);
2475 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
2479 	ASSERT_EQ(allocated_ptr, NULL);
2486 /* Test case wrappers for NUMA tests */
/* Run the simple NUMA test in both allocation directions. */
2487 static int alloc_try_nid_numa_simple_check(void)
2489 	test_print("\tRunning %s...\n", __func__);
2490 	memblock_set_bottom_up(false);
2491 	alloc_try_nid_top_down_numa_simple_check();
2492 	memblock_set_bottom_up(true);
2493 	alloc_try_nid_bottom_up_numa_simple_check();
/* Run the small-node fallback test in both allocation directions. */
2498 static int alloc_try_nid_numa_small_node_check(void)
2500 	test_print("\tRunning %s...\n", __func__);
2501 	memblock_set_bottom_up(false);
2502 	alloc_try_nid_top_down_numa_small_node_check();
2503 	memblock_set_bottom_up(true);
2504 	alloc_try_nid_bottom_up_numa_small_node_check();
/* Run the fully-reserved-node test in both allocation directions. */
2509 static int alloc_try_nid_numa_node_reserved_check(void)
2511 	test_print("\tRunning %s...\n", __func__);
2512 	memblock_set_bottom_up(false);
2513 	alloc_try_nid_top_down_numa_node_reserved_check();
2514 	memblock_set_bottom_up(true);
2515 	alloc_try_nid_bottom_up_numa_node_reserved_check();
/* Run the partially-reserved-node test in both allocation directions. */
2520 static int alloc_try_nid_numa_part_reserved_check(void)
2522 	test_print("\tRunning %s...\n", __func__);
2523 	memblock_set_bottom_up(false);
2524 	alloc_try_nid_top_down_numa_part_reserved_check();
2525 	memblock_set_bottom_up(true);
2526 	alloc_try_nid_bottom_up_numa_part_reserved_check();
/* Run the partial-reservation fallback test in both allocation directions. */
2531 static int alloc_try_nid_numa_part_reserved_fallback_check(void)
2533 	test_print("\tRunning %s...\n", __func__);
2534 	memblock_set_bottom_up(false);
2535 	alloc_try_nid_top_down_numa_part_reserved_fallback_check();
2536 	memblock_set_bottom_up(true);
2537 	alloc_try_nid_bottom_up_numa_part_reserved_fallback_check();
2542 static int alloc_try_nid_numa_split_range_low_check(void)
2544 test_print("\tRunning %s...\n", __func__);
2545 memblock_set_bottom_up(false);
2546 alloc_try_nid_top_down_numa_split_range_low_check();
2547 memblock_set_bottom_up(true);
2548 alloc_try_nid_bottom_up_numa_split_range_low_check();
2553 static int alloc_try_nid_numa_split_range_high_check(void)
2555 test_print("\tRunning %s...\n", __func__);
2556 memblock_set_bottom_up(false);
2557 alloc_try_nid_top_down_numa_split_range_high_check();
2558 memblock_set_bottom_up(true);
2559 alloc_try_nid_bottom_up_numa_split_range_high_check();
2564 static int alloc_try_nid_numa_no_overlap_split_check(void)
2566 test_print("\tRunning %s...\n", __func__);
2567 memblock_set_bottom_up(false);
2568 alloc_try_nid_top_down_numa_no_overlap_split_check();
2569 memblock_set_bottom_up(true);
2570 alloc_try_nid_bottom_up_numa_no_overlap_split_check();
2575 static int alloc_try_nid_numa_no_overlap_low_check(void)
2577 test_print("\tRunning %s...\n", __func__);
2578 memblock_set_bottom_up(false);
2579 alloc_try_nid_top_down_numa_no_overlap_low_check();
2580 memblock_set_bottom_up(true);
2581 alloc_try_nid_bottom_up_numa_no_overlap_low_check();
2586 static int alloc_try_nid_numa_no_overlap_high_check(void)
2588 test_print("\tRunning %s...\n", __func__);
2589 memblock_set_bottom_up(false);
2590 alloc_try_nid_top_down_numa_no_overlap_high_check();
2591 memblock_set_bottom_up(true);
2592 alloc_try_nid_bottom_up_numa_no_overlap_high_check();
2597 static int alloc_try_nid_numa_large_region_check(void)
2599 test_print("\tRunning %s...\n", __func__);
2600 run_top_down(alloc_try_nid_numa_large_region_generic_check);
2601 run_bottom_up(alloc_try_nid_numa_large_region_generic_check);
2606 static int alloc_try_nid_numa_reserved_full_merge_check(void)
2608 test_print("\tRunning %s...\n", __func__);
2609 run_top_down(alloc_try_nid_numa_reserved_full_merge_generic_check);
2610 run_bottom_up(alloc_try_nid_numa_reserved_full_merge_generic_check);
2615 static int alloc_try_nid_numa_split_all_reserved_check(void)
2617 test_print("\tRunning %s...\n", __func__);
2618 run_top_down(alloc_try_nid_numa_split_all_reserved_generic_check);
2619 run_bottom_up(alloc_try_nid_numa_split_all_reserved_generic_check);
2624 int __memblock_alloc_nid_numa_checks(void)
2626 test_print("Running %s NUMA tests...\n",
2627 get_memblock_alloc_try_nid_name(alloc_nid_test_flags));
2629 alloc_try_nid_numa_simple_check();
2630 alloc_try_nid_numa_small_node_check();
2631 alloc_try_nid_numa_node_reserved_check();
2632 alloc_try_nid_numa_part_reserved_check();
2633 alloc_try_nid_numa_part_reserved_fallback_check();
2634 alloc_try_nid_numa_split_range_low_check();
2635 alloc_try_nid_numa_split_range_high_check();
2637 alloc_try_nid_numa_no_overlap_split_check();
2638 alloc_try_nid_numa_no_overlap_low_check();
2639 alloc_try_nid_numa_no_overlap_high_check();
2640 alloc_try_nid_numa_large_region_check();
2641 alloc_try_nid_numa_reserved_full_merge_check();
2642 alloc_try_nid_numa_split_all_reserved_check();
2647 static int memblock_alloc_nid_checks_internal(int flags)
2649 alloc_nid_test_flags = flags;
2652 prefix_push(get_memblock_alloc_try_nid_name(flags));
2654 reset_memblock_attributes();
2655 dummy_physical_memory_init();
2657 memblock_alloc_nid_range_checks();
2658 memblock_alloc_nid_numa_checks();
2660 dummy_physical_memory_cleanup();
2667 int memblock_alloc_nid_checks(void)
2669 memblock_alloc_nid_checks_internal(TEST_F_NONE);
2670 memblock_alloc_nid_checks_internal(TEST_F_RAW);