Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'memblock-v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock

Pull memblock updates from Mike Rapoport:
"Test suite improvements:

- Added verification that memblock allocations zero the allocated
memory

- Added more test cases for memblock_add(), memblock_remove(),
memblock_reserve() and memblock_free()

- Added tests for memblock_*_raw() family

- Added tests for NUMA-aware allocations in memblock_alloc_try_nid()
and memblock_alloc_try_nid_raw()"

* tag 'memblock-v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
memblock tests: add generic NUMA tests for memblock_alloc_try_nid*
memblock tests: add bottom-up NUMA tests for memblock_alloc_try_nid*
memblock tests: add top-down NUMA tests for memblock_alloc_try_nid*
memblock tests: add simulation of physical memory with multiple NUMA nodes
memblock_tests: move variable declarations to single block
memblock tests: remove 'cleared' from comment blocks
memblock tests: add tests for memblock_trim_memory
memblock tests: add tests for memblock_*bottom_up functions
memblock tests: update alloc_nid_api to test memblock_alloc_try_nid_raw
memblock tests: update alloc_api to test memblock_alloc_raw
memblock tests: add additional tests for basic api and memblock_alloc
memblock tests: add labels to verbose output for generic alloc tests
memblock tests: update zeroed memory check for memblock_alloc_* tests
memblock tests: update tests to check if memblock_alloc zeroed memory
memblock tests: update reference to obsolete build option in comments
memblock tests: add command line help option

+2663 -337
+1 -1
tools/testing/memblock/scripts/Makefile.include
··· 3 3 4 4 # Simulate CONFIG_NUMA=y 5 5 ifeq ($(NUMA), 1) 6 - CFLAGS += -D CONFIG_NUMA 6 + CFLAGS += -D CONFIG_NUMA -D CONFIG_NODES_SHIFT=4 7 7 endif 8 8 9 9 # Use 32 bit physical addresses.
+138 -87
tools/testing/memblock/tests/alloc_api.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 #include "alloc_api.h" 3 3 4 + static int alloc_test_flags = TEST_F_NONE; 5 + 6 + static inline const char * const get_memblock_alloc_name(int flags) 7 + { 8 + if (flags & TEST_F_RAW) 9 + return "memblock_alloc_raw"; 10 + return "memblock_alloc"; 11 + } 12 + 13 + static inline void *run_memblock_alloc(phys_addr_t size, phys_addr_t align) 14 + { 15 + if (alloc_test_flags & TEST_F_RAW) 16 + return memblock_alloc_raw(size, align); 17 + return memblock_alloc(size, align); 18 + } 19 + 4 20 /* 5 21 * A simple test that tries to allocate a small memory region. 6 22 * Expect to allocate an aligned region near the end of the available memory. ··· 25 9 { 26 10 struct memblock_region *rgn = &memblock.reserved.regions[0]; 27 11 void *allocated_ptr = NULL; 28 - 29 - PREFIX_PUSH(); 30 - 31 12 phys_addr_t size = SZ_2; 32 13 phys_addr_t expected_start; 33 14 15 + PREFIX_PUSH(); 34 16 setup_memblock(); 35 17 36 18 expected_start = memblock_end_of_DRAM() - SMP_CACHE_BYTES; 37 19 38 - allocated_ptr = memblock_alloc(size, SMP_CACHE_BYTES); 20 + allocated_ptr = run_memblock_alloc(size, SMP_CACHE_BYTES); 39 21 40 22 ASSERT_NE(allocated_ptr, NULL); 23 + assert_mem_content(allocated_ptr, size, alloc_test_flags); 24 + 41 25 ASSERT_EQ(rgn->size, size); 42 26 ASSERT_EQ(rgn->base, expected_start); 43 27 ··· 74 58 struct memblock_region *rgn2 = &memblock.reserved.regions[0]; 75 59 struct region r1; 76 60 void *allocated_ptr = NULL; 77 - 78 - PREFIX_PUSH(); 79 - 80 61 phys_addr_t r2_size = SZ_16; 81 62 /* Use custom alignment */ 82 63 phys_addr_t alignment = SMP_CACHE_BYTES * 2; 83 64 phys_addr_t total_size; 84 65 phys_addr_t expected_start; 85 66 67 + PREFIX_PUSH(); 86 68 setup_memblock(); 87 69 88 70 r1.base = memblock_end_of_DRAM() - SZ_2; ··· 91 77 92 78 memblock_reserve(r1.base, r1.size); 93 79 94 - allocated_ptr = memblock_alloc(r2_size, alignment); 80 + allocated_ptr = run_memblock_alloc(r2_size, alignment); 95 81 96 82 
ASSERT_NE(allocated_ptr, NULL); 83 + assert_mem_content(allocated_ptr, r2_size, alloc_test_flags); 84 + 97 85 ASSERT_EQ(rgn1->size, r1.size); 98 86 ASSERT_EQ(rgn1->base, r1.base); 99 87 ··· 124 108 { 125 109 struct memblock_region *rgn = &memblock.reserved.regions[0]; 126 110 void *allocated_ptr = NULL; 127 - 128 - PREFIX_PUSH(); 129 - 130 111 /* 131 112 * The first region ends at the aligned address to test region merging 132 113 */ ··· 131 118 phys_addr_t r2_size = SZ_512; 132 119 phys_addr_t total_size = r1_size + r2_size; 133 120 121 + PREFIX_PUSH(); 134 122 setup_memblock(); 135 123 136 124 memblock_reserve(memblock_end_of_DRAM() - total_size, r1_size); 137 125 138 - allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES); 126 + allocated_ptr = run_memblock_alloc(r2_size, SMP_CACHE_BYTES); 139 127 140 128 ASSERT_NE(allocated_ptr, NULL); 129 + assert_mem_content(allocated_ptr, r2_size, alloc_test_flags); 130 + 141 131 ASSERT_EQ(rgn->size, total_size); 142 132 ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - total_size); 143 133 ··· 168 152 struct memblock_region *rgn = &memblock.reserved.regions[0]; 169 153 struct region r1; 170 154 void *allocated_ptr = NULL; 171 - 172 - PREFIX_PUSH(); 173 - 174 155 phys_addr_t r2_size = SZ_512; 175 156 phys_addr_t total_size; 176 157 158 + PREFIX_PUSH(); 177 159 setup_memblock(); 178 160 179 161 /* ··· 184 170 185 171 memblock_reserve(r1.base, r1.size); 186 172 187 - allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES); 173 + allocated_ptr = run_memblock_alloc(r2_size, SMP_CACHE_BYTES); 188 174 189 175 ASSERT_NE(allocated_ptr, NULL); 176 + assert_mem_content(allocated_ptr, r2_size, alloc_test_flags); 177 + 190 178 ASSERT_EQ(rgn->size, total_size); 191 179 ASSERT_EQ(rgn->base, r1.base - r2_size); 192 180 ··· 217 201 struct memblock_region *rgn = &memblock.reserved.regions[0]; 218 202 struct region r1, r2; 219 203 void *allocated_ptr = NULL; 220 - 221 - PREFIX_PUSH(); 222 - 223 204 phys_addr_t r3_size = SZ_1K; 224 205 
phys_addr_t total_size; 225 206 207 + PREFIX_PUSH(); 226 208 setup_memblock(); 227 209 228 210 r1.base = memblock_end_of_DRAM() - SZ_512; ··· 234 220 memblock_reserve(r1.base, r1.size); 235 221 memblock_reserve(r2.base, r2.size); 236 222 237 - allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES); 223 + allocated_ptr = run_memblock_alloc(r3_size, SMP_CACHE_BYTES); 238 224 239 225 ASSERT_NE(allocated_ptr, NULL); 226 + assert_mem_content(allocated_ptr, r3_size, alloc_test_flags); 227 + 240 228 ASSERT_EQ(rgn->size, r2.size + r3_size); 241 229 ASSERT_EQ(rgn->base, r2.base - r3_size); 242 230 ··· 266 250 struct memblock_region *rgn = &memblock.reserved.regions[0]; 267 251 struct region r1, r2; 268 252 void *allocated_ptr = NULL; 269 - 270 - PREFIX_PUSH(); 271 - 272 253 phys_addr_t gap_size = SMP_CACHE_BYTES; 273 254 phys_addr_t r3_size = SZ_64; 274 255 /* ··· 274 261 phys_addr_t rgn_size = (MEM_SIZE - (2 * gap_size + r3_size)) / 2; 275 262 phys_addr_t total_size; 276 263 264 + PREFIX_PUSH(); 277 265 setup_memblock(); 278 266 279 267 r1.size = rgn_size; ··· 288 274 memblock_reserve(r1.base, r1.size); 289 275 memblock_reserve(r2.base, r2.size); 290 276 291 - allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES); 277 + allocated_ptr = run_memblock_alloc(r3_size, SMP_CACHE_BYTES); 292 278 293 279 ASSERT_NE(allocated_ptr, NULL); 280 + assert_mem_content(allocated_ptr, r3_size, alloc_test_flags); 281 + 294 282 ASSERT_EQ(rgn->size, total_size); 295 283 ASSERT_EQ(rgn->base, r1.base - r2.size - r3_size); 296 284 ··· 320 304 static int alloc_small_gaps_generic_check(void) 321 305 { 322 306 void *allocated_ptr = NULL; 323 - 324 - PREFIX_PUSH(); 325 - 326 307 phys_addr_t region_size = SZ_1K; 327 308 phys_addr_t gap_size = SZ_256; 328 309 phys_addr_t region_end; 329 310 311 + PREFIX_PUSH(); 330 312 setup_memblock(); 331 313 332 314 region_end = memblock_start_of_DRAM(); ··· 334 320 region_end += gap_size + region_size; 335 321 } 336 322 337 - allocated_ptr = 
memblock_alloc(region_size, SMP_CACHE_BYTES); 323 + allocated_ptr = run_memblock_alloc(region_size, SMP_CACHE_BYTES); 338 324 339 325 ASSERT_EQ(allocated_ptr, NULL); 340 326 ··· 352 338 void *allocated_ptr = NULL; 353 339 354 340 PREFIX_PUSH(); 355 - 356 341 setup_memblock(); 357 342 358 343 /* Simulate full memory */ 359 344 memblock_reserve(memblock_start_of_DRAM(), MEM_SIZE); 360 345 361 - allocated_ptr = memblock_alloc(SZ_256, SMP_CACHE_BYTES); 346 + allocated_ptr = run_memblock_alloc(SZ_256, SMP_CACHE_BYTES); 362 347 363 348 ASSERT_EQ(allocated_ptr, NULL); 364 349 ··· 382 369 static int alloc_no_space_generic_check(void) 383 370 { 384 371 void *allocated_ptr = NULL; 385 - 386 - PREFIX_PUSH(); 387 - 388 - setup_memblock(); 389 - 390 372 phys_addr_t available_size = SZ_256; 391 373 phys_addr_t reserved_size = MEM_SIZE - available_size; 374 + 375 + PREFIX_PUSH(); 376 + setup_memblock(); 392 377 393 378 /* Simulate almost-full memory */ 394 379 memblock_reserve(memblock_start_of_DRAM(), reserved_size); 395 380 396 - allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES); 381 + allocated_ptr = run_memblock_alloc(SZ_1K, SMP_CACHE_BYTES); 397 382 398 383 ASSERT_EQ(allocated_ptr, NULL); 399 384 ··· 415 404 { 416 405 struct memblock_region *rgn = &memblock.reserved.regions[0]; 417 406 void *allocated_ptr = NULL; 418 - 419 - PREFIX_PUSH(); 420 - 421 407 phys_addr_t available_size = SZ_256; 422 408 phys_addr_t reserved_size = MEM_SIZE - available_size; 423 409 410 + PREFIX_PUSH(); 424 411 setup_memblock(); 425 412 426 413 /* Simulate almost-full memory */ 427 414 memblock_reserve(memblock_start_of_DRAM(), reserved_size); 428 415 429 - allocated_ptr = memblock_alloc(available_size, SMP_CACHE_BYTES); 416 + allocated_ptr = run_memblock_alloc(available_size, SMP_CACHE_BYTES); 430 417 431 418 ASSERT_NE(allocated_ptr, NULL); 419 + assert_mem_content(allocated_ptr, available_size, alloc_test_flags); 420 + 432 421 ASSERT_EQ(rgn->size, MEM_SIZE); 433 422 ASSERT_EQ(rgn->base, 
memblock_start_of_DRAM()); 434 423 ··· 454 443 455 444 reset_memblock_regions(); 456 445 457 - allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES); 446 + allocated_ptr = run_memblock_alloc(SZ_1K, SMP_CACHE_BYTES); 447 + 448 + ASSERT_EQ(allocated_ptr, NULL); 449 + ASSERT_EQ(rgn->size, 0); 450 + ASSERT_EQ(rgn->base, 0); 451 + ASSERT_EQ(memblock.reserved.total_size, 0); 452 + 453 + test_pass_pop(); 454 + 455 + return 0; 456 + } 457 + 458 + /* 459 + * A test that tries to allocate a region that is larger than the total size of 460 + * available memory (memblock.memory): 461 + * 462 + * +-----------------------------------+ 463 + * | new | 464 + * +-----------------------------------+ 465 + * | | 466 + * | | 467 + * +---------------------------------+ 468 + * 469 + * Expect no allocation to happen. 470 + */ 471 + static int alloc_too_large_generic_check(void) 472 + { 473 + struct memblock_region *rgn = &memblock.reserved.regions[0]; 474 + void *allocated_ptr = NULL; 475 + 476 + PREFIX_PUSH(); 477 + setup_memblock(); 478 + 479 + allocated_ptr = run_memblock_alloc(MEM_SIZE + SZ_2, SMP_CACHE_BYTES); 458 480 459 481 ASSERT_EQ(allocated_ptr, NULL); 460 482 ASSERT_EQ(rgn->size, 0); ··· 510 466 void *allocated_ptr = NULL; 511 467 512 468 PREFIX_PUSH(); 513 - 514 469 setup_memblock(); 515 470 516 - allocated_ptr = memblock_alloc(SZ_2, SMP_CACHE_BYTES); 471 + allocated_ptr = run_memblock_alloc(SZ_2, SMP_CACHE_BYTES); 517 472 518 473 ASSERT_NE(allocated_ptr, NULL); 474 + assert_mem_content(allocated_ptr, SZ_2, alloc_test_flags); 475 + 519 476 ASSERT_EQ(rgn->size, SZ_2); 520 477 ASSERT_EQ(rgn->base, memblock_start_of_DRAM()); 521 478 ··· 551 506 struct memblock_region *rgn2 = &memblock.reserved.regions[1]; 552 507 struct region r1; 553 508 void *allocated_ptr = NULL; 554 - 555 - PREFIX_PUSH(); 556 - 557 509 phys_addr_t r2_size = SZ_16; 558 510 /* Use custom alignment */ 559 511 phys_addr_t alignment = SMP_CACHE_BYTES * 2; 560 512 phys_addr_t total_size; 561 513 phys_addr_t 
expected_start; 562 514 515 + PREFIX_PUSH(); 563 516 setup_memblock(); 564 517 565 518 r1.base = memblock_start_of_DRAM() + SZ_2; ··· 568 525 569 526 memblock_reserve(r1.base, r1.size); 570 527 571 - allocated_ptr = memblock_alloc(r2_size, alignment); 528 + allocated_ptr = run_memblock_alloc(r2_size, alignment); 572 529 573 530 ASSERT_NE(allocated_ptr, NULL); 531 + assert_mem_content(allocated_ptr, r2_size, alloc_test_flags); 574 532 575 533 ASSERT_EQ(rgn1->size, r1.size); 576 534 ASSERT_EQ(rgn1->base, r1.base); ··· 601 557 { 602 558 struct memblock_region *rgn = &memblock.reserved.regions[0]; 603 559 void *allocated_ptr = NULL; 604 - 605 - PREFIX_PUSH(); 606 - 607 560 phys_addr_t r1_size = SZ_512; 608 561 phys_addr_t r2_size = SZ_128; 609 562 phys_addr_t total_size = r1_size + r2_size; 610 563 564 + PREFIX_PUSH(); 611 565 setup_memblock(); 612 566 613 567 memblock_reserve(memblock_start_of_DRAM() + r1_size, r2_size); 614 568 615 - allocated_ptr = memblock_alloc(r1_size, SMP_CACHE_BYTES); 569 + allocated_ptr = run_memblock_alloc(r1_size, SMP_CACHE_BYTES); 616 570 617 571 ASSERT_NE(allocated_ptr, NULL); 572 + assert_mem_content(allocated_ptr, r1_size, alloc_test_flags); 573 + 618 574 ASSERT_EQ(rgn->size, total_size); 619 575 ASSERT_EQ(rgn->base, memblock_start_of_DRAM()); 620 576 ··· 641 597 struct memblock_region *rgn = &memblock.reserved.regions[0]; 642 598 struct region r1; 643 599 void *allocated_ptr = NULL; 644 - 645 - PREFIX_PUSH(); 646 - 647 600 phys_addr_t r2_size = SZ_512; 648 601 phys_addr_t total_size; 649 602 603 + PREFIX_PUSH(); 650 604 setup_memblock(); 651 605 652 606 /* ··· 657 615 658 616 memblock_reserve(r1.base, r1.size); 659 617 660 - allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES); 618 + allocated_ptr = run_memblock_alloc(r2_size, SMP_CACHE_BYTES); 661 619 662 620 ASSERT_NE(allocated_ptr, NULL); 621 + assert_mem_content(allocated_ptr, r2_size, alloc_test_flags); 622 + 663 623 ASSERT_EQ(rgn->size, total_size); 664 624 
ASSERT_EQ(rgn->base, r1.base); 665 625 ··· 691 647 struct memblock_region *rgn = &memblock.reserved.regions[1]; 692 648 struct region r1, r2; 693 649 void *allocated_ptr = NULL; 694 - 695 - PREFIX_PUSH(); 696 - 697 650 phys_addr_t r3_size = SZ_1K; 698 651 phys_addr_t total_size; 699 652 653 + PREFIX_PUSH(); 700 654 setup_memblock(); 701 655 702 656 r1.base = memblock_start_of_DRAM(); ··· 708 666 memblock_reserve(r1.base, r1.size); 709 667 memblock_reserve(r2.base, r2.size); 710 668 711 - allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES); 669 + allocated_ptr = run_memblock_alloc(r3_size, SMP_CACHE_BYTES); 712 670 713 671 ASSERT_NE(allocated_ptr, NULL); 672 + assert_mem_content(allocated_ptr, r3_size, alloc_test_flags); 673 + 714 674 ASSERT_EQ(rgn->size, r2.size + r3_size); 715 675 ASSERT_EQ(rgn->base, r2.base); 716 676 ··· 772 728 static int alloc_in_between_check(void) 773 729 { 774 730 test_print("\tRunning %s...\n", __func__); 775 - memblock_set_bottom_up(false); 776 - alloc_in_between_generic_check(); 777 - memblock_set_bottom_up(true); 778 - alloc_in_between_generic_check(); 731 + run_top_down(alloc_in_between_generic_check); 732 + run_bottom_up(alloc_in_between_generic_check); 779 733 780 734 return 0; 781 735 } ··· 792 750 static int alloc_small_gaps_check(void) 793 751 { 794 752 test_print("\tRunning %s...\n", __func__); 795 - memblock_set_bottom_up(false); 796 - alloc_small_gaps_generic_check(); 797 - memblock_set_bottom_up(true); 798 - alloc_small_gaps_generic_check(); 753 + run_top_down(alloc_small_gaps_generic_check); 754 + run_bottom_up(alloc_small_gaps_generic_check); 799 755 800 756 return 0; 801 757 } ··· 801 761 static int alloc_all_reserved_check(void) 802 762 { 803 763 test_print("\tRunning %s...\n", __func__); 804 - memblock_set_bottom_up(false); 805 - alloc_all_reserved_generic_check(); 806 - memblock_set_bottom_up(true); 807 - alloc_all_reserved_generic_check(); 764 + run_top_down(alloc_all_reserved_generic_check); 765 + 
run_bottom_up(alloc_all_reserved_generic_check); 808 766 809 767 return 0; 810 768 } ··· 810 772 static int alloc_no_space_check(void) 811 773 { 812 774 test_print("\tRunning %s...\n", __func__); 813 - memblock_set_bottom_up(false); 814 - alloc_no_space_generic_check(); 815 - memblock_set_bottom_up(true); 816 - alloc_no_space_generic_check(); 775 + run_top_down(alloc_no_space_generic_check); 776 + run_bottom_up(alloc_no_space_generic_check); 817 777 818 778 return 0; 819 779 } ··· 819 783 static int alloc_limited_space_check(void) 820 784 { 821 785 test_print("\tRunning %s...\n", __func__); 822 - memblock_set_bottom_up(false); 823 - alloc_limited_space_generic_check(); 824 - memblock_set_bottom_up(true); 825 - alloc_limited_space_generic_check(); 786 + run_top_down(alloc_limited_space_generic_check); 787 + run_bottom_up(alloc_limited_space_generic_check); 826 788 827 789 return 0; 828 790 } ··· 828 794 static int alloc_no_memory_check(void) 829 795 { 830 796 test_print("\tRunning %s...\n", __func__); 831 - memblock_set_bottom_up(false); 832 - alloc_no_memory_generic_check(); 833 - memblock_set_bottom_up(true); 834 - alloc_no_memory_generic_check(); 797 + run_top_down(alloc_no_memory_generic_check); 798 + run_bottom_up(alloc_no_memory_generic_check); 835 799 836 800 return 0; 837 801 } 838 802 839 - int memblock_alloc_checks(void) 803 + static int alloc_too_large_check(void) 840 804 { 841 - const char *func_testing = "memblock_alloc"; 805 + test_print("\tRunning %s...\n", __func__); 806 + run_top_down(alloc_too_large_generic_check); 807 + run_bottom_up(alloc_too_large_generic_check); 842 808 809 + return 0; 810 + } 811 + 812 + static int memblock_alloc_checks_internal(int flags) 813 + { 814 + const char *func = get_memblock_alloc_name(flags); 815 + 816 + alloc_test_flags = flags; 843 817 prefix_reset(); 844 - prefix_push(func_testing); 845 - test_print("Running %s tests...\n", func_testing); 818 + prefix_push(func); 819 + test_print("Running %s tests...\n", func); 
846 820 847 821 reset_memblock_attributes(); 848 822 dummy_physical_memory_init(); ··· 866 824 alloc_no_space_check(); 867 825 alloc_limited_space_check(); 868 826 alloc_no_memory_check(); 827 + alloc_too_large_check(); 869 828 870 829 dummy_physical_memory_cleanup(); 871 830 872 831 prefix_pop(); 832 + 833 + return 0; 834 + } 835 + 836 + int memblock_alloc_checks(void) 837 + { 838 + memblock_alloc_checks_internal(TEST_F_NONE); 839 + memblock_alloc_checks_internal(TEST_F_RAW); 873 840 874 841 return 0; 875 842 }
+14 -38
tools/testing/memblock/tests/alloc_helpers_api.c
··· 19 19 { 20 20 struct memblock_region *rgn = &memblock.reserved.regions[0]; 21 21 void *allocated_ptr = NULL; 22 - char *b; 23 - 24 - PREFIX_PUSH(); 25 - 26 22 phys_addr_t size = SZ_16; 27 23 phys_addr_t min_addr; 28 24 25 + PREFIX_PUSH(); 29 26 setup_memblock(); 30 27 31 28 min_addr = memblock_end_of_DRAM() - SMP_CACHE_BYTES; 32 29 33 30 allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr); 34 - b = (char *)allocated_ptr; 35 31 36 32 ASSERT_NE(allocated_ptr, NULL); 37 - ASSERT_EQ(*b, 0); 33 + ASSERT_MEM_EQ(allocated_ptr, 0, size); 38 34 39 35 ASSERT_EQ(rgn->size, size); 40 36 ASSERT_EQ(rgn->base, min_addr); ··· 62 66 { 63 67 struct memblock_region *rgn = &memblock.reserved.regions[0]; 64 68 void *allocated_ptr = NULL; 65 - char *b; 66 - 67 - PREFIX_PUSH(); 68 - 69 69 phys_addr_t size = SZ_32; 70 70 phys_addr_t min_addr; 71 71 72 + PREFIX_PUSH(); 72 73 setup_memblock(); 73 74 74 75 /* A misaligned address */ 75 76 min_addr = memblock_end_of_DRAM() - (SMP_CACHE_BYTES * 2 - 1); 76 77 77 78 allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr); 78 - b = (char *)allocated_ptr; 79 79 80 80 ASSERT_NE(allocated_ptr, NULL); 81 - ASSERT_EQ(*b, 0); 81 + ASSERT_MEM_EQ(allocated_ptr, 0, size); 82 82 83 83 ASSERT_EQ(rgn->size, size); 84 84 ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - SMP_CACHE_BYTES); ··· 109 117 { 110 118 struct memblock_region *rgn = &memblock.reserved.regions[0]; 111 119 void *allocated_ptr = NULL; 112 - 113 - PREFIX_PUSH(); 114 - 115 120 phys_addr_t size = SZ_32; 116 121 phys_addr_t min_addr; 117 122 123 + PREFIX_PUSH(); 118 124 setup_memblock(); 119 125 120 126 /* The address is too close to the end of the memory */ ··· 152 162 { 153 163 struct memblock_region *rgn = &memblock.reserved.regions[0]; 154 164 void *allocated_ptr = NULL; 155 - 156 - PREFIX_PUSH(); 157 - 158 165 phys_addr_t r1_size = SZ_64; 159 166 phys_addr_t r2_size = SZ_2; 160 167 phys_addr_t total_size = r1_size + r2_size; 161 168 phys_addr_t min_addr; 162 
169 170 + PREFIX_PUSH(); 163 171 setup_memblock(); 164 172 165 173 min_addr = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2; ··· 189 201 { 190 202 struct memblock_region *rgn = &memblock.reserved.regions[0]; 191 203 void *allocated_ptr = NULL; 192 - 193 - PREFIX_PUSH(); 194 - 195 204 phys_addr_t r1_size = SZ_64; 196 205 phys_addr_t min_addr; 197 206 phys_addr_t start_addr; 198 207 208 + PREFIX_PUSH(); 199 209 setup_memblock(); 200 210 201 211 start_addr = (phys_addr_t)memblock_start_of_DRAM(); ··· 235 249 { 236 250 struct memblock_region *rgn = &memblock.reserved.regions[0]; 237 251 void *allocated_ptr = NULL; 238 - 239 - PREFIX_PUSH(); 240 - 241 252 phys_addr_t size = SZ_32; 242 253 phys_addr_t min_addr; 243 254 255 + PREFIX_PUSH(); 244 256 setup_memblock(); 245 257 246 258 /* The address is too close to the end of the memory */ ··· 277 293 { 278 294 struct memblock_region *rgn = &memblock.reserved.regions[0]; 279 295 void *allocated_ptr = NULL; 280 - 281 - PREFIX_PUSH(); 282 - 283 296 phys_addr_t r1_size = SZ_64; 284 297 phys_addr_t min_addr; 285 298 phys_addr_t r2_size; 286 299 300 + PREFIX_PUSH(); 287 301 setup_memblock(); 288 302 289 303 min_addr = memblock_start_of_DRAM() + SZ_128; ··· 313 331 { 314 332 struct memblock_region *rgn = &memblock.reserved.regions[0]; 315 333 void *allocated_ptr = NULL; 316 - 317 - PREFIX_PUSH(); 318 - 319 334 phys_addr_t r1_size = SZ_64; 320 335 phys_addr_t min_addr; 321 336 phys_addr_t start_addr; 322 337 338 + PREFIX_PUSH(); 323 339 setup_memblock(); 324 340 325 341 start_addr = (phys_addr_t)memblock_start_of_DRAM(); ··· 341 361 static int alloc_from_simple_check(void) 342 362 { 343 363 test_print("\tRunning %s...\n", __func__); 344 - memblock_set_bottom_up(false); 345 - alloc_from_simple_generic_check(); 346 - memblock_set_bottom_up(true); 347 - alloc_from_simple_generic_check(); 364 + run_top_down(alloc_from_simple_generic_check); 365 + run_bottom_up(alloc_from_simple_generic_check); 348 366 349 367 return 0; 350 368 } ··· 350 
372 static int alloc_from_misaligned_check(void) 351 373 { 352 374 test_print("\tRunning %s...\n", __func__); 353 - memblock_set_bottom_up(false); 354 - alloc_from_misaligned_generic_check(); 355 - memblock_set_bottom_up(true); 356 - alloc_from_misaligned_generic_check(); 375 + run_top_down(alloc_from_misaligned_generic_check); 376 + run_bottom_up(alloc_from_misaligned_generic_check); 357 377 358 378 return 0; 359 379 }
+1604 -206
tools/testing/memblock/tests/alloc_nid_api.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 #include "alloc_nid_api.h" 3 3 4 + static int alloc_nid_test_flags = TEST_F_NONE; 5 + 6 + /* 7 + * contains the fraction of MEM_SIZE contained in each node in basis point 8 + * units (one hundredth of 1% or 1/10000) 9 + */ 10 + static const unsigned int node_fractions[] = { 11 + 2500, /* 1/4 */ 12 + 625, /* 1/16 */ 13 + 1250, /* 1/8 */ 14 + 1250, /* 1/8 */ 15 + 625, /* 1/16 */ 16 + 625, /* 1/16 */ 17 + 2500, /* 1/4 */ 18 + 625, /* 1/16 */ 19 + }; 20 + 21 + static inline const char * const get_memblock_alloc_try_nid_name(int flags) 22 + { 23 + if (flags & TEST_F_RAW) 24 + return "memblock_alloc_try_nid_raw"; 25 + return "memblock_alloc_try_nid"; 26 + } 27 + 28 + static inline void *run_memblock_alloc_try_nid(phys_addr_t size, 29 + phys_addr_t align, 30 + phys_addr_t min_addr, 31 + phys_addr_t max_addr, int nid) 32 + { 33 + if (alloc_nid_test_flags & TEST_F_RAW) 34 + return memblock_alloc_try_nid_raw(size, align, min_addr, 35 + max_addr, nid); 36 + return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid); 37 + } 38 + 4 39 /* 5 40 * A simple test that tries to allocate a memory region within min_addr and 6 41 * max_addr range: ··· 48 13 * | | 49 14 * min_addr max_addr 50 15 * 51 - * Expect to allocate a cleared region that ends at max_addr. 16 + * Expect to allocate a region that ends at max_addr. 
52 17 */ 53 18 static int alloc_try_nid_top_down_simple_check(void) 54 19 { 55 20 struct memblock_region *rgn = &memblock.reserved.regions[0]; 56 21 void *allocated_ptr = NULL; 57 - char *b; 58 - 59 - PREFIX_PUSH(); 60 - 61 22 phys_addr_t size = SZ_128; 62 23 phys_addr_t min_addr; 63 24 phys_addr_t max_addr; 64 25 phys_addr_t rgn_end; 65 26 27 + PREFIX_PUSH(); 66 28 setup_memblock(); 67 29 68 30 min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2; 69 31 max_addr = min_addr + SZ_512; 70 32 71 - allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 72 - min_addr, max_addr, NUMA_NO_NODE); 73 - b = (char *)allocated_ptr; 33 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 34 + min_addr, max_addr, 35 + NUMA_NO_NODE); 74 36 rgn_end = rgn->base + rgn->size; 75 37 76 38 ASSERT_NE(allocated_ptr, NULL); 77 - ASSERT_EQ(*b, 0); 39 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 78 40 79 41 ASSERT_EQ(rgn->size, size); 80 42 ASSERT_EQ(rgn->base, max_addr - size); ··· 100 68 * Aligned address 101 69 * boundary 102 70 * 103 - * Expect to allocate a cleared, aligned region that ends before max_addr. 71 + * Expect to allocate an aligned region that ends before max_addr. 
104 72 */ 105 73 static int alloc_try_nid_top_down_end_misaligned_check(void) 106 74 { 107 75 struct memblock_region *rgn = &memblock.reserved.regions[0]; 108 76 void *allocated_ptr = NULL; 109 - char *b; 110 - 111 - PREFIX_PUSH(); 112 - 113 77 phys_addr_t size = SZ_128; 114 78 phys_addr_t misalign = SZ_2; 115 79 phys_addr_t min_addr; 116 80 phys_addr_t max_addr; 117 81 phys_addr_t rgn_end; 118 82 83 + PREFIX_PUSH(); 119 84 setup_memblock(); 120 85 121 86 min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2; 122 87 max_addr = min_addr + SZ_512 + misalign; 123 88 124 - allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 125 - min_addr, max_addr, NUMA_NO_NODE); 126 - b = (char *)allocated_ptr; 89 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 90 + min_addr, max_addr, 91 + NUMA_NO_NODE); 127 92 rgn_end = rgn->base + rgn->size; 128 93 129 94 ASSERT_NE(allocated_ptr, NULL); 130 - ASSERT_EQ(*b, 0); 95 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 131 96 132 97 ASSERT_EQ(rgn->size, size); 133 98 ASSERT_EQ(rgn->base, max_addr - size - misalign); ··· 150 121 * | | 151 122 * min_addr max_addr 152 123 * 153 - * Expect to allocate a cleared region that starts at min_addr and ends at 124 + * Expect to allocate a region that starts at min_addr and ends at 154 125 * max_addr, given that min_addr is aligned. 
155 126 */ 156 127 static int alloc_try_nid_exact_address_generic_check(void) 157 128 { 158 129 struct memblock_region *rgn = &memblock.reserved.regions[0]; 159 130 void *allocated_ptr = NULL; 160 - char *b; 161 - 162 - PREFIX_PUSH(); 163 - 164 131 phys_addr_t size = SZ_1K; 165 132 phys_addr_t min_addr; 166 133 phys_addr_t max_addr; 167 134 phys_addr_t rgn_end; 168 135 136 + PREFIX_PUSH(); 169 137 setup_memblock(); 170 138 171 139 min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES; 172 140 max_addr = min_addr + size; 173 141 174 - allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 175 - min_addr, max_addr, NUMA_NO_NODE); 176 - b = (char *)allocated_ptr; 142 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 143 + min_addr, max_addr, 144 + NUMA_NO_NODE); 177 145 rgn_end = rgn->base + rgn->size; 178 146 179 147 ASSERT_NE(allocated_ptr, NULL); 180 - ASSERT_EQ(*b, 0); 148 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 181 149 182 150 ASSERT_EQ(rgn->size, size); 183 151 ASSERT_EQ(rgn->base, min_addr); ··· 202 176 * address | 203 177 * boundary min_add 204 178 * 205 - * Expect to drop the lower limit and allocate a cleared memory region which 179 + * Expect to drop the lower limit and allocate a memory region which 206 180 * ends at max_addr (if the address is aligned). 
207 181 */ 208 182 static int alloc_try_nid_top_down_narrow_range_check(void) 209 183 { 210 184 struct memblock_region *rgn = &memblock.reserved.regions[0]; 211 185 void *allocated_ptr = NULL; 212 - char *b; 213 - 214 - PREFIX_PUSH(); 215 - 216 186 phys_addr_t size = SZ_256; 217 187 phys_addr_t min_addr; 218 188 phys_addr_t max_addr; 219 189 190 + PREFIX_PUSH(); 220 191 setup_memblock(); 221 192 222 193 min_addr = memblock_start_of_DRAM() + SZ_512; 223 194 max_addr = min_addr + SMP_CACHE_BYTES; 224 195 225 - allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 226 - min_addr, max_addr, NUMA_NO_NODE); 227 - b = (char *)allocated_ptr; 196 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 197 + min_addr, max_addr, 198 + NUMA_NO_NODE); 228 199 229 200 ASSERT_NE(allocated_ptr, NULL); 230 - ASSERT_EQ(*b, 0); 201 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 231 202 232 203 ASSERT_EQ(rgn->size, size); 233 204 ASSERT_EQ(rgn->base, max_addr - size); ··· 260 237 static int alloc_try_nid_low_max_generic_check(void) 261 238 { 262 239 void *allocated_ptr = NULL; 263 - 264 - PREFIX_PUSH(); 265 - 266 240 phys_addr_t size = SZ_1K; 267 241 phys_addr_t min_addr; 268 242 phys_addr_t max_addr; 269 243 244 + PREFIX_PUSH(); 270 245 setup_memblock(); 271 246 272 247 min_addr = memblock_start_of_DRAM(); 273 248 max_addr = min_addr + SMP_CACHE_BYTES; 274 249 275 - allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 276 - min_addr, max_addr, NUMA_NO_NODE); 250 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 251 + min_addr, max_addr, 252 + NUMA_NO_NODE); 277 253 278 254 ASSERT_EQ(allocated_ptr, NULL); 279 255 ··· 299 277 { 300 278 struct memblock_region *rgn = &memblock.reserved.regions[0]; 301 279 void *allocated_ptr = NULL; 302 - char *b; 303 - 304 - PREFIX_PUSH(); 305 - 306 280 phys_addr_t r1_size = SZ_128; 307 281 phys_addr_t r2_size = SZ_64; 308 282 phys_addr_t total_size = r1_size + r2_size; ··· 306 288 
phys_addr_t max_addr; 307 289 phys_addr_t reserved_base; 308 290 291 + PREFIX_PUSH(); 309 292 setup_memblock(); 310 293 311 294 max_addr = memblock_end_of_DRAM(); ··· 315 296 316 297 memblock_reserve(reserved_base, r1_size); 317 298 318 - allocated_ptr = memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES, 319 - min_addr, max_addr, NUMA_NO_NODE); 320 - b = (char *)allocated_ptr; 299 + allocated_ptr = run_memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES, 300 + min_addr, max_addr, 301 + NUMA_NO_NODE); 321 302 322 303 ASSERT_NE(allocated_ptr, NULL); 323 - ASSERT_EQ(*b, 0); 304 + assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags); 324 305 325 306 ASSERT_EQ(rgn->size, total_size); 326 307 ASSERT_EQ(rgn->base, reserved_base); ··· 351 332 { 352 333 struct memblock_region *rgn = &memblock.reserved.regions[0]; 353 334 void *allocated_ptr = NULL; 354 - char *b; 355 - 356 - PREFIX_PUSH(); 357 - 358 335 phys_addr_t r1_size = SZ_64; 359 336 phys_addr_t r2_size = SZ_128; 360 337 phys_addr_t total_size = r1_size + r2_size; 361 338 phys_addr_t min_addr; 362 339 phys_addr_t max_addr; 363 340 341 + PREFIX_PUSH(); 364 342 setup_memblock(); 365 343 366 344 max_addr = memblock_end_of_DRAM() - r1_size; ··· 365 349 366 350 memblock_reserve(max_addr, r1_size); 367 351 368 - allocated_ptr = memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES, 369 - min_addr, max_addr, NUMA_NO_NODE); 370 - b = (char *)allocated_ptr; 352 + allocated_ptr = run_memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES, 353 + min_addr, max_addr, 354 + NUMA_NO_NODE); 371 355 372 356 ASSERT_NE(allocated_ptr, NULL); 373 - ASSERT_EQ(*b, 0); 357 + assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags); 374 358 375 359 ASSERT_EQ(rgn->size, total_size); 376 360 ASSERT_EQ(rgn->base, min_addr); ··· 405 389 struct memblock_region *rgn1 = &memblock.reserved.regions[1]; 406 390 struct memblock_region *rgn2 = &memblock.reserved.regions[0]; 407 391 void *allocated_ptr = NULL; 408 - char *b; 409 392 struct region r1, r2; 
410 - 411 - PREFIX_PUSH(); 412 - 413 393 phys_addr_t r3_size = SZ_64; 414 394 phys_addr_t gap_size = SMP_CACHE_BYTES; 415 395 phys_addr_t total_size; 416 396 phys_addr_t max_addr; 417 397 phys_addr_t min_addr; 418 398 399 + PREFIX_PUSH(); 419 400 setup_memblock(); 420 401 421 402 r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2; ··· 428 415 memblock_reserve(r1.base, r1.size); 429 416 memblock_reserve(r2.base, r2.size); 430 417 431 - allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES, 432 - min_addr, max_addr, NUMA_NO_NODE); 433 - b = (char *)allocated_ptr; 418 + allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES, 419 + min_addr, max_addr, 420 + NUMA_NO_NODE); 434 421 435 422 ASSERT_NE(allocated_ptr, NULL); 436 - ASSERT_EQ(*b, 0); 423 + assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags); 437 424 438 425 ASSERT_EQ(rgn1->size, r1.size + r3_size); 439 426 ASSERT_EQ(rgn1->base, max_addr - r3_size); ··· 469 456 { 470 457 struct memblock_region *rgn = &memblock.reserved.regions[0]; 471 458 void *allocated_ptr = NULL; 472 - char *b; 473 459 struct region r1, r2; 474 - 475 - PREFIX_PUSH(); 476 - 477 460 phys_addr_t r3_size = SZ_64; 478 461 phys_addr_t total_size; 479 462 phys_addr_t max_addr; 480 463 phys_addr_t min_addr; 481 464 465 + PREFIX_PUSH(); 482 466 setup_memblock(); 483 467 484 468 r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2; ··· 491 481 memblock_reserve(r1.base, r1.size); 492 482 memblock_reserve(r2.base, r2.size); 493 483 494 - allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES, 495 - min_addr, max_addr, NUMA_NO_NODE); 496 - b = (char *)allocated_ptr; 484 + allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES, 485 + min_addr, max_addr, 486 + NUMA_NO_NODE); 497 487 498 488 ASSERT_NE(allocated_ptr, NULL); 499 - ASSERT_EQ(*b, 0); 489 + assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags); 500 490 501 491 ASSERT_EQ(rgn->size, total_size); 502 492 ASSERT_EQ(rgn->base, 
r2.base); ··· 532 522 struct memblock_region *rgn1 = &memblock.reserved.regions[1]; 533 523 struct memblock_region *rgn2 = &memblock.reserved.regions[0]; 534 524 void *allocated_ptr = NULL; 535 - char *b; 536 525 struct region r1, r2; 537 - 538 - PREFIX_PUSH(); 539 - 540 526 phys_addr_t r3_size = SZ_256; 541 527 phys_addr_t gap_size = SMP_CACHE_BYTES; 542 528 phys_addr_t total_size; 543 529 phys_addr_t max_addr; 544 530 phys_addr_t min_addr; 545 531 532 + PREFIX_PUSH(); 546 533 setup_memblock(); 547 534 548 535 r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2; ··· 555 548 memblock_reserve(r1.base, r1.size); 556 549 memblock_reserve(r2.base, r2.size); 557 550 558 - allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES, 559 - min_addr, max_addr, NUMA_NO_NODE); 560 - b = (char *)allocated_ptr; 551 + allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES, 552 + min_addr, max_addr, 553 + NUMA_NO_NODE); 561 554 562 555 ASSERT_NE(allocated_ptr, NULL); 563 - ASSERT_EQ(*b, 0); 556 + assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags); 564 557 565 558 ASSERT_EQ(rgn1->size, r1.size); 566 559 ASSERT_EQ(rgn1->base, r1.base); ··· 600 593 { 601 594 void *allocated_ptr = NULL; 602 595 struct region r1, r2; 603 - 604 - PREFIX_PUSH(); 605 - 606 596 phys_addr_t r3_size = SZ_256; 607 597 phys_addr_t gap_size = SMP_CACHE_BYTES; 608 598 phys_addr_t max_addr; 609 599 phys_addr_t min_addr; 610 600 601 + PREFIX_PUSH(); 611 602 setup_memblock(); 612 603 613 604 r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES; ··· 620 615 memblock_reserve(r1.base, r1.size); 621 616 memblock_reserve(r2.base, r2.size); 622 617 623 - allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES, 624 - min_addr, max_addr, NUMA_NO_NODE); 618 + allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES, 619 + min_addr, max_addr, 620 + NUMA_NO_NODE); 625 621 626 622 ASSERT_EQ(allocated_ptr, NULL); 627 623 ··· 634 628 /* 635 629 * A test that tries to allocate 
a memory region, where max_addr is 636 630 * bigger than the end address of the available memory. Expect to allocate 637 - * a cleared region that ends before the end of the memory. 631 + * a region that ends before the end of the memory. 638 632 */ 639 633 static int alloc_try_nid_top_down_cap_max_check(void) 640 634 { 641 635 struct memblock_region *rgn = &memblock.reserved.regions[0]; 642 636 void *allocated_ptr = NULL; 643 - char *b; 644 - 645 - PREFIX_PUSH(); 646 - 647 637 phys_addr_t size = SZ_256; 648 638 phys_addr_t min_addr; 649 639 phys_addr_t max_addr; 650 640 641 + PREFIX_PUSH(); 651 642 setup_memblock(); 652 643 653 644 min_addr = memblock_end_of_DRAM() - SZ_1K; 654 645 max_addr = memblock_end_of_DRAM() + SZ_256; 655 646 656 - allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 657 - min_addr, max_addr, NUMA_NO_NODE); 658 - b = (char *)allocated_ptr; 647 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 648 + min_addr, max_addr, 649 + NUMA_NO_NODE); 659 650 660 651 ASSERT_NE(allocated_ptr, NULL); 661 - ASSERT_EQ(*b, 0); 652 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 662 653 663 654 ASSERT_EQ(rgn->size, size); 664 655 ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size); ··· 671 668 /* 672 669 * A test that tries to allocate a memory region, where min_addr is 673 670 * smaller than the start address of the available memory. Expect to allocate 674 - * a cleared region that ends before the end of the memory. 671 + * a region that ends before the end of the memory. 
675 672 */ 676 673 static int alloc_try_nid_top_down_cap_min_check(void) 677 674 { 678 675 struct memblock_region *rgn = &memblock.reserved.regions[0]; 679 676 void *allocated_ptr = NULL; 680 - char *b; 681 - 682 - PREFIX_PUSH(); 683 - 684 677 phys_addr_t size = SZ_1K; 685 678 phys_addr_t min_addr; 686 679 phys_addr_t max_addr; 687 680 681 + PREFIX_PUSH(); 688 682 setup_memblock(); 689 683 690 684 min_addr = memblock_start_of_DRAM() - SZ_256; 691 685 max_addr = memblock_end_of_DRAM(); 692 686 693 - allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 694 - min_addr, max_addr, NUMA_NO_NODE); 695 - b = (char *)allocated_ptr; 687 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 688 + min_addr, max_addr, 689 + NUMA_NO_NODE); 696 690 697 691 ASSERT_NE(allocated_ptr, NULL); 698 - ASSERT_EQ(*b, 0); 692 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 699 693 700 694 ASSERT_EQ(rgn->size, size); 701 695 ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size); ··· 717 717 * | | 718 718 * min_addr max_addr 719 719 * 720 - * Expect to allocate a cleared region that ends before max_addr. 720 + * Expect to allocate a region that ends before max_addr. 
721 721 */ 722 722 static int alloc_try_nid_bottom_up_simple_check(void) 723 723 { 724 724 struct memblock_region *rgn = &memblock.reserved.regions[0]; 725 725 void *allocated_ptr = NULL; 726 - char *b; 727 - 728 - PREFIX_PUSH(); 729 - 730 726 phys_addr_t size = SZ_128; 731 727 phys_addr_t min_addr; 732 728 phys_addr_t max_addr; 733 729 phys_addr_t rgn_end; 734 730 731 + PREFIX_PUSH(); 735 732 setup_memblock(); 736 733 737 734 min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2; 738 735 max_addr = min_addr + SZ_512; 739 736 740 - allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 741 - min_addr, max_addr, 742 - NUMA_NO_NODE); 743 - b = (char *)allocated_ptr; 737 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 738 + min_addr, max_addr, 739 + NUMA_NO_NODE); 744 740 rgn_end = rgn->base + rgn->size; 745 741 746 742 ASSERT_NE(allocated_ptr, NULL); 747 - ASSERT_EQ(*b, 0); 743 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 748 744 749 745 ASSERT_EQ(rgn->size, size); 750 746 ASSERT_EQ(rgn->base, min_addr); ··· 769 773 * Aligned address 770 774 * boundary 771 775 * 772 - * Expect to allocate a cleared, aligned region that ends before max_addr. 776 + * Expect to allocate an aligned region that ends before max_addr. 
773 777 	 */
 774 778 	static int alloc_try_nid_bottom_up_start_misaligned_check(void)
 775 779 	{
 776 780 	struct memblock_region *rgn = &memblock.reserved.regions[0];
 777 781 	void *allocated_ptr = NULL;
 778 	- char *b;
 779 	-
 780 	- PREFIX_PUSH();
 781 	-
 782 782 	phys_addr_t size = SZ_128;
 783 783 	phys_addr_t misalign = SZ_2;
 784 784 	phys_addr_t min_addr;
 785 785 	phys_addr_t max_addr;
 786 786 	phys_addr_t rgn_end;
 787 787 
 788 	+ PREFIX_PUSH();
 788 789 	setup_memblock();
 789 790 
 790 791 	min_addr = memblock_start_of_DRAM() + misalign;
 791 792 	max_addr = min_addr + SZ_512;
 792 793 
 793 	- allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
 794 	- min_addr, max_addr,
 795 	- NUMA_NO_NODE);
 796 	- b = (char *)allocated_ptr;
 794 	+ allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
 795 	+ min_addr, max_addr,
 796 	+ NUMA_NO_NODE);
 797 797 	rgn_end = rgn->base + rgn->size;
 798 798 
 799 799 	ASSERT_NE(allocated_ptr, NULL);
 800 	- ASSERT_EQ(*b, 0);
 800 	+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
 801 801 
 802 802 	ASSERT_EQ(rgn->size, size);
 803 803 	ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
 ···
 821 829 	 * |
 822 830 	 * min_addr
 823 831 	 *
 824 	- * Expect to drop the lower limit and allocate a cleared memory region which
 832 	+ * Expect to drop the lower limit and allocate a memory region which
 825 833 	 * starts at the beginning of the available memory. 
826 834 */ 827 835 static int alloc_try_nid_bottom_up_narrow_range_check(void) 828 836 { 829 837 struct memblock_region *rgn = &memblock.reserved.regions[0]; 830 838 void *allocated_ptr = NULL; 831 - char *b; 832 - 833 - PREFIX_PUSH(); 834 - 835 839 phys_addr_t size = SZ_256; 836 840 phys_addr_t min_addr; 837 841 phys_addr_t max_addr; 838 842 843 + PREFIX_PUSH(); 839 844 setup_memblock(); 840 845 841 846 min_addr = memblock_start_of_DRAM() + SZ_512; 842 847 max_addr = min_addr + SMP_CACHE_BYTES; 843 848 844 - allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 845 - min_addr, max_addr, 846 - NUMA_NO_NODE); 847 - b = (char *)allocated_ptr; 849 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 850 + min_addr, max_addr, 851 + NUMA_NO_NODE); 848 852 849 853 ASSERT_NE(allocated_ptr, NULL); 850 - ASSERT_EQ(*b, 0); 854 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 851 855 852 856 ASSERT_EQ(rgn->size, size); 853 857 ASSERT_EQ(rgn->base, memblock_start_of_DRAM()); ··· 878 890 struct memblock_region *rgn1 = &memblock.reserved.regions[1]; 879 891 struct memblock_region *rgn2 = &memblock.reserved.regions[0]; 880 892 void *allocated_ptr = NULL; 881 - char *b; 882 893 struct region r1, r2; 883 - 884 - PREFIX_PUSH(); 885 - 886 894 phys_addr_t r3_size = SZ_64; 887 895 phys_addr_t gap_size = SMP_CACHE_BYTES; 888 896 phys_addr_t total_size; 889 897 phys_addr_t max_addr; 890 898 phys_addr_t min_addr; 891 899 900 + PREFIX_PUSH(); 892 901 setup_memblock(); 893 902 894 903 r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2; ··· 901 916 memblock_reserve(r1.base, r1.size); 902 917 memblock_reserve(r2.base, r2.size); 903 918 904 - allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES, 905 - min_addr, max_addr, 906 - NUMA_NO_NODE); 907 - b = (char *)allocated_ptr; 919 + allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES, 920 + min_addr, max_addr, 921 + NUMA_NO_NODE); 908 922 909 923 ASSERT_NE(allocated_ptr, NULL); 
910 - ASSERT_EQ(*b, 0); 924 + assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags); 911 925 912 926 ASSERT_EQ(rgn1->size, r1.size); 913 927 ASSERT_EQ(rgn1->base, max_addr); ··· 948 964 struct memblock_region *rgn2 = &memblock.reserved.regions[1]; 949 965 struct memblock_region *rgn3 = &memblock.reserved.regions[0]; 950 966 void *allocated_ptr = NULL; 951 - char *b; 952 967 struct region r1, r2; 953 - 954 - PREFIX_PUSH(); 955 - 956 968 phys_addr_t r3_size = SZ_256; 957 969 phys_addr_t gap_size = SMP_CACHE_BYTES; 958 970 phys_addr_t total_size; 959 971 phys_addr_t max_addr; 960 972 phys_addr_t min_addr; 961 973 974 + PREFIX_PUSH(); 962 975 setup_memblock(); 963 976 964 977 r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2; ··· 971 990 memblock_reserve(r1.base, r1.size); 972 991 memblock_reserve(r2.base, r2.size); 973 992 974 - allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES, 975 - min_addr, max_addr, 976 - NUMA_NO_NODE); 977 - b = (char *)allocated_ptr; 993 + allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES, 994 + min_addr, max_addr, 995 + NUMA_NO_NODE); 978 996 979 997 ASSERT_NE(allocated_ptr, NULL); 980 - ASSERT_EQ(*b, 0); 998 + assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags); 981 999 982 1000 ASSERT_EQ(rgn3->size, r3_size); 983 1001 ASSERT_EQ(rgn3->base, memblock_start_of_DRAM()); ··· 998 1018 /* 999 1019 * A test that tries to allocate a memory region, where max_addr is 1000 1020 * bigger than the end address of the available memory. Expect to allocate 1001 - * a cleared region that starts at the min_addr 1021 + * a region that starts at the min_addr. 
1002 1022 */ 1003 1023 static int alloc_try_nid_bottom_up_cap_max_check(void) 1004 1024 { 1005 1025 struct memblock_region *rgn = &memblock.reserved.regions[0]; 1006 1026 void *allocated_ptr = NULL; 1007 - char *b; 1008 - 1009 - PREFIX_PUSH(); 1010 - 1011 1027 phys_addr_t size = SZ_256; 1012 1028 phys_addr_t min_addr; 1013 1029 phys_addr_t max_addr; 1014 1030 1031 + PREFIX_PUSH(); 1015 1032 setup_memblock(); 1016 1033 1017 1034 min_addr = memblock_start_of_DRAM() + SZ_1K; 1018 1035 max_addr = memblock_end_of_DRAM() + SZ_256; 1019 1036 1020 - allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1021 - min_addr, max_addr, 1022 - NUMA_NO_NODE); 1023 - b = (char *)allocated_ptr; 1037 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1038 + min_addr, max_addr, 1039 + NUMA_NO_NODE); 1024 1040 1025 1041 ASSERT_NE(allocated_ptr, NULL); 1026 - ASSERT_EQ(*b, 0); 1042 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 1027 1043 1028 1044 ASSERT_EQ(rgn->size, size); 1029 1045 ASSERT_EQ(rgn->base, min_addr); ··· 1035 1059 /* 1036 1060 * A test that tries to allocate a memory region, where min_addr is 1037 1061 * smaller than the start address of the available memory. Expect to allocate 1038 - * a cleared region at the beginning of the available memory. 1062 + * a region at the beginning of the available memory. 
1039 1063 */ 1040 1064 static int alloc_try_nid_bottom_up_cap_min_check(void) 1041 1065 { 1042 1066 struct memblock_region *rgn = &memblock.reserved.regions[0]; 1043 1067 void *allocated_ptr = NULL; 1044 - char *b; 1045 - 1046 - PREFIX_PUSH(); 1047 - 1048 1068 phys_addr_t size = SZ_1K; 1049 1069 phys_addr_t min_addr; 1050 1070 phys_addr_t max_addr; 1051 1071 1072 + PREFIX_PUSH(); 1052 1073 setup_memblock(); 1053 1074 1054 1075 min_addr = memblock_start_of_DRAM(); 1055 1076 max_addr = memblock_end_of_DRAM() - SZ_256; 1056 1077 1057 - allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1058 - min_addr, max_addr, 1059 - NUMA_NO_NODE); 1060 - b = (char *)allocated_ptr; 1078 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1079 + min_addr, max_addr, 1080 + NUMA_NO_NODE); 1061 1081 1062 1082 ASSERT_NE(allocated_ptr, NULL); 1063 - ASSERT_EQ(*b, 0); 1083 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 1064 1084 1065 1085 ASSERT_EQ(rgn->size, size); 1066 1086 ASSERT_EQ(rgn->base, memblock_start_of_DRAM()); ··· 1069 1097 return 0; 1070 1098 } 1071 1099 1072 - /* Test case wrappers */ 1100 + /* Test case wrappers for range tests */ 1073 1101 static int alloc_try_nid_simple_check(void) 1074 1102 { 1075 1103 test_print("\tRunning %s...\n", __func__); ··· 1150 1178 static int alloc_try_nid_min_reserved_check(void) 1151 1179 { 1152 1180 test_print("\tRunning %s...\n", __func__); 1153 - memblock_set_bottom_up(false); 1154 - alloc_try_nid_min_reserved_generic_check(); 1155 - memblock_set_bottom_up(true); 1156 - alloc_try_nid_min_reserved_generic_check(); 1181 + run_top_down(alloc_try_nid_min_reserved_generic_check); 1182 + run_bottom_up(alloc_try_nid_min_reserved_generic_check); 1157 1183 1158 1184 return 0; 1159 1185 } ··· 1159 1189 static int alloc_try_nid_max_reserved_check(void) 1160 1190 { 1161 1191 test_print("\tRunning %s...\n", __func__); 1162 - memblock_set_bottom_up(false); 1163 - alloc_try_nid_max_reserved_generic_check(); 
1164 - memblock_set_bottom_up(true); 1165 - alloc_try_nid_max_reserved_generic_check(); 1192 + run_top_down(alloc_try_nid_max_reserved_generic_check); 1193 + run_bottom_up(alloc_try_nid_max_reserved_generic_check); 1166 1194 1167 1195 return 0; 1168 1196 } ··· 1168 1200 static int alloc_try_nid_exact_address_check(void) 1169 1201 { 1170 1202 test_print("\tRunning %s...\n", __func__); 1171 - memblock_set_bottom_up(false); 1172 - alloc_try_nid_exact_address_generic_check(); 1173 - memblock_set_bottom_up(true); 1174 - alloc_try_nid_exact_address_generic_check(); 1203 + run_top_down(alloc_try_nid_exact_address_generic_check); 1204 + run_bottom_up(alloc_try_nid_exact_address_generic_check); 1175 1205 1176 1206 return 0; 1177 1207 } ··· 1177 1211 static int alloc_try_nid_reserved_full_merge_check(void) 1178 1212 { 1179 1213 test_print("\tRunning %s...\n", __func__); 1180 - memblock_set_bottom_up(false); 1181 - alloc_try_nid_reserved_full_merge_generic_check(); 1182 - memblock_set_bottom_up(true); 1183 - alloc_try_nid_reserved_full_merge_generic_check(); 1214 + run_top_down(alloc_try_nid_reserved_full_merge_generic_check); 1215 + run_bottom_up(alloc_try_nid_reserved_full_merge_generic_check); 1184 1216 1185 1217 return 0; 1186 1218 } ··· 1186 1222 static int alloc_try_nid_reserved_all_check(void) 1187 1223 { 1188 1224 test_print("\tRunning %s...\n", __func__); 1189 - memblock_set_bottom_up(false); 1190 - alloc_try_nid_reserved_all_generic_check(); 1191 - memblock_set_bottom_up(true); 1192 - alloc_try_nid_reserved_all_generic_check(); 1225 + run_top_down(alloc_try_nid_reserved_all_generic_check); 1226 + run_bottom_up(alloc_try_nid_reserved_all_generic_check); 1193 1227 1194 1228 return 0; 1195 1229 } ··· 1195 1233 static int alloc_try_nid_low_max_check(void) 1196 1234 { 1197 1235 test_print("\tRunning %s...\n", __func__); 1198 - memblock_set_bottom_up(false); 1199 - alloc_try_nid_low_max_generic_check(); 1200 - memblock_set_bottom_up(true); 1201 - 
alloc_try_nid_low_max_generic_check(); 1236 + run_top_down(alloc_try_nid_low_max_generic_check); 1237 + run_bottom_up(alloc_try_nid_low_max_generic_check); 1202 1238 1203 1239 return 0; 1204 1240 } 1205 1241 1206 - int memblock_alloc_nid_checks(void) 1242 + static int memblock_alloc_nid_range_checks(void) 1207 1243 { 1208 - const char *func_testing = "memblock_alloc_try_nid"; 1209 - 1210 - prefix_reset(); 1211 - prefix_push(func_testing); 1212 - test_print("Running %s tests...\n", func_testing); 1213 - 1214 - reset_memblock_attributes(); 1215 - dummy_physical_memory_init(); 1244 + test_print("Running %s range tests...\n", 1245 + get_memblock_alloc_try_nid_name(alloc_nid_test_flags)); 1216 1246 1217 1247 alloc_try_nid_simple_check(); 1218 1248 alloc_try_nid_misaligned_check(); ··· 1221 1267 alloc_try_nid_reserved_all_check(); 1222 1268 alloc_try_nid_low_max_check(); 1223 1269 1270 + return 0; 1271 + } 1272 + 1273 + /* 1274 + * A test that tries to allocate a memory region in a specific NUMA node that 1275 + * has enough memory to allocate a region of the requested size. 1276 + * Expect to allocate an aligned region at the end of the requested node. 
1277 + */ 1278 + static int alloc_try_nid_top_down_numa_simple_check(void) 1279 + { 1280 + int nid_req = 3; 1281 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 1282 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 1283 + void *allocated_ptr = NULL; 1284 + phys_addr_t size; 1285 + phys_addr_t min_addr; 1286 + phys_addr_t max_addr; 1287 + 1288 + PREFIX_PUSH(); 1289 + setup_numa_memblock(node_fractions); 1290 + 1291 + ASSERT_LE(SZ_4, req_node->size); 1292 + size = req_node->size / SZ_4; 1293 + min_addr = memblock_start_of_DRAM(); 1294 + max_addr = memblock_end_of_DRAM(); 1295 + 1296 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1297 + min_addr, max_addr, nid_req); 1298 + 1299 + ASSERT_NE(allocated_ptr, NULL); 1300 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 1301 + 1302 + ASSERT_EQ(new_rgn->size, size); 1303 + ASSERT_EQ(new_rgn->base, region_end(req_node) - size); 1304 + ASSERT_LE(req_node->base, new_rgn->base); 1305 + 1306 + ASSERT_EQ(memblock.reserved.cnt, 1); 1307 + ASSERT_EQ(memblock.reserved.total_size, size); 1308 + 1309 + test_pass_pop(); 1310 + 1311 + return 0; 1312 + } 1313 + 1314 + /* 1315 + * A test that tries to allocate a memory region in a specific NUMA node that 1316 + * does not have enough memory to allocate a region of the requested size: 1317 + * 1318 + * | +-----+ +------------------+ | 1319 + * | | req | | expected | | 1320 + * +---+-----+----------+------------------+-----+ 1321 + * 1322 + * | +---------+ | 1323 + * | | rgn | | 1324 + * +-----------------------------+---------+-----+ 1325 + * 1326 + * Expect to allocate an aligned region at the end of the last node that has 1327 + * enough memory (in this case, nid = 6) after falling back to NUMA_NO_NODE. 
1328 + */ 1329 + static int alloc_try_nid_top_down_numa_small_node_check(void) 1330 + { 1331 + int nid_req = 1; 1332 + int nid_exp = 6; 1333 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 1334 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 1335 + struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; 1336 + void *allocated_ptr = NULL; 1337 + phys_addr_t size; 1338 + phys_addr_t min_addr; 1339 + phys_addr_t max_addr; 1340 + 1341 + PREFIX_PUSH(); 1342 + setup_numa_memblock(node_fractions); 1343 + 1344 + size = SZ_2 * req_node->size; 1345 + min_addr = memblock_start_of_DRAM(); 1346 + max_addr = memblock_end_of_DRAM(); 1347 + 1348 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1349 + min_addr, max_addr, nid_req); 1350 + 1351 + ASSERT_NE(allocated_ptr, NULL); 1352 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 1353 + 1354 + ASSERT_EQ(new_rgn->size, size); 1355 + ASSERT_EQ(new_rgn->base, region_end(exp_node) - size); 1356 + ASSERT_LE(exp_node->base, new_rgn->base); 1357 + 1358 + ASSERT_EQ(memblock.reserved.cnt, 1); 1359 + ASSERT_EQ(memblock.reserved.total_size, size); 1360 + 1361 + test_pass_pop(); 1362 + 1363 + return 0; 1364 + } 1365 + 1366 + /* 1367 + * A test that tries to allocate a memory region in a specific NUMA node that 1368 + * is fully reserved: 1369 + * 1370 + * | +---------+ +------------------+ | 1371 + * | |requested| | expected | | 1372 + * +--------------+---------+------------+------------------+-----+ 1373 + * 1374 + * | +---------+ +---------+ | 1375 + * | | reserved| | new | | 1376 + * +--------------+---------+---------------------+---------+-----+ 1377 + * 1378 + * Expect to allocate an aligned region at the end of the last node that is 1379 + * large enough and has enough unreserved memory (in this case, nid = 6) after 1380 + * falling back to NUMA_NO_NODE. The region count and total size get updated. 
1381 + */ 1382 + static int alloc_try_nid_top_down_numa_node_reserved_check(void) 1383 + { 1384 + int nid_req = 2; 1385 + int nid_exp = 6; 1386 + struct memblock_region *new_rgn = &memblock.reserved.regions[1]; 1387 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 1388 + struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; 1389 + void *allocated_ptr = NULL; 1390 + phys_addr_t size; 1391 + phys_addr_t min_addr; 1392 + phys_addr_t max_addr; 1393 + 1394 + PREFIX_PUSH(); 1395 + setup_numa_memblock(node_fractions); 1396 + 1397 + size = req_node->size; 1398 + min_addr = memblock_start_of_DRAM(); 1399 + max_addr = memblock_end_of_DRAM(); 1400 + 1401 + memblock_reserve(req_node->base, req_node->size); 1402 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1403 + min_addr, max_addr, nid_req); 1404 + 1405 + ASSERT_NE(allocated_ptr, NULL); 1406 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 1407 + 1408 + ASSERT_EQ(new_rgn->size, size); 1409 + ASSERT_EQ(new_rgn->base, region_end(exp_node) - size); 1410 + ASSERT_LE(exp_node->base, new_rgn->base); 1411 + 1412 + ASSERT_EQ(memblock.reserved.cnt, 2); 1413 + ASSERT_EQ(memblock.reserved.total_size, size + req_node->size); 1414 + 1415 + test_pass_pop(); 1416 + 1417 + return 0; 1418 + } 1419 + 1420 + /* 1421 + * A test that tries to allocate a memory region in a specific NUMA node that 1422 + * is partially reserved but has enough memory for the allocated region: 1423 + * 1424 + * | +---------------------------------------+ | 1425 + * | | requested | | 1426 + * +-----------+---------------------------------------+----------+ 1427 + * 1428 + * | +------------------+ +-----+ | 1429 + * | | reserved | | new | | 1430 + * +-----------+------------------+--------------+-----+----------+ 1431 + * 1432 + * Expect to allocate an aligned region at the end of the requested node. The 1433 + * region count and total size get updated. 
1434 + */ 1435 + static int alloc_try_nid_top_down_numa_part_reserved_check(void) 1436 + { 1437 + int nid_req = 4; 1438 + struct memblock_region *new_rgn = &memblock.reserved.regions[1]; 1439 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 1440 + void *allocated_ptr = NULL; 1441 + struct region r1; 1442 + phys_addr_t size; 1443 + phys_addr_t min_addr; 1444 + phys_addr_t max_addr; 1445 + 1446 + PREFIX_PUSH(); 1447 + setup_numa_memblock(node_fractions); 1448 + 1449 + ASSERT_LE(SZ_8, req_node->size); 1450 + r1.base = req_node->base; 1451 + r1.size = req_node->size / SZ_2; 1452 + size = r1.size / SZ_4; 1453 + min_addr = memblock_start_of_DRAM(); 1454 + max_addr = memblock_end_of_DRAM(); 1455 + 1456 + memblock_reserve(r1.base, r1.size); 1457 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1458 + min_addr, max_addr, nid_req); 1459 + 1460 + ASSERT_NE(allocated_ptr, NULL); 1461 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 1462 + 1463 + ASSERT_EQ(new_rgn->size, size); 1464 + ASSERT_EQ(new_rgn->base, region_end(req_node) - size); 1465 + ASSERT_LE(req_node->base, new_rgn->base); 1466 + 1467 + ASSERT_EQ(memblock.reserved.cnt, 2); 1468 + ASSERT_EQ(memblock.reserved.total_size, size + r1.size); 1469 + 1470 + test_pass_pop(); 1471 + 1472 + return 0; 1473 + } 1474 + 1475 + /* 1476 + * A test that tries to allocate a memory region in a specific NUMA node that 1477 + * is partially reserved and does not have enough contiguous memory for the 1478 + * allocated region: 1479 + * 1480 + * | +-----------------------+ +----------------------| 1481 + * | | requested | | expected | 1482 + * +-----------+-----------------------+---------+----------------------+ 1483 + * 1484 + * | +----------+ +-----------| 1485 + * | | reserved | | new | 1486 + * +-----------------+----------+---------------------------+-----------+ 1487 + * 1488 + * Expect to allocate an aligned region at the end of the last node that is 1489 + * large enough 
and has enough unreserved memory (in this case, 1490 + * nid = NUMA_NODES - 1) after falling back to NUMA_NO_NODE. The region count 1491 + * and total size get updated. 1492 + */ 1493 + static int alloc_try_nid_top_down_numa_part_reserved_fallback_check(void) 1494 + { 1495 + int nid_req = 4; 1496 + int nid_exp = NUMA_NODES - 1; 1497 + struct memblock_region *new_rgn = &memblock.reserved.regions[1]; 1498 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 1499 + struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; 1500 + void *allocated_ptr = NULL; 1501 + struct region r1; 1502 + phys_addr_t size; 1503 + phys_addr_t min_addr; 1504 + phys_addr_t max_addr; 1505 + 1506 + PREFIX_PUSH(); 1507 + setup_numa_memblock(node_fractions); 1508 + 1509 + ASSERT_LE(SZ_4, req_node->size); 1510 + size = req_node->size / SZ_2; 1511 + r1.base = req_node->base + (size / SZ_2); 1512 + r1.size = size; 1513 + 1514 + min_addr = memblock_start_of_DRAM(); 1515 + max_addr = memblock_end_of_DRAM(); 1516 + 1517 + memblock_reserve(r1.base, r1.size); 1518 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1519 + min_addr, max_addr, nid_req); 1520 + 1521 + ASSERT_NE(allocated_ptr, NULL); 1522 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 1523 + 1524 + ASSERT_EQ(new_rgn->size, size); 1525 + ASSERT_EQ(new_rgn->base, region_end(exp_node) - size); 1526 + ASSERT_LE(exp_node->base, new_rgn->base); 1527 + 1528 + ASSERT_EQ(memblock.reserved.cnt, 2); 1529 + ASSERT_EQ(memblock.reserved.total_size, size + r1.size); 1530 + 1531 + test_pass_pop(); 1532 + 1533 + return 0; 1534 + } 1535 + 1536 + /* 1537 + * A test that tries to allocate a memory region that spans over the min_addr 1538 + * and max_addr range and overlaps with two different nodes, where the first 1539 + * node is the requested node: 1540 + * 1541 + * min_addr 1542 + * | max_addr 1543 + * | | 1544 + * v v 1545 + * | +-----------------------+-----------+ | 1546 + * | | 
requested | node3 | | 1547 + * +-----------+-----------------------+-----------+--------------+ 1548 + * + + 1549 + * | +-----------+ | 1550 + * | | rgn | | 1551 + * +-----------------------+-----------+--------------------------+ 1552 + * 1553 + * Expect to drop the lower limit and allocate a memory region that ends at 1554 + * the end of the requested node. 1555 + */ 1556 + static int alloc_try_nid_top_down_numa_split_range_low_check(void) 1557 + { 1558 + int nid_req = 2; 1559 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 1560 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 1561 + void *allocated_ptr = NULL; 1562 + phys_addr_t size = SZ_512; 1563 + phys_addr_t min_addr; 1564 + phys_addr_t max_addr; 1565 + phys_addr_t req_node_end; 1566 + 1567 + PREFIX_PUSH(); 1568 + setup_numa_memblock(node_fractions); 1569 + 1570 + req_node_end = region_end(req_node); 1571 + min_addr = req_node_end - SZ_256; 1572 + max_addr = min_addr + size; 1573 + 1574 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1575 + min_addr, max_addr, nid_req); 1576 + 1577 + ASSERT_NE(allocated_ptr, NULL); 1578 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 1579 + 1580 + ASSERT_EQ(new_rgn->size, size); 1581 + ASSERT_EQ(new_rgn->base, req_node_end - size); 1582 + ASSERT_LE(req_node->base, new_rgn->base); 1583 + 1584 + ASSERT_EQ(memblock.reserved.cnt, 1); 1585 + ASSERT_EQ(memblock.reserved.total_size, size); 1586 + 1587 + test_pass_pop(); 1588 + 1589 + return 0; 1590 + } 1591 + 1592 + /* 1593 + * A test that tries to allocate a memory region that spans over the min_addr 1594 + * and max_addr range and overlaps with two different nodes, where the second 1595 + * node is the requested node: 1596 + * 1597 + * min_addr 1598 + * | max_addr 1599 + * | | 1600 + * v v 1601 + * | +--------------------------+---------+ | 1602 + * | | expected |requested| | 1603 + * +------+--------------------------+---------+----------------+ 1604 
+ * + + 1605 + * | +---------+ | 1606 + * | | rgn | | 1607 + * +-----------------------+---------+--------------------------+ 1608 + * 1609 + * Expect to drop the lower limit and allocate a memory region that 1610 + * ends at the end of the first node that overlaps with the range. 1611 + */ 1612 + static int alloc_try_nid_top_down_numa_split_range_high_check(void) 1613 + { 1614 + int nid_req = 3; 1615 + int nid_exp = nid_req - 1; 1616 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 1617 + struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; 1618 + void *allocated_ptr = NULL; 1619 + phys_addr_t size = SZ_512; 1620 + phys_addr_t min_addr; 1621 + phys_addr_t max_addr; 1622 + phys_addr_t exp_node_end; 1623 + 1624 + PREFIX_PUSH(); 1625 + setup_numa_memblock(node_fractions); 1626 + 1627 + exp_node_end = region_end(exp_node); 1628 + min_addr = exp_node_end - SZ_256; 1629 + max_addr = min_addr + size; 1630 + 1631 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1632 + min_addr, max_addr, nid_req); 1633 + 1634 + ASSERT_NE(allocated_ptr, NULL); 1635 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 1636 + 1637 + ASSERT_EQ(new_rgn->size, size); 1638 + ASSERT_EQ(new_rgn->base, exp_node_end - size); 1639 + ASSERT_LE(exp_node->base, new_rgn->base); 1640 + 1641 + ASSERT_EQ(memblock.reserved.cnt, 1); 1642 + ASSERT_EQ(memblock.reserved.total_size, size); 1643 + 1644 + test_pass_pop(); 1645 + 1646 + return 0; 1647 + } 1648 + 1649 + /* 1650 + * A test that tries to allocate a memory region that spans over the min_addr 1651 + * and max_addr range and overlaps with two different nodes, where the requested 1652 + * node ends before min_addr: 1653 + * 1654 + * min_addr 1655 + * | max_addr 1656 + * | | 1657 + * v v 1658 + * | +---------------+ +-------------+---------+ | 1659 + * | | requested | | node1 | node2 | | 1660 + * +----+---------------+--------+-------------+---------+----------+ 1661 + * + + 1662 + * | 
+---------+                                         |
 * |    |   rgn   |                                         |
 * +----+---------+-----------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_try_nid_top_down_numa_no_overlap_split_check(void)
{
	int nid_req = 2;
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct memblock_region *req = &memblock.memory.regions[nid_req];
	struct memblock_region *node2 = &memblock.memory.regions[6];
	void *ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	/* The window lies entirely above the requested node */
	min_addr = node2->base - SZ_256;
	max_addr = min_addr + size;

	ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					 min_addr, max_addr, nid_req);

	ASSERT_NE(ptr, NULL);
	assert_mem_content(ptr, size, alloc_nid_test_flags);

	/* The lower limit is dropped; the region ends at the node's end */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, region_end(req) - size);
	ASSERT_LE(req->base, rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_add range when
 * the requested node and the range do not overlap, and requested node ends
 * before min_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *                          min_addr
 *                          |                                 max_addr
 *                          |                                 |
 *                          v                                 v
 * |-----------+           +----------+----...----+----------+      |
 * | requested |           | min node |    ...    
| max node |      |
 * +-----------+-----------+----------+----...----+----------+------+
 * +                                                         +
 * |                                                   +-----+      |
 * |                                                   | rgn |      |
 * +---------------------------------------------------+-----+------+
 *
 * Expect to allocate a memory region at the end of the final node in
 * the range after falling back to NUMA_NO_NODE.
 */
static int alloc_try_nid_top_down_numa_no_overlap_low_check(void)
{
	int nid_req = 0;
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct memblock_region *lo_node = &memblock.memory.regions[2];
	struct memblock_region *hi_node = &memblock.memory.regions[5];
	void *ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	/* The window spans nodes 2..5; the requested node 0 is below it */
	min_addr = lo_node->base;
	max_addr = region_end(hi_node);

	ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					 min_addr, max_addr, nid_req);

	ASSERT_NE(ptr, NULL);
	assert_mem_content(ptr, size, alloc_nid_test_flags);

	/* Fallback to NUMA_NO_NODE: top-down picks the top of the window */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);
	ASSERT_LE(hi_node->base, rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_add range when
 * the requested node and the range do not overlap, and requested node starts
 * after max_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *       min_addr
 *       |                                 max_addr
 *       |                                 |
 *       v                                 v
 * |     +----------+----...----+----------+        +-----------+   |
 * |     | min node |    ...    
| max node |        | requested |   |
 * +-----+----------+----...----+----------+--------+-----------+---+
 * +                                       +
 * |                                 +-----+                        |
 * |                                 | rgn |                        |
 * +---------------------------------+-----+------------------------+
 *
 * Expect to allocate a memory region at the end of the final node in
 * the range after falling back to NUMA_NO_NODE.
 */
static int alloc_try_nid_top_down_numa_no_overlap_high_check(void)
{
	int nid_req = 7;
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct memblock_region *lo_node = &memblock.memory.regions[2];
	struct memblock_region *hi_node = &memblock.memory.regions[5];
	void *ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	/* The window spans nodes 2..5; the requested node 7 is above it */
	min_addr = lo_node->base;
	max_addr = region_end(hi_node);

	ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					 min_addr, max_addr, nid_req);

	ASSERT_NE(ptr, NULL);
	assert_mem_content(ptr, size, alloc_nid_test_flags);

	/* Fallback to NUMA_NO_NODE: top-down picks the top of the window */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);
	ASSERT_LE(hi_node->base, rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the beginning of the requested node.
1821 + */ 1822 + static int alloc_try_nid_bottom_up_numa_simple_check(void) 1823 + { 1824 + int nid_req = 3; 1825 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 1826 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 1827 + void *allocated_ptr = NULL; 1828 + phys_addr_t size; 1829 + phys_addr_t min_addr; 1830 + phys_addr_t max_addr; 1831 + 1832 + PREFIX_PUSH(); 1833 + setup_numa_memblock(node_fractions); 1834 + 1835 + ASSERT_LE(SZ_4, req_node->size); 1836 + size = req_node->size / SZ_4; 1837 + min_addr = memblock_start_of_DRAM(); 1838 + max_addr = memblock_end_of_DRAM(); 1839 + 1840 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1841 + min_addr, max_addr, nid_req); 1842 + 1843 + ASSERT_NE(allocated_ptr, NULL); 1844 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 1845 + 1846 + ASSERT_EQ(new_rgn->size, size); 1847 + ASSERT_EQ(new_rgn->base, req_node->base); 1848 + ASSERT_LE(region_end(new_rgn), region_end(req_node)); 1849 + 1850 + ASSERT_EQ(memblock.reserved.cnt, 1); 1851 + ASSERT_EQ(memblock.reserved.total_size, size); 1852 + 1853 + test_pass_pop(); 1854 + 1855 + return 0; 1856 + } 1857 + 1858 + /* 1859 + * A test that tries to allocate a memory region in a specific NUMA node that 1860 + * does not have enough memory to allocate a region of the requested size: 1861 + * 1862 + * |----------------------+-----+ | 1863 + * | expected | req | | 1864 + * +----------------------+-----+----------------+ 1865 + * 1866 + * |---------+ | 1867 + * | rgn | | 1868 + * +---------+-----------------------------------+ 1869 + * 1870 + * Expect to allocate an aligned region at the beginning of the first node that 1871 + * has enough memory (in this case, nid = 0) after falling back to NUMA_NO_NODE. 
1872 + */ 1873 + static int alloc_try_nid_bottom_up_numa_small_node_check(void) 1874 + { 1875 + int nid_req = 1; 1876 + int nid_exp = 0; 1877 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 1878 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 1879 + struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; 1880 + void *allocated_ptr = NULL; 1881 + phys_addr_t size; 1882 + phys_addr_t min_addr; 1883 + phys_addr_t max_addr; 1884 + 1885 + PREFIX_PUSH(); 1886 + setup_numa_memblock(node_fractions); 1887 + 1888 + size = SZ_2 * req_node->size; 1889 + min_addr = memblock_start_of_DRAM(); 1890 + max_addr = memblock_end_of_DRAM(); 1891 + 1892 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1893 + min_addr, max_addr, nid_req); 1894 + 1895 + ASSERT_NE(allocated_ptr, NULL); 1896 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 1897 + 1898 + ASSERT_EQ(new_rgn->size, size); 1899 + ASSERT_EQ(new_rgn->base, exp_node->base); 1900 + ASSERT_LE(region_end(new_rgn), region_end(exp_node)); 1901 + 1902 + ASSERT_EQ(memblock.reserved.cnt, 1); 1903 + ASSERT_EQ(memblock.reserved.total_size, size); 1904 + 1905 + test_pass_pop(); 1906 + 1907 + return 0; 1908 + } 1909 + 1910 + /* 1911 + * A test that tries to allocate a memory region in a specific NUMA node that 1912 + * is fully reserved: 1913 + * 1914 + * |----------------------+ +-----------+ | 1915 + * | expected | | requested | | 1916 + * +----------------------+-----+-----------+--------------------+ 1917 + * 1918 + * |-----------+ +-----------+ | 1919 + * | new | | reserved | | 1920 + * +-----------+----------------+-----------+--------------------+ 1921 + * 1922 + * Expect to allocate an aligned region at the beginning of the first node that 1923 + * is large enough and has enough unreserved memory (in this case, nid = 0) 1924 + * after falling back to NUMA_NO_NODE. The region count and total size get 1925 + * updated. 
1926 + */ 1927 + static int alloc_try_nid_bottom_up_numa_node_reserved_check(void) 1928 + { 1929 + int nid_req = 2; 1930 + int nid_exp = 0; 1931 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 1932 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 1933 + struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; 1934 + void *allocated_ptr = NULL; 1935 + phys_addr_t size; 1936 + phys_addr_t min_addr; 1937 + phys_addr_t max_addr; 1938 + 1939 + PREFIX_PUSH(); 1940 + setup_numa_memblock(node_fractions); 1941 + 1942 + size = req_node->size; 1943 + min_addr = memblock_start_of_DRAM(); 1944 + max_addr = memblock_end_of_DRAM(); 1945 + 1946 + memblock_reserve(req_node->base, req_node->size); 1947 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 1948 + min_addr, max_addr, nid_req); 1949 + 1950 + ASSERT_NE(allocated_ptr, NULL); 1951 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 1952 + 1953 + ASSERT_EQ(new_rgn->size, size); 1954 + ASSERT_EQ(new_rgn->base, exp_node->base); 1955 + ASSERT_LE(region_end(new_rgn), region_end(exp_node)); 1956 + 1957 + ASSERT_EQ(memblock.reserved.cnt, 2); 1958 + ASSERT_EQ(memblock.reserved.total_size, size + req_node->size); 1959 + 1960 + test_pass_pop(); 1961 + 1962 + return 0; 1963 + } 1964 + 1965 + /* 1966 + * A test that tries to allocate a memory region in a specific NUMA node that 1967 + * is partially reserved but has enough memory for the allocated region: 1968 + * 1969 + * | +---------------------------------------+ | 1970 + * | | requested | | 1971 + * +-----------+---------------------------------------+---------+ 1972 + * 1973 + * | +------------------+-----+ | 1974 + * | | reserved | new | | 1975 + * +-----------+------------------+-----+------------------------+ 1976 + * 1977 + * Expect to allocate an aligned region in the requested node that merges with 1978 + * the existing reserved region. The total size gets updated. 
1979 + */ 1980 + static int alloc_try_nid_bottom_up_numa_part_reserved_check(void) 1981 + { 1982 + int nid_req = 4; 1983 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 1984 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 1985 + void *allocated_ptr = NULL; 1986 + struct region r1; 1987 + phys_addr_t size; 1988 + phys_addr_t min_addr; 1989 + phys_addr_t max_addr; 1990 + phys_addr_t total_size; 1991 + 1992 + PREFIX_PUSH(); 1993 + setup_numa_memblock(node_fractions); 1994 + 1995 + ASSERT_LE(SZ_8, req_node->size); 1996 + r1.base = req_node->base; 1997 + r1.size = req_node->size / SZ_2; 1998 + size = r1.size / SZ_4; 1999 + min_addr = memblock_start_of_DRAM(); 2000 + max_addr = memblock_end_of_DRAM(); 2001 + total_size = size + r1.size; 2002 + 2003 + memblock_reserve(r1.base, r1.size); 2004 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 2005 + min_addr, max_addr, nid_req); 2006 + 2007 + ASSERT_NE(allocated_ptr, NULL); 2008 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 2009 + 2010 + ASSERT_EQ(new_rgn->size, total_size); 2011 + ASSERT_EQ(new_rgn->base, req_node->base); 2012 + ASSERT_LE(region_end(new_rgn), region_end(req_node)); 2013 + 2014 + ASSERT_EQ(memblock.reserved.cnt, 1); 2015 + ASSERT_EQ(memblock.reserved.total_size, total_size); 2016 + 2017 + test_pass_pop(); 2018 + 2019 + return 0; 2020 + } 2021 + 2022 + /* 2023 + * A test that tries to allocate a memory region in a specific NUMA node that 2024 + * is partially reserved and does not have enough contiguous memory for the 2025 + * allocated region: 2026 + * 2027 + * |----------------------+ +-----------------------+ | 2028 + * | expected | | requested | | 2029 + * +----------------------+-------+-----------------------+---------+ 2030 + * 2031 + * |-----------+ +----------+ | 2032 + * | new | | reserved | | 2033 + * +-----------+------------------------+----------+----------------+ 2034 + * 2035 + * Expect to allocate an aligned 
region at the beginning of the first 2036 + * node that is large enough and has enough unreserved memory (in this case, 2037 + * nid = 0) after falling back to NUMA_NO_NODE. The region count and total size 2038 + * get updated. 2039 + */ 2040 + static int alloc_try_nid_bottom_up_numa_part_reserved_fallback_check(void) 2041 + { 2042 + int nid_req = 4; 2043 + int nid_exp = 0; 2044 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 2045 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 2046 + struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; 2047 + void *allocated_ptr = NULL; 2048 + struct region r1; 2049 + phys_addr_t size; 2050 + phys_addr_t min_addr; 2051 + phys_addr_t max_addr; 2052 + 2053 + PREFIX_PUSH(); 2054 + setup_numa_memblock(node_fractions); 2055 + 2056 + ASSERT_LE(SZ_4, req_node->size); 2057 + size = req_node->size / SZ_2; 2058 + r1.base = req_node->base + (size / SZ_2); 2059 + r1.size = size; 2060 + 2061 + min_addr = memblock_start_of_DRAM(); 2062 + max_addr = memblock_end_of_DRAM(); 2063 + 2064 + memblock_reserve(r1.base, r1.size); 2065 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 2066 + min_addr, max_addr, nid_req); 2067 + 2068 + ASSERT_NE(allocated_ptr, NULL); 2069 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 2070 + 2071 + ASSERT_EQ(new_rgn->size, size); 2072 + ASSERT_EQ(new_rgn->base, exp_node->base); 2073 + ASSERT_LE(region_end(new_rgn), region_end(exp_node)); 2074 + 2075 + ASSERT_EQ(memblock.reserved.cnt, 2); 2076 + ASSERT_EQ(memblock.reserved.total_size, size + r1.size); 2077 + 2078 + test_pass_pop(); 2079 + 2080 + return 0; 2081 + } 2082 + 2083 + /* 2084 + * A test that tries to allocate a memory region that spans over the min_addr 2085 + * and max_addr range and overlaps with two different nodes, where the first 2086 + * node is the requested node: 2087 + * 2088 + * min_addr 2089 + * | max_addr 2090 + * | | 2091 + * v v 2092 + * | 
+-----------------------+-----------+              |
 * |           |       requested       |   node3   |              |
 * +-----------+-----------------------+-----------+--------------+
 * +           +
 * |           +-----------+                                      |
 * |           |    rgn    |                                      |
 * +-----------+-----------+--------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region at the beginning
 * of the requested node.
 */
static int alloc_try_nid_bottom_up_numa_split_range_low_check(void)
{
	int nid_req = 2;
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct memblock_region *req = &memblock.memory.regions[nid_req];
	void *ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t req_end;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	/* The window straddles the requested node's upper boundary */
	req_end = region_end(req);
	min_addr = req_end - SZ_256;
	max_addr = min_addr + size;

	ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					 min_addr, max_addr, nid_req);

	ASSERT_NE(ptr, NULL);
	assert_mem_content(ptr, size, alloc_nid_test_flags);

	/* Bottom-up: the region starts at the requested node's base */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, req->base);
	ASSERT_LE(region_end(rgn), req_end);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the second
 * node is the requested node:
 *
 *                                                min_addr
 *                                                |         max_addr
 *                                                |         |
 *                                                v         v
 * |------------------+        +----------------------+---------+      |
 * |     expected     |        |       previous       |requested|      |
2150 + * +------------------+--------+----------------------+---------+------+ 2151 + * + + 2152 + * |---------+ | 2153 + * | rgn | | 2154 + * +---------+---------------------------------------------------------+ 2155 + * 2156 + * Expect to drop the lower limit and allocate a memory region at the beginning 2157 + * of the first node that has enough memory. 2158 + */ 2159 + static int alloc_try_nid_bottom_up_numa_split_range_high_check(void) 2160 + { 2161 + int nid_req = 3; 2162 + int nid_exp = 0; 2163 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 2164 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 2165 + struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; 2166 + void *allocated_ptr = NULL; 2167 + phys_addr_t size = SZ_512; 2168 + phys_addr_t min_addr; 2169 + phys_addr_t max_addr; 2170 + phys_addr_t exp_node_end; 2171 + 2172 + PREFIX_PUSH(); 2173 + setup_numa_memblock(node_fractions); 2174 + 2175 + exp_node_end = region_end(req_node); 2176 + min_addr = req_node->base - SZ_256; 2177 + max_addr = min_addr + size; 2178 + 2179 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 2180 + min_addr, max_addr, nid_req); 2181 + 2182 + ASSERT_NE(allocated_ptr, NULL); 2183 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 2184 + 2185 + ASSERT_EQ(new_rgn->size, size); 2186 + ASSERT_EQ(new_rgn->base, exp_node->base); 2187 + ASSERT_LE(region_end(new_rgn), exp_node_end); 2188 + 2189 + ASSERT_EQ(memblock.reserved.cnt, 1); 2190 + ASSERT_EQ(memblock.reserved.total_size, size); 2191 + 2192 + test_pass_pop(); 2193 + 2194 + return 0; 2195 + } 2196 + 2197 + /* 2198 + * A test that tries to allocate a memory region that spans over the min_addr 2199 + * and max_addr range and overlaps with two different nodes, where the requested 2200 + * node ends before min_addr: 2201 + * 2202 + * min_addr 2203 + * | max_addr 2204 + * | | 2205 + * v v 2206 + * | +---------------+ +-------------+---------+ | 2207 
+ * | | requested | | node1 | node2 | | 2208 + * +----+---------------+--------+-------------+---------+---------+ 2209 + * + + 2210 + * | +---------+ | 2211 + * | | rgn | | 2212 + * +----+---------+------------------------------------------------+ 2213 + * 2214 + * Expect to drop the lower limit and allocate a memory region that starts at 2215 + * the beginning of the requested node. 2216 + */ 2217 + static int alloc_try_nid_bottom_up_numa_no_overlap_split_check(void) 2218 + { 2219 + int nid_req = 2; 2220 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 2221 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 2222 + struct memblock_region *node2 = &memblock.memory.regions[6]; 2223 + void *allocated_ptr = NULL; 2224 + phys_addr_t size; 2225 + phys_addr_t min_addr; 2226 + phys_addr_t max_addr; 2227 + 2228 + PREFIX_PUSH(); 2229 + setup_numa_memblock(node_fractions); 2230 + 2231 + size = SZ_512; 2232 + min_addr = node2->base - SZ_256; 2233 + max_addr = min_addr + size; 2234 + 2235 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 2236 + min_addr, max_addr, nid_req); 2237 + 2238 + ASSERT_NE(allocated_ptr, NULL); 2239 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 2240 + 2241 + ASSERT_EQ(new_rgn->size, size); 2242 + ASSERT_EQ(new_rgn->base, req_node->base); 2243 + ASSERT_LE(region_end(new_rgn), region_end(req_node)); 2244 + 2245 + ASSERT_EQ(memblock.reserved.cnt, 1); 2246 + ASSERT_EQ(memblock.reserved.total_size, size); 2247 + 2248 + test_pass_pop(); 2249 + 2250 + return 0; 2251 + } 2252 + 2253 + /* 2254 + * A test that tries to allocate memory within min_addr and max_add range when 2255 + * the requested node and the range do not overlap, and requested node ends 2256 + * before min_addr. 
The range overlaps with multiple nodes along node 2257 + * boundaries: 2258 + * 2259 + * min_addr 2260 + * | max_addr 2261 + * | | 2262 + * v v 2263 + * |-----------+ +----------+----...----+----------+ | 2264 + * | requested | | min node | ... | max node | | 2265 + * +-----------+-----------+----------+----...----+----------+------+ 2266 + * + + 2267 + * | +-----+ | 2268 + * | | rgn | | 2269 + * +-----------------------+-----+----------------------------------+ 2270 + * 2271 + * Expect to allocate a memory region at the beginning of the first node 2272 + * in the range after falling back to NUMA_NO_NODE. 2273 + */ 2274 + static int alloc_try_nid_bottom_up_numa_no_overlap_low_check(void) 2275 + { 2276 + int nid_req = 0; 2277 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 2278 + struct memblock_region *min_node = &memblock.memory.regions[2]; 2279 + struct memblock_region *max_node = &memblock.memory.regions[5]; 2280 + void *allocated_ptr = NULL; 2281 + phys_addr_t size = SZ_64; 2282 + phys_addr_t max_addr; 2283 + phys_addr_t min_addr; 2284 + 2285 + PREFIX_PUSH(); 2286 + setup_numa_memblock(node_fractions); 2287 + 2288 + min_addr = min_node->base; 2289 + max_addr = region_end(max_node); 2290 + 2291 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 2292 + min_addr, max_addr, nid_req); 2293 + 2294 + ASSERT_NE(allocated_ptr, NULL); 2295 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 2296 + 2297 + ASSERT_EQ(new_rgn->size, size); 2298 + ASSERT_EQ(new_rgn->base, min_addr); 2299 + ASSERT_LE(region_end(new_rgn), region_end(min_node)); 2300 + 2301 + ASSERT_EQ(memblock.reserved.cnt, 1); 2302 + ASSERT_EQ(memblock.reserved.total_size, size); 2303 + 2304 + test_pass_pop(); 2305 + 2306 + return 0; 2307 + } 2308 + 2309 + /* 2310 + * A test that tries to allocate memory within min_addr and max_add range when 2311 + * the requested node and the range do not overlap, and requested node starts 2312 + * after max_addr. 
The range overlaps with multiple nodes along node 2313 + * boundaries: 2314 + * 2315 + * min_addr 2316 + * | max_addr 2317 + * | | 2318 + * v v 2319 + * | +----------+----...----+----------+ +---------+ | 2320 + * | | min node | ... | max node | |requested| | 2321 + * +-----+----------+----...----+----------+---------+---------+---+ 2322 + * + + 2323 + * | +-----+ | 2324 + * | | rgn | | 2325 + * +-----+-----+---------------------------------------------------+ 2326 + * 2327 + * Expect to allocate a memory region at the beginning of the first node 2328 + * in the range after falling back to NUMA_NO_NODE. 2329 + */ 2330 + static int alloc_try_nid_bottom_up_numa_no_overlap_high_check(void) 2331 + { 2332 + int nid_req = 7; 2333 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 2334 + struct memblock_region *min_node = &memblock.memory.regions[2]; 2335 + struct memblock_region *max_node = &memblock.memory.regions[5]; 2336 + void *allocated_ptr = NULL; 2337 + phys_addr_t size = SZ_64; 2338 + phys_addr_t max_addr; 2339 + phys_addr_t min_addr; 2340 + 2341 + PREFIX_PUSH(); 2342 + setup_numa_memblock(node_fractions); 2343 + 2344 + min_addr = min_node->base; 2345 + max_addr = region_end(max_node); 2346 + 2347 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 2348 + min_addr, max_addr, nid_req); 2349 + 2350 + ASSERT_NE(allocated_ptr, NULL); 2351 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 2352 + 2353 + ASSERT_EQ(new_rgn->size, size); 2354 + ASSERT_EQ(new_rgn->base, min_addr); 2355 + ASSERT_LE(region_end(new_rgn), region_end(min_node)); 2356 + 2357 + ASSERT_EQ(memblock.reserved.cnt, 1); 2358 + ASSERT_EQ(memblock.reserved.total_size, size); 2359 + 2360 + test_pass_pop(); 2361 + 2362 + return 0; 2363 + } 2364 + 2365 + /* 2366 + * A test that tries to allocate a memory region in a specific NUMA node that 2367 + * does not have enough memory to allocate a region of the requested size. 
2368 + * Additionally, none of the nodes have enough memory to allocate the region: 2369 + * 2370 + * +-----------------------------------+ 2371 + * | new | 2372 + * +-----------------------------------+ 2373 + * |-------+-------+-------+-------+-------+-------+-------+-------| 2374 + * | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 | 2375 + * +-------+-------+-------+-------+-------+-------+-------+-------+ 2376 + * 2377 + * Expect no allocation to happen. 2378 + */ 2379 + static int alloc_try_nid_numa_large_region_generic_check(void) 2380 + { 2381 + int nid_req = 3; 2382 + void *allocated_ptr = NULL; 2383 + phys_addr_t size = MEM_SIZE / SZ_2; 2384 + phys_addr_t min_addr; 2385 + phys_addr_t max_addr; 2386 + 2387 + PREFIX_PUSH(); 2388 + setup_numa_memblock(node_fractions); 2389 + 2390 + min_addr = memblock_start_of_DRAM(); 2391 + max_addr = memblock_end_of_DRAM(); 2392 + 2393 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 2394 + min_addr, max_addr, nid_req); 2395 + ASSERT_EQ(allocated_ptr, NULL); 2396 + 2397 + test_pass_pop(); 2398 + 2399 + return 0; 2400 + } 2401 + 2402 + /* 2403 + * A test that tries to allocate memory within min_addr and max_addr range when 2404 + * there are two reserved regions at the borders. The requested node starts at 2405 + * min_addr and ends at max_addr and is the same size as the region to be 2406 + * allocated: 2407 + * 2408 + * min_addr 2409 + * | max_addr 2410 + * | | 2411 + * v v 2412 + * | +-----------+-----------------------+-----------------------| 2413 + * | | node5 | requested | node7 | 2414 + * +------+-----------+-----------------------+-----------------------+ 2415 + * + + 2416 + * | +----+-----------------------+----+ | 2417 + * | | r2 | new | r1 | | 2418 + * +-------------+----+-----------------------+----+------------------+ 2419 + * 2420 + * Expect to merge all of the regions into one. The region counter and total 2421 + * size fields get updated. 
2422 + */ 2423 + static int alloc_try_nid_numa_reserved_full_merge_generic_check(void) 2424 + { 2425 + int nid_req = 6; 2426 + int nid_next = nid_req + 1; 2427 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 2428 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 2429 + struct memblock_region *next_node = &memblock.memory.regions[nid_next]; 2430 + void *allocated_ptr = NULL; 2431 + struct region r1, r2; 2432 + phys_addr_t size = req_node->size; 2433 + phys_addr_t total_size; 2434 + phys_addr_t max_addr; 2435 + phys_addr_t min_addr; 2436 + 2437 + PREFIX_PUSH(); 2438 + setup_numa_memblock(node_fractions); 2439 + 2440 + r1.base = next_node->base; 2441 + r1.size = SZ_128; 2442 + 2443 + r2.size = SZ_128; 2444 + r2.base = r1.base - (size + r2.size); 2445 + 2446 + total_size = r1.size + r2.size + size; 2447 + min_addr = r2.base + r2.size; 2448 + max_addr = r1.base; 2449 + 2450 + memblock_reserve(r1.base, r1.size); 2451 + memblock_reserve(r2.base, r2.size); 2452 + 2453 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 2454 + min_addr, max_addr, nid_req); 2455 + 2456 + ASSERT_NE(allocated_ptr, NULL); 2457 + assert_mem_content(allocated_ptr, size, alloc_nid_test_flags); 2458 + 2459 + ASSERT_EQ(new_rgn->size, total_size); 2460 + ASSERT_EQ(new_rgn->base, r2.base); 2461 + 2462 + ASSERT_LE(new_rgn->base, req_node->base); 2463 + ASSERT_LE(region_end(req_node), region_end(new_rgn)); 2464 + 2465 + ASSERT_EQ(memblock.reserved.cnt, 1); 2466 + ASSERT_EQ(memblock.reserved.total_size, total_size); 2467 + 2468 + test_pass_pop(); 2469 + 2470 + return 0; 2471 + } 2472 + 2473 + /* 2474 + * A test that tries to allocate memory within min_addr and max_add range, 2475 + * where the total range can fit the region, but it is split between two nodes 2476 + * and everything else is reserved. 
Additionally, nid is set to NUMA_NO_NODE 2477 + * instead of requesting a specific node: 2478 + * 2479 + * +-----------+ 2480 + * | new | 2481 + * +-----------+ 2482 + * | +---------------------+-----------| 2483 + * | | prev node | next node | 2484 + * +------+---------------------+-----------+ 2485 + * + + 2486 + * |----------------------+ +-----| 2487 + * | r1 | | r2 | 2488 + * +----------------------+-----------+-----+ 2489 + * ^ ^ 2490 + * | | 2491 + * | max_addr 2492 + * | 2493 + * min_addr 2494 + * 2495 + * Expect no allocation to happen. 2496 + */ 2497 + static int alloc_try_nid_numa_split_all_reserved_generic_check(void) 2498 + { 2499 + void *allocated_ptr = NULL; 2500 + struct memblock_region *next_node = &memblock.memory.regions[7]; 2501 + struct region r1, r2; 2502 + phys_addr_t size = SZ_256; 2503 + phys_addr_t max_addr; 2504 + phys_addr_t min_addr; 2505 + 2506 + PREFIX_PUSH(); 2507 + setup_numa_memblock(node_fractions); 2508 + 2509 + r2.base = next_node->base + SZ_128; 2510 + r2.size = memblock_end_of_DRAM() - r2.base; 2511 + 2512 + r1.size = MEM_SIZE - (r2.size + size); 2513 + r1.base = memblock_start_of_DRAM(); 2514 + 2515 + min_addr = r1.base + r1.size; 2516 + max_addr = r2.base; 2517 + 2518 + memblock_reserve(r1.base, r1.size); 2519 + memblock_reserve(r2.base, r2.size); 2520 + 2521 + allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 2522 + min_addr, max_addr, 2523 + NUMA_NO_NODE); 2524 + 2525 + ASSERT_EQ(allocated_ptr, NULL); 2526 + 2527 + test_pass_pop(); 2528 + 2529 + return 0; 2530 + } 2531 + 2532 + /* Test case wrappers for NUMA tests */ 2533 + static int alloc_try_nid_numa_simple_check(void) 2534 + { 2535 + test_print("\tRunning %s...\n", __func__); 2536 + memblock_set_bottom_up(false); 2537 + alloc_try_nid_top_down_numa_simple_check(); 2538 + memblock_set_bottom_up(true); 2539 + alloc_try_nid_bottom_up_numa_simple_check(); 2540 + 2541 + return 0; 2542 + } 2543 + 2544 + static int alloc_try_nid_numa_small_node_check(void) 2545 
{
	test_print("\tRunning %s...\n", __func__);
	/* Each wrapper runs the top-down flavor, then the bottom-up flavor */
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_small_node_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_numa_small_node_check();

	return 0;
}

static int alloc_try_nid_numa_node_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_node_reserved_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_numa_node_reserved_check();

	return 0;
}

static int alloc_try_nid_numa_part_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_part_reserved_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_numa_part_reserved_check();

	return 0;
}

static int alloc_try_nid_numa_part_reserved_fallback_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_part_reserved_fallback_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_numa_part_reserved_fallback_check();

	return 0;
}

static int alloc_try_nid_numa_split_range_low_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_split_range_low_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_numa_split_range_low_check();

	return 0;
}

static int alloc_try_nid_numa_split_range_high_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_split_range_high_check();
	memblock_set_bottom_up(true);
alloc_try_nid_bottom_up_numa_split_range_high_check(); 2606 + 2607 + return 0; 2608 + } 2609 + 2610 + static int alloc_try_nid_numa_no_overlap_split_check(void) 2611 + { 2612 + test_print("\tRunning %s...\n", __func__); 2613 + memblock_set_bottom_up(false); 2614 + alloc_try_nid_top_down_numa_no_overlap_split_check(); 2615 + memblock_set_bottom_up(true); 2616 + alloc_try_nid_bottom_up_numa_no_overlap_split_check(); 2617 + 2618 + return 0; 2619 + } 2620 + 2621 + static int alloc_try_nid_numa_no_overlap_low_check(void) 2622 + { 2623 + test_print("\tRunning %s...\n", __func__); 2624 + memblock_set_bottom_up(false); 2625 + alloc_try_nid_top_down_numa_no_overlap_low_check(); 2626 + memblock_set_bottom_up(true); 2627 + alloc_try_nid_bottom_up_numa_no_overlap_low_check(); 2628 + 2629 + return 0; 2630 + } 2631 + 2632 + static int alloc_try_nid_numa_no_overlap_high_check(void) 2633 + { 2634 + test_print("\tRunning %s...\n", __func__); 2635 + memblock_set_bottom_up(false); 2636 + alloc_try_nid_top_down_numa_no_overlap_high_check(); 2637 + memblock_set_bottom_up(true); 2638 + alloc_try_nid_bottom_up_numa_no_overlap_high_check(); 2639 + 2640 + return 0; 2641 + } 2642 + 2643 + static int alloc_try_nid_numa_large_region_check(void) 2644 + { 2645 + test_print("\tRunning %s...\n", __func__); 2646 + run_top_down(alloc_try_nid_numa_large_region_generic_check); 2647 + run_bottom_up(alloc_try_nid_numa_large_region_generic_check); 2648 + 2649 + return 0; 2650 + } 2651 + 2652 + static int alloc_try_nid_numa_reserved_full_merge_check(void) 2653 + { 2654 + test_print("\tRunning %s...\n", __func__); 2655 + run_top_down(alloc_try_nid_numa_reserved_full_merge_generic_check); 2656 + run_bottom_up(alloc_try_nid_numa_reserved_full_merge_generic_check); 2657 + 2658 + return 0; 2659 + } 2660 + 2661 + static int alloc_try_nid_numa_split_all_reserved_check(void) 2662 + { 2663 + test_print("\tRunning %s...\n", __func__); 2664 + run_top_down(alloc_try_nid_numa_split_all_reserved_generic_check); 2665 + 
run_bottom_up(alloc_try_nid_numa_split_all_reserved_generic_check); 2666 + 2667 + return 0; 2668 + } 2669 + 2670 + int __memblock_alloc_nid_numa_checks(void) 2671 + { 2672 + test_print("Running %s NUMA tests...\n", 2673 + get_memblock_alloc_try_nid_name(alloc_nid_test_flags)); 2674 + 2675 + alloc_try_nid_numa_simple_check(); 2676 + alloc_try_nid_numa_small_node_check(); 2677 + alloc_try_nid_numa_node_reserved_check(); 2678 + alloc_try_nid_numa_part_reserved_check(); 2679 + alloc_try_nid_numa_part_reserved_fallback_check(); 2680 + alloc_try_nid_numa_split_range_low_check(); 2681 + alloc_try_nid_numa_split_range_high_check(); 2682 + 2683 + alloc_try_nid_numa_no_overlap_split_check(); 2684 + alloc_try_nid_numa_no_overlap_low_check(); 2685 + alloc_try_nid_numa_no_overlap_high_check(); 2686 + alloc_try_nid_numa_large_region_check(); 2687 + alloc_try_nid_numa_reserved_full_merge_check(); 2688 + alloc_try_nid_numa_split_all_reserved_check(); 2689 + 2690 + return 0; 2691 + } 2692 + 2693 + static int memblock_alloc_nid_checks_internal(int flags) 2694 + { 2695 + alloc_nid_test_flags = flags; 2696 + 2697 + prefix_reset(); 2698 + prefix_push(get_memblock_alloc_try_nid_name(flags)); 2699 + 2700 + reset_memblock_attributes(); 2701 + dummy_physical_memory_init(); 2702 + 2703 + memblock_alloc_nid_range_checks(); 2704 + memblock_alloc_nid_numa_checks(); 2705 + 1224 2706 dummy_physical_memory_cleanup(); 1225 2707 1226 2708 prefix_pop(); 2709 + 2710 + return 0; 2711 + } 2712 + 2713 + int memblock_alloc_nid_checks(void) 2714 + { 2715 + memblock_alloc_nid_checks_internal(TEST_F_NONE); 2716 + memblock_alloc_nid_checks_internal(TEST_F_RAW); 1227 2717 1228 2718 return 0; 1229 2719 }
+16
tools/testing/memblock/tests/alloc_nid_api.h
··· 5 5 #include "common.h" 6 6 7 7 int memblock_alloc_nid_checks(void); 8 + int __memblock_alloc_nid_numa_checks(void); 9 + 10 + #ifdef CONFIG_NUMA 11 + static inline int memblock_alloc_nid_numa_checks(void) 12 + { 13 + __memblock_alloc_nid_numa_checks(); 14 + return 0; 15 + } 16 + 17 + #else 18 + static inline int memblock_alloc_nid_numa_checks(void) 19 + { 20 + return 0; 21 + } 22 + 23 + #endif /* CONFIG_NUMA */ 8 24 9 25 #endif
+767
tools/testing/memblock/tests/basic_api.c
··· 8 8 #define FUNC_RESERVE "memblock_reserve" 9 9 #define FUNC_REMOVE "memblock_remove" 10 10 #define FUNC_FREE "memblock_free" 11 + #define FUNC_TRIM "memblock_trim_memory" 11 12 12 13 static int memblock_initialization_check(void) 13 14 { ··· 327 326 return 0; 328 327 } 329 328 329 + /* 330 + * A test that tries to add two memory blocks that don't overlap with one 331 + * another and then add a third memory block in the space between the first two: 332 + * 333 + * | +--------+--------+--------+ | 334 + * | | r1 | r3 | r2 | | 335 + * +--------+--------+--------+--------+--+ 336 + * 337 + * Expect to merge the three entries into one region that starts at r1.base 338 + * and has size of r1.size + r2.size + r3.size. The region counter and total 339 + * size of the available memory are updated. 340 + */ 341 + static int memblock_add_between_check(void) 342 + { 343 + struct memblock_region *rgn; 344 + phys_addr_t total_size; 345 + 346 + rgn = &memblock.memory.regions[0]; 347 + 348 + struct region r1 = { 349 + .base = SZ_1G, 350 + .size = SZ_8K 351 + }; 352 + struct region r2 = { 353 + .base = SZ_1G + SZ_16K, 354 + .size = SZ_8K 355 + }; 356 + struct region r3 = { 357 + .base = SZ_1G + SZ_8K, 358 + .size = SZ_8K 359 + }; 360 + 361 + PREFIX_PUSH(); 362 + 363 + total_size = r1.size + r2.size + r3.size; 364 + 365 + reset_memblock_regions(); 366 + memblock_add(r1.base, r1.size); 367 + memblock_add(r2.base, r2.size); 368 + memblock_add(r3.base, r3.size); 369 + 370 + ASSERT_EQ(rgn->base, r1.base); 371 + ASSERT_EQ(rgn->size, total_size); 372 + 373 + ASSERT_EQ(memblock.memory.cnt, 1); 374 + ASSERT_EQ(memblock.memory.total_size, total_size); 375 + 376 + test_pass_pop(); 377 + 378 + return 0; 379 + } 380 + 381 + /* 382 + * A simple test that tries to add a memory block r when r extends past 383 + * PHYS_ADDR_MAX: 384 + * 385 + * +--------+ 386 + * | r | 387 + * +--------+ 388 + * | +----+ 389 + * | | rgn| 390 + * +----------------------------+----+ 391 + * 392 + * Expect to add 
a memory block of size PHYS_ADDR_MAX - r.base. Expect the 393 + * total size of available memory and the counter to be updated. 394 + */ 395 + static int memblock_add_near_max_check(void) 396 + { 397 + struct memblock_region *rgn; 398 + phys_addr_t total_size; 399 + 400 + rgn = &memblock.memory.regions[0]; 401 + 402 + struct region r = { 403 + .base = PHYS_ADDR_MAX - SZ_1M, 404 + .size = SZ_2M 405 + }; 406 + 407 + PREFIX_PUSH(); 408 + 409 + total_size = PHYS_ADDR_MAX - r.base; 410 + 411 + reset_memblock_regions(); 412 + memblock_add(r.base, r.size); 413 + 414 + ASSERT_EQ(rgn->base, r.base); 415 + ASSERT_EQ(rgn->size, total_size); 416 + 417 + ASSERT_EQ(memblock.memory.cnt, 1); 418 + ASSERT_EQ(memblock.memory.total_size, total_size); 419 + 420 + test_pass_pop(); 421 + 422 + return 0; 423 + } 424 + 330 425 static int memblock_add_checks(void) 331 426 { 332 427 prefix_reset(); ··· 436 339 memblock_add_overlap_bottom_check(); 437 340 memblock_add_within_check(); 438 341 memblock_add_twice_check(); 342 + memblock_add_between_check(); 343 + memblock_add_near_max_check(); 439 344 440 345 prefix_pop(); 441 346 ··· 703 604 return 0; 704 605 } 705 606 607 + /* 608 + * A test that tries to mark two memory blocks that don't overlap as reserved 609 + * and then reserve a third memory block in the space between the first two: 610 + * 611 + * | +--------+--------+--------+ | 612 + * | | r1 | r3 | r2 | | 613 + * +--------+--------+--------+--------+--+ 614 + * 615 + * Expect to merge the three entries into one reserved region that starts at 616 + * r1.base and has size of r1.size + r2.size + r3.size. The region counter and 617 + * total for memblock.reserved are updated. 
618 + */ 619 + static int memblock_reserve_between_check(void) 620 + { 621 + struct memblock_region *rgn; 622 + phys_addr_t total_size; 623 + 624 + rgn = &memblock.reserved.regions[0]; 625 + 626 + struct region r1 = { 627 + .base = SZ_1G, 628 + .size = SZ_8K 629 + }; 630 + struct region r2 = { 631 + .base = SZ_1G + SZ_16K, 632 + .size = SZ_8K 633 + }; 634 + struct region r3 = { 635 + .base = SZ_1G + SZ_8K, 636 + .size = SZ_8K 637 + }; 638 + 639 + PREFIX_PUSH(); 640 + 641 + total_size = r1.size + r2.size + r3.size; 642 + 643 + reset_memblock_regions(); 644 + memblock_reserve(r1.base, r1.size); 645 + memblock_reserve(r2.base, r2.size); 646 + memblock_reserve(r3.base, r3.size); 647 + 648 + ASSERT_EQ(rgn->base, r1.base); 649 + ASSERT_EQ(rgn->size, total_size); 650 + 651 + ASSERT_EQ(memblock.reserved.cnt, 1); 652 + ASSERT_EQ(memblock.reserved.total_size, total_size); 653 + 654 + test_pass_pop(); 655 + 656 + return 0; 657 + } 658 + 659 + /* 660 + * A simple test that tries to reserve a memory block r when r extends past 661 + * PHYS_ADDR_MAX: 662 + * 663 + * +--------+ 664 + * | r | 665 + * +--------+ 666 + * | +----+ 667 + * | | rgn| 668 + * +----------------------------+----+ 669 + * 670 + * Expect to reserve a memory block of size PHYS_ADDR_MAX - r.base. Expect the 671 + * total size of reserved memory and the counter to be updated. 
672 + */ 673 + static int memblock_reserve_near_max_check(void) 674 + { 675 + struct memblock_region *rgn; 676 + phys_addr_t total_size; 677 + 678 + rgn = &memblock.reserved.regions[0]; 679 + 680 + struct region r = { 681 + .base = PHYS_ADDR_MAX - SZ_1M, 682 + .size = SZ_2M 683 + }; 684 + 685 + PREFIX_PUSH(); 686 + 687 + total_size = PHYS_ADDR_MAX - r.base; 688 + 689 + reset_memblock_regions(); 690 + memblock_reserve(r.base, r.size); 691 + 692 + ASSERT_EQ(rgn->base, r.base); 693 + ASSERT_EQ(rgn->size, total_size); 694 + 695 + ASSERT_EQ(memblock.reserved.cnt, 1); 696 + ASSERT_EQ(memblock.reserved.total_size, total_size); 697 + 698 + test_pass_pop(); 699 + 700 + return 0; 701 + } 702 + 706 703 static int memblock_reserve_checks(void) 707 704 { 708 705 prefix_reset(); ··· 811 616 memblock_reserve_overlap_bottom_check(); 812 617 memblock_reserve_within_check(); 813 618 memblock_reserve_twice_check(); 619 + memblock_reserve_between_check(); 620 + memblock_reserve_near_max_check(); 814 621 815 622 prefix_pop(); 816 623 ··· 1084 887 return 0; 1085 888 } 1086 889 890 + /* 891 + * A simple test that tries to remove a region r1 from the array of 892 + * available memory regions when r1 is the only available region. 893 + * Expect to add a memory block r1 and then remove r1 so that a dummy 894 + * region is added. The region counter stays the same, and the total size 895 + * is updated. 
896 + */ 897 + static int memblock_remove_only_region_check(void) 898 + { 899 + struct memblock_region *rgn; 900 + 901 + rgn = &memblock.memory.regions[0]; 902 + 903 + struct region r1 = { 904 + .base = SZ_2K, 905 + .size = SZ_4K 906 + }; 907 + 908 + PREFIX_PUSH(); 909 + 910 + reset_memblock_regions(); 911 + memblock_add(r1.base, r1.size); 912 + memblock_remove(r1.base, r1.size); 913 + 914 + ASSERT_EQ(rgn->base, 0); 915 + ASSERT_EQ(rgn->size, 0); 916 + 917 + ASSERT_EQ(memblock.memory.cnt, 1); 918 + ASSERT_EQ(memblock.memory.total_size, 0); 919 + 920 + test_pass_pop(); 921 + 922 + return 0; 923 + } 924 + 925 + /* 926 + * A simple test that tries remove a region r2 from the array of available 927 + * memory regions when r2 extends past PHYS_ADDR_MAX: 928 + * 929 + * +--------+ 930 + * | r2 | 931 + * +--------+ 932 + * | +---+....+ 933 + * | |rgn| | 934 + * +------------------------+---+----+ 935 + * 936 + * Expect that only the portion between PHYS_ADDR_MAX and r2.base is removed. 937 + * Expect the total size of available memory to be updated and the counter to 938 + * not be updated. 
939 + */ 940 + static int memblock_remove_near_max_check(void) 941 + { 942 + struct memblock_region *rgn; 943 + phys_addr_t total_size; 944 + 945 + rgn = &memblock.memory.regions[0]; 946 + 947 + struct region r1 = { 948 + .base = PHYS_ADDR_MAX - SZ_2M, 949 + .size = SZ_2M 950 + }; 951 + 952 + struct region r2 = { 953 + .base = PHYS_ADDR_MAX - SZ_1M, 954 + .size = SZ_2M 955 + }; 956 + 957 + PREFIX_PUSH(); 958 + 959 + total_size = r1.size - (PHYS_ADDR_MAX - r2.base); 960 + 961 + reset_memblock_regions(); 962 + memblock_add(r1.base, r1.size); 963 + memblock_remove(r2.base, r2.size); 964 + 965 + ASSERT_EQ(rgn->base, r1.base); 966 + ASSERT_EQ(rgn->size, total_size); 967 + 968 + ASSERT_EQ(memblock.memory.cnt, 1); 969 + ASSERT_EQ(memblock.memory.total_size, total_size); 970 + 971 + test_pass_pop(); 972 + 973 + return 0; 974 + } 975 + 976 + /* 977 + * A test that tries to remove a region r3 that overlaps with two existing 978 + * regions r1 and r2: 979 + * 980 + * +----------------+ 981 + * | r3 | 982 + * +----------------+ 983 + * | +----+..... ........+--------+ 984 + * | | |r1 : : |r2 | | 985 + * +----+----+----+---+-------+--------+-----+ 986 + * 987 + * Expect that only the intersections of r1 with r3 and r2 with r3 are removed 988 + * from the available memory pool. Expect the total size of available memory to 989 + * be updated and the counter to not be updated. 
990 + */ 991 + static int memblock_remove_overlap_two_check(void) 992 + { 993 + struct memblock_region *rgn1, *rgn2; 994 + phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size; 995 + 996 + rgn1 = &memblock.memory.regions[0]; 997 + rgn2 = &memblock.memory.regions[1]; 998 + 999 + struct region r1 = { 1000 + .base = SZ_16M, 1001 + .size = SZ_32M 1002 + }; 1003 + struct region r2 = { 1004 + .base = SZ_64M, 1005 + .size = SZ_64M 1006 + }; 1007 + struct region r3 = { 1008 + .base = SZ_32M, 1009 + .size = SZ_64M 1010 + }; 1011 + 1012 + PREFIX_PUSH(); 1013 + 1014 + r2_end = r2.base + r2.size; 1015 + r3_end = r3.base + r3.size; 1016 + new_r1_size = r3.base - r1.base; 1017 + new_r2_size = r2_end - r3_end; 1018 + total_size = new_r1_size + new_r2_size; 1019 + 1020 + reset_memblock_regions(); 1021 + memblock_add(r1.base, r1.size); 1022 + memblock_add(r2.base, r2.size); 1023 + memblock_remove(r3.base, r3.size); 1024 + 1025 + ASSERT_EQ(rgn1->base, r1.base); 1026 + ASSERT_EQ(rgn1->size, new_r1_size); 1027 + 1028 + ASSERT_EQ(rgn2->base, r3_end); 1029 + ASSERT_EQ(rgn2->size, new_r2_size); 1030 + 1031 + ASSERT_EQ(memblock.memory.cnt, 2); 1032 + ASSERT_EQ(memblock.memory.total_size, total_size); 1033 + 1034 + test_pass_pop(); 1035 + 1036 + return 0; 1037 + } 1038 + 1087 1039 static int memblock_remove_checks(void) 1088 1040 { 1089 1041 prefix_reset(); ··· 1244 898 memblock_remove_overlap_top_check(); 1245 899 memblock_remove_overlap_bottom_check(); 1246 900 memblock_remove_within_check(); 901 + memblock_remove_only_region_check(); 902 + memblock_remove_near_max_check(); 903 + memblock_remove_overlap_two_check(); 1247 904 1248 905 prefix_pop(); 1249 906 ··· 1512 1163 return 0; 1513 1164 } 1514 1165 1166 + /* 1167 + * A simple test that tries to free a memory block r1 that was marked 1168 + * earlier as reserved when r1 is the only available region. 1169 + * Expect to reserve a memory block r1 and then free r1 so that r1 is 1170 + * overwritten with a dummy region. 
The region counter stays the same, 1171 + * and the total size is updated. 1172 + */ 1173 + static int memblock_free_only_region_check(void) 1174 + { 1175 + struct memblock_region *rgn; 1176 + 1177 + rgn = &memblock.reserved.regions[0]; 1178 + 1179 + struct region r1 = { 1180 + .base = SZ_2K, 1181 + .size = SZ_4K 1182 + }; 1183 + 1184 + PREFIX_PUSH(); 1185 + 1186 + reset_memblock_regions(); 1187 + memblock_reserve(r1.base, r1.size); 1188 + memblock_free((void *)r1.base, r1.size); 1189 + 1190 + ASSERT_EQ(rgn->base, 0); 1191 + ASSERT_EQ(rgn->size, 0); 1192 + 1193 + ASSERT_EQ(memblock.reserved.cnt, 1); 1194 + ASSERT_EQ(memblock.reserved.total_size, 0); 1195 + 1196 + test_pass_pop(); 1197 + 1198 + return 0; 1199 + } 1200 + 1201 + /* 1202 + * A simple test that tries free a region r2 when r2 extends past PHYS_ADDR_MAX: 1203 + * 1204 + * +--------+ 1205 + * | r2 | 1206 + * +--------+ 1207 + * | +---+....+ 1208 + * | |rgn| | 1209 + * +------------------------+---+----+ 1210 + * 1211 + * Expect that only the portion between PHYS_ADDR_MAX and r2.base is freed. 1212 + * Expect the total size of reserved memory to be updated and the counter to 1213 + * not be updated. 
1214 + */ 1215 + static int memblock_free_near_max_check(void) 1216 + { 1217 + struct memblock_region *rgn; 1218 + phys_addr_t total_size; 1219 + 1220 + rgn = &memblock.reserved.regions[0]; 1221 + 1222 + struct region r1 = { 1223 + .base = PHYS_ADDR_MAX - SZ_2M, 1224 + .size = SZ_2M 1225 + }; 1226 + 1227 + struct region r2 = { 1228 + .base = PHYS_ADDR_MAX - SZ_1M, 1229 + .size = SZ_2M 1230 + }; 1231 + 1232 + PREFIX_PUSH(); 1233 + 1234 + total_size = r1.size - (PHYS_ADDR_MAX - r2.base); 1235 + 1236 + reset_memblock_regions(); 1237 + memblock_reserve(r1.base, r1.size); 1238 + memblock_free((void *)r2.base, r2.size); 1239 + 1240 + ASSERT_EQ(rgn->base, r1.base); 1241 + ASSERT_EQ(rgn->size, total_size); 1242 + 1243 + ASSERT_EQ(memblock.reserved.cnt, 1); 1244 + ASSERT_EQ(memblock.reserved.total_size, total_size); 1245 + 1246 + test_pass_pop(); 1247 + 1248 + return 0; 1249 + } 1250 + 1251 + /* 1252 + * A test that tries to free a reserved region r3 that overlaps with two 1253 + * existing reserved regions r1 and r2: 1254 + * 1255 + * +----------------+ 1256 + * | r3 | 1257 + * +----------------+ 1258 + * | +----+..... ........+--------+ 1259 + * | | |r1 : : |r2 | | 1260 + * +----+----+----+---+-------+--------+-----+ 1261 + * 1262 + * Expect that only the intersections of r1 with r3 and r2 with r3 are freed 1263 + * from the collection of reserved memory. Expect the total size of reserved 1264 + * memory to be updated and the counter to not be updated. 
1265 + */ 1266 + static int memblock_free_overlap_two_check(void) 1267 + { 1268 + struct memblock_region *rgn1, *rgn2; 1269 + phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size; 1270 + 1271 + rgn1 = &memblock.reserved.regions[0]; 1272 + rgn2 = &memblock.reserved.regions[1]; 1273 + 1274 + struct region r1 = { 1275 + .base = SZ_16M, 1276 + .size = SZ_32M 1277 + }; 1278 + struct region r2 = { 1279 + .base = SZ_64M, 1280 + .size = SZ_64M 1281 + }; 1282 + struct region r3 = { 1283 + .base = SZ_32M, 1284 + .size = SZ_64M 1285 + }; 1286 + 1287 + PREFIX_PUSH(); 1288 + 1289 + r2_end = r2.base + r2.size; 1290 + r3_end = r3.base + r3.size; 1291 + new_r1_size = r3.base - r1.base; 1292 + new_r2_size = r2_end - r3_end; 1293 + total_size = new_r1_size + new_r2_size; 1294 + 1295 + reset_memblock_regions(); 1296 + memblock_reserve(r1.base, r1.size); 1297 + memblock_reserve(r2.base, r2.size); 1298 + memblock_free((void *)r3.base, r3.size); 1299 + 1300 + ASSERT_EQ(rgn1->base, r1.base); 1301 + ASSERT_EQ(rgn1->size, new_r1_size); 1302 + 1303 + ASSERT_EQ(rgn2->base, r3_end); 1304 + ASSERT_EQ(rgn2->size, new_r2_size); 1305 + 1306 + ASSERT_EQ(memblock.reserved.cnt, 2); 1307 + ASSERT_EQ(memblock.reserved.total_size, total_size); 1308 + 1309 + test_pass_pop(); 1310 + 1311 + return 0; 1312 + } 1313 + 1515 1314 static int memblock_free_checks(void) 1516 1315 { 1517 1316 prefix_reset(); ··· 1671 1174 memblock_free_overlap_top_check(); 1672 1175 memblock_free_overlap_bottom_check(); 1673 1176 memblock_free_within_check(); 1177 + memblock_free_only_region_check(); 1178 + memblock_free_near_max_check(); 1179 + memblock_free_overlap_two_check(); 1180 + 1181 + prefix_pop(); 1182 + 1183 + return 0; 1184 + } 1185 + 1186 + static int memblock_set_bottom_up_check(void) 1187 + { 1188 + prefix_push("memblock_set_bottom_up"); 1189 + 1190 + memblock_set_bottom_up(false); 1191 + ASSERT_EQ(memblock.bottom_up, false); 1192 + memblock_set_bottom_up(true); 1193 + ASSERT_EQ(memblock.bottom_up, 
true); 1194 + 1195 + reset_memblock_attributes(); 1196 + test_pass_pop(); 1197 + 1198 + return 0; 1199 + } 1200 + 1201 + static int memblock_bottom_up_check(void) 1202 + { 1203 + prefix_push("memblock_bottom_up"); 1204 + 1205 + memblock_set_bottom_up(false); 1206 + ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up); 1207 + ASSERT_EQ(memblock_bottom_up(), false); 1208 + memblock_set_bottom_up(true); 1209 + ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up); 1210 + ASSERT_EQ(memblock_bottom_up(), true); 1211 + 1212 + reset_memblock_attributes(); 1213 + test_pass_pop(); 1214 + 1215 + return 0; 1216 + } 1217 + 1218 + static int memblock_bottom_up_checks(void) 1219 + { 1220 + test_print("Running memblock_*bottom_up tests...\n"); 1221 + 1222 + prefix_reset(); 1223 + memblock_set_bottom_up_check(); 1224 + prefix_reset(); 1225 + memblock_bottom_up_check(); 1226 + 1227 + return 0; 1228 + } 1229 + 1230 + /* 1231 + * A test that tries to trim memory when both ends of the memory region are 1232 + * aligned. Expect that the memory will not be trimmed. Expect the counter to 1233 + * not be updated. 1234 + */ 1235 + static int memblock_trim_memory_aligned_check(void) 1236 + { 1237 + struct memblock_region *rgn; 1238 + const phys_addr_t alignment = SMP_CACHE_BYTES; 1239 + 1240 + rgn = &memblock.memory.regions[0]; 1241 + 1242 + struct region r = { 1243 + .base = alignment, 1244 + .size = alignment * 4 1245 + }; 1246 + 1247 + PREFIX_PUSH(); 1248 + 1249 + reset_memblock_regions(); 1250 + memblock_add(r.base, r.size); 1251 + memblock_trim_memory(alignment); 1252 + 1253 + ASSERT_EQ(rgn->base, r.base); 1254 + ASSERT_EQ(rgn->size, r.size); 1255 + 1256 + ASSERT_EQ(memblock.memory.cnt, 1); 1257 + 1258 + test_pass_pop(); 1259 + 1260 + return 0; 1261 + } 1262 + 1263 + /* 1264 + * A test that tries to trim memory when there are two available regions, r1 and 1265 + * r2. 
Region r1 is aligned on both ends and region r2 is unaligned on one end 1266 + * and smaller than the alignment: 1267 + * 1268 + * alignment 1269 + * |--------| 1270 + * | +-----------------+ +------+ | 1271 + * | | r1 | | r2 | | 1272 + * +--------+-----------------+--------+------+---+ 1273 + * ^ ^ ^ ^ ^ 1274 + * |________|________|________| | 1275 + * | Unaligned address 1276 + * Aligned addresses 1277 + * 1278 + * Expect that r1 will not be trimmed and r2 will be removed. Expect the 1279 + * counter to be updated. 1280 + */ 1281 + static int memblock_trim_memory_too_small_check(void) 1282 + { 1283 + struct memblock_region *rgn; 1284 + const phys_addr_t alignment = SMP_CACHE_BYTES; 1285 + 1286 + rgn = &memblock.memory.regions[0]; 1287 + 1288 + struct region r1 = { 1289 + .base = alignment, 1290 + .size = alignment * 2 1291 + }; 1292 + struct region r2 = { 1293 + .base = alignment * 4, 1294 + .size = alignment - SZ_2 1295 + }; 1296 + 1297 + PREFIX_PUSH(); 1298 + 1299 + reset_memblock_regions(); 1300 + memblock_add(r1.base, r1.size); 1301 + memblock_add(r2.base, r2.size); 1302 + memblock_trim_memory(alignment); 1303 + 1304 + ASSERT_EQ(rgn->base, r1.base); 1305 + ASSERT_EQ(rgn->size, r1.size); 1306 + 1307 + ASSERT_EQ(memblock.memory.cnt, 1); 1308 + 1309 + test_pass_pop(); 1310 + 1311 + return 0; 1312 + } 1313 + 1314 + /* 1315 + * A test that tries to trim memory when there are two available regions, r1 and 1316 + * r2. Region r1 is aligned on both ends and region r2 is unaligned at the base 1317 + * and aligned at the end: 1318 + * 1319 + * Unaligned address 1320 + * | 1321 + * v 1322 + * | +-----------------+ +---------------+ | 1323 + * | | r1 | | r2 | | 1324 + * +--------+-----------------+----------+---------------+---+ 1325 + * ^ ^ ^ ^ ^ ^ 1326 + * |________|________|________|________|________| 1327 + * | 1328 + * Aligned addresses 1329 + * 1330 + * Expect that r1 will not be trimmed and r2 will be trimmed at the base. 
1331 + * Expect the counter to not be updated. 1332 + */ 1333 + static int memblock_trim_memory_unaligned_base_check(void) 1334 + { 1335 + struct memblock_region *rgn1, *rgn2; 1336 + const phys_addr_t alignment = SMP_CACHE_BYTES; 1337 + phys_addr_t offset = SZ_2; 1338 + phys_addr_t new_r2_base, new_r2_size; 1339 + 1340 + rgn1 = &memblock.memory.regions[0]; 1341 + rgn2 = &memblock.memory.regions[1]; 1342 + 1343 + struct region r1 = { 1344 + .base = alignment, 1345 + .size = alignment * 2 1346 + }; 1347 + struct region r2 = { 1348 + .base = alignment * 4 + offset, 1349 + .size = alignment * 2 - offset 1350 + }; 1351 + 1352 + PREFIX_PUSH(); 1353 + 1354 + new_r2_base = r2.base + (alignment - offset); 1355 + new_r2_size = r2.size - (alignment - offset); 1356 + 1357 + reset_memblock_regions(); 1358 + memblock_add(r1.base, r1.size); 1359 + memblock_add(r2.base, r2.size); 1360 + memblock_trim_memory(alignment); 1361 + 1362 + ASSERT_EQ(rgn1->base, r1.base); 1363 + ASSERT_EQ(rgn1->size, r1.size); 1364 + 1365 + ASSERT_EQ(rgn2->base, new_r2_base); 1366 + ASSERT_EQ(rgn2->size, new_r2_size); 1367 + 1368 + ASSERT_EQ(memblock.memory.cnt, 2); 1369 + 1370 + test_pass_pop(); 1371 + 1372 + return 0; 1373 + } 1374 + 1375 + /* 1376 + * A test that tries to trim memory when there are two available regions, r1 and 1377 + * r2. Region r1 is aligned on both ends and region r2 is aligned at the base 1378 + * and unaligned at the end: 1379 + * 1380 + * Unaligned address 1381 + * | 1382 + * v 1383 + * | +-----------------+ +---------------+ | 1384 + * | | r1 | | r2 | | 1385 + * +--------+-----------------+--------+---------------+---+ 1386 + * ^ ^ ^ ^ ^ ^ 1387 + * |________|________|________|________|________| 1388 + * | 1389 + * Aligned addresses 1390 + * 1391 + * Expect that r1 will not be trimmed and r2 will be trimmed at the end. 1392 + * Expect the counter to not be updated. 
1393 + */ 1394 + static int memblock_trim_memory_unaligned_end_check(void) 1395 + { 1396 + struct memblock_region *rgn1, *rgn2; 1397 + const phys_addr_t alignment = SMP_CACHE_BYTES; 1398 + phys_addr_t offset = SZ_2; 1399 + phys_addr_t new_r2_size; 1400 + 1401 + rgn1 = &memblock.memory.regions[0]; 1402 + rgn2 = &memblock.memory.regions[1]; 1403 + 1404 + struct region r1 = { 1405 + .base = alignment, 1406 + .size = alignment * 2 1407 + }; 1408 + struct region r2 = { 1409 + .base = alignment * 4, 1410 + .size = alignment * 2 - offset 1411 + }; 1412 + 1413 + PREFIX_PUSH(); 1414 + 1415 + new_r2_size = r2.size - (alignment - offset); 1416 + 1417 + reset_memblock_regions(); 1418 + memblock_add(r1.base, r1.size); 1419 + memblock_add(r2.base, r2.size); 1420 + memblock_trim_memory(alignment); 1421 + 1422 + ASSERT_EQ(rgn1->base, r1.base); 1423 + ASSERT_EQ(rgn1->size, r1.size); 1424 + 1425 + ASSERT_EQ(rgn2->base, r2.base); 1426 + ASSERT_EQ(rgn2->size, new_r2_size); 1427 + 1428 + ASSERT_EQ(memblock.memory.cnt, 2); 1429 + 1430 + test_pass_pop(); 1431 + 1432 + return 0; 1433 + } 1434 + 1435 + static int memblock_trim_memory_checks(void) 1436 + { 1437 + prefix_reset(); 1438 + prefix_push(FUNC_TRIM); 1439 + test_print("Running %s tests...\n", FUNC_TRIM); 1440 + 1441 + memblock_trim_memory_aligned_check(); 1442 + memblock_trim_memory_too_small_check(); 1443 + memblock_trim_memory_unaligned_base_check(); 1444 + memblock_trim_memory_unaligned_end_check(); 1674 1445 1675 1446 prefix_pop(); 1676 1447 ··· 1952 1187 memblock_reserve_checks(); 1953 1188 memblock_remove_checks(); 1954 1189 memblock_free_checks(); 1190 + memblock_bottom_up_checks(); 1191 + memblock_trim_memory_checks(); 1955 1192 1956 1193 return 0; 1957 1194 }
+41 -1
tools/testing/memblock/tests/common.c
··· 9 9 #define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS 10 10 #define PREFIXES_MAX 15 11 11 #define DELIM ": " 12 + #define BASIS 10000 12 13 13 14 static struct test_memory memory_block; 14 15 static const char __maybe_unused *prefixes[PREFIXES_MAX]; 15 16 static int __maybe_unused nr_prefixes; 16 17 17 - static const char *short_opts = "mv"; 18 + static const char *short_opts = "hmv"; 18 19 static const struct option long_opts[] = { 20 + {"help", 0, NULL, 'h'}, 19 21 {"movable-node", 0, NULL, 'm'}, 20 22 {"verbose", 0, NULL, 'v'}, 21 23 {NULL, 0, NULL, 0} 22 24 }; 23 25 24 26 static const char * const help_opts[] = { 27 + "display this help message and exit", 25 28 "disallow allocations from regions marked as hotplugged\n\t\t\t" 26 29 "by simulating enabling the \"movable_node\" kernel\n\t\t\t" 27 30 "parameter", ··· 61 58 memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE; 62 59 } 63 60 61 + static inline void fill_memblock(void) 62 + { 63 + memset(memory_block.base, 1, MEM_SIZE); 64 + } 65 + 64 66 void setup_memblock(void) 65 67 { 66 68 reset_memblock_regions(); 67 69 memblock_add((phys_addr_t)memory_block.base, MEM_SIZE); 70 + fill_memblock(); 71 + } 72 + 73 + /** 74 + * setup_numa_memblock: 75 + * Set up a memory layout with multiple NUMA nodes in a previously allocated 76 + * dummy physical memory. 77 + * @node_fracs: an array representing the fraction of MEM_SIZE contained in 78 + * each node in basis point units (one hundredth of 1% or 1/10000). 79 + * For example, if node 0 should contain 1/8 of MEM_SIZE, 80 + * node_fracs[0] = 1250. 81 + * 82 + * The nids will be set to 0 through NUMA_NODES - 1. 83 + */ 84 + void setup_numa_memblock(const unsigned int node_fracs[]) 85 + { 86 + phys_addr_t base; 87 + int flags; 88 + 89 + reset_memblock_regions(); 90 + base = (phys_addr_t)memory_block.base; 91 + flags = (movable_node_is_enabled()) ? 
MEMBLOCK_NONE : MEMBLOCK_HOTPLUG; 92 + 93 + for (int i = 0; i < NUMA_NODES; i++) { 94 + assert(node_fracs[i] <= BASIS); 95 + phys_addr_t size = MEM_SIZE * node_fracs[i] / BASIS; 96 + 97 + memblock_add_node(base, size, i, flags); 98 + base += size; 99 + } 100 + fill_memblock(); 68 101 } 69 102 70 103 void dummy_physical_memory_init(void) 71 104 { 72 105 memory_block.base = malloc(MEM_SIZE); 73 106 assert(memory_block.base); 107 + fill_memblock(); 74 108 } 75 109 76 110 void dummy_physical_memory_cleanup(void)
+82 -4
tools/testing/memblock/tests/common.h
··· 10 10 #include <linux/printk.h> 11 11 #include <../selftests/kselftest.h> 12 12 13 - #define MEM_SIZE SZ_16K 13 + #define MEM_SIZE SZ_16K 14 + #define NUMA_NODES 8 15 + 16 + enum test_flags { 17 + /* No special request. */ 18 + TEST_F_NONE = 0x0, 19 + /* Perform raw allocations (no zeroing of memory). */ 20 + TEST_F_RAW = 0x1, 21 + }; 14 22 15 23 /** 16 24 * ASSERT_EQ(): 17 25 * Check the condition 18 26 * @_expected == @_seen 19 - * If false, print failed test message (if in VERBOSE mode) and then assert 27 + * If false, print failed test message (if running with --verbose) and then 28 + * assert. 20 29 */ 21 30 #define ASSERT_EQ(_expected, _seen) do { \ 22 31 if ((_expected) != (_seen)) \ ··· 37 28 * ASSERT_NE(): 38 29 * Check the condition 39 30 * @_expected != @_seen 40 - * If false, print failed test message (if in VERBOSE mode) and then assert 31 + * If false, print failed test message (if running with --verbose) and then 32 + * assert. 41 33 */ 42 34 #define ASSERT_NE(_expected, _seen) do { \ 43 35 if ((_expected) == (_seen)) \ ··· 50 40 * ASSERT_LT(): 51 41 * Check the condition 52 42 * @_expected < @_seen 53 - * If false, print failed test message (if in VERBOSE mode) and then assert 43 + * If false, print failed test message (if running with --verbose) and then 44 + * assert. 54 45 */ 55 46 #define ASSERT_LT(_expected, _seen) do { \ 56 47 if ((_expected) >= (_seen)) \ 57 48 test_fail(); \ 58 49 assert((_expected) < (_seen)); \ 50 + } while (0) 51 + 52 + /** 53 + * ASSERT_LE(): 54 + * Check the condition 55 + * @_expected <= @_seen 56 + * If false, print failed test message (if running with --verbose) and then 57 + * assert. 58 + */ 59 + #define ASSERT_LE(_expected, _seen) do { \ 60 + if ((_expected) > (_seen)) \ 61 + test_fail(); \ 62 + assert((_expected) <= (_seen)); \ 63 + } while (0) 64 + 65 + /** 66 + * ASSERT_MEM_EQ(): 67 + * Check that the first @_size bytes of @_seen are all equal to @_expected. 
68 + * If false, print failed test message (if running with --verbose) and then 69 + * assert. 70 + */ 71 + #define ASSERT_MEM_EQ(_seen, _expected, _size) do { \ 72 + for (int _i = 0; _i < (_size); _i++) { \ 73 + ASSERT_EQ(((char *)_seen)[_i], (_expected)); \ 74 + } \ 75 + } while (0) 76 + 77 + /** 78 + * ASSERT_MEM_NE(): 79 + * Check that none of the first @_size bytes of @_seen are equal to @_expected. 80 + * If false, print failed test message (if running with --verbose) and then 81 + * assert. 82 + */ 83 + #define ASSERT_MEM_NE(_seen, _expected, _size) do { \ 84 + for (int _i = 0; _i < (_size); _i++) { \ 85 + ASSERT_NE(((char *)_seen)[_i], (_expected)); \ 86 + } \ 59 87 } while (0) 60 88 61 89 #define PREFIX_PUSH() prefix_push(__func__) ··· 113 65 phys_addr_t size; 114 66 }; 115 67 68 + static inline phys_addr_t __maybe_unused region_end(struct memblock_region *rgn) 69 + { 70 + return rgn->base + rgn->size; 71 + } 72 + 116 73 void reset_memblock_regions(void); 117 74 void reset_memblock_attributes(void); 118 75 void setup_memblock(void); 76 + void setup_numa_memblock(const unsigned int node_fracs[]); 119 77 void dummy_physical_memory_init(void); 120 78 void dummy_physical_memory_cleanup(void); 121 79 void parse_args(int argc, char **argv); ··· 137 83 { 138 84 test_pass(); 139 85 prefix_pop(); 86 + } 87 + 88 + static inline void run_top_down(int (*func)()) 89 + { 90 + memblock_set_bottom_up(false); 91 + prefix_push("top-down"); 92 + func(); 93 + prefix_pop(); 94 + } 95 + 96 + static inline void run_bottom_up(int (*func)()) 97 + { 98 + memblock_set_bottom_up(true); 99 + prefix_push("bottom-up"); 100 + func(); 101 + prefix_pop(); 102 + } 103 + 104 + static inline void assert_mem_content(void *mem, int size, int flags) 105 + { 106 + if (flags & TEST_F_RAW) 107 + ASSERT_MEM_NE(mem, 0, size); 108 + else 109 + ASSERT_MEM_EQ(mem, 0, size); 140 110 } 141 111 142 112 #endif