Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memblock: Remove nid_range argument, arch provides memblock_nid_range() instead

The nid_range function-pointer argument is dropped from memblock_alloc_nid().
Instead, mm/memblock.c now provides a __weak default memblock_nid_range()
(NUMA-unaware: reports the whole range as node 0), which architectures such
as sparc64 override with their own implementation.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

+19 -17
+6 -10
arch/sparc/mm/init_64.c
··· 785 785 return -1; 786 786 } 787 787 788 - static unsigned long long nid_range(unsigned long long start, 789 - unsigned long long end, int *nid) 788 + u64 memblock_nid_range(u64 start, u64 end, int *nid) 790 789 { 791 790 *nid = find_node(start); 792 791 start += PAGE_SIZE; ··· 803 804 return start; 804 805 } 805 806 #else 806 - static unsigned long long nid_range(unsigned long long start, 807 - unsigned long long end, int *nid) 807 + u64 memblock_nid_range(u64 start, u64 end, int *nid) 808 808 { 809 809 *nid = 0; 810 810 return end; ··· 820 822 struct pglist_data *p; 821 823 822 824 #ifdef CONFIG_NEED_MULTIPLE_NODES 823 - paddr = memblock_alloc_nid(sizeof(struct pglist_data), 824 - SMP_CACHE_BYTES, nid, nid_range); 825 + paddr = memblock_alloc_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid); 825 826 if (!paddr) { 826 827 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); 827 828 prom_halt(); ··· 840 843 if (p->node_spanned_pages) { 841 844 num_pages = bootmem_bootmap_pages(p->node_spanned_pages); 842 845 843 - paddr = memblock_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid, 844 - nid_range); 846 + paddr = memblock_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid); 845 847 if (!paddr) { 846 848 prom_printf("Cannot allocate bootmap for nid[%d]\n", 847 849 nid); ··· 980 984 unsigned long this_end; 981 985 int nid; 982 986 983 - this_end = nid_range(start, end, &nid); 987 + this_end = memblock_nid_range(start, end, &nid); 984 988 985 989 numadbg("Adding active range nid[%d] " 986 990 "start[%lx] end[%lx]\n", ··· 1313 1317 unsigned long this_end; 1314 1318 int n; 1315 1319 1316 - this_end = nid_range(start, end, &n); 1320 + this_end = memblock_nid_range(start, end, &n); 1317 1321 if (n == nid) { 1318 1322 numadbg(" MATCH reserving range [%lx:%lx]\n", 1319 1323 start, this_end);
+5 -2
include/linux/memblock.h
··· 46 46 extern long memblock_remove(u64 base, u64 size); 47 47 extern long __init memblock_free(u64 base, u64 size); 48 48 extern long __init memblock_reserve(u64 base, u64 size); 49 - extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid, 50 - u64 (*nid_range)(u64, u64, int *)); 49 + extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid); 51 50 extern u64 __init memblock_alloc(u64 size, u64 align); 52 51 extern u64 __init memblock_alloc_base(u64 size, 53 52 u64, u64 max_addr); ··· 61 62 extern int memblock_is_region_reserved(u64 base, u64 size); 62 63 63 64 extern void memblock_dump_all(void); 65 + 66 + /* Provided by the architecture */ 67 + extern u64 memblock_nid_range(u64 start, u64 end, int *nid); 68 + 64 69 65 70 /* 66 71 * pfn conversion functions
+8 -5
mm/memblock.c
··· 319 319 } 320 320 321 321 static u64 __init memblock_alloc_nid_region(struct memblock_region *mp, 322 - u64 (*nid_range)(u64, u64, int *), 323 322 u64 size, u64 align, int nid) 324 323 { 325 324 u64 start, end; ··· 331 332 u64 this_end; 332 333 int this_nid; 333 334 334 - this_end = nid_range(start, end, &this_nid); 335 + this_end = memblock_nid_range(start, end, &this_nid); 335 336 if (this_nid == nid) { 336 337 u64 ret = memblock_alloc_nid_unreserved(start, this_end, 337 338 size, align); ··· 344 345 return ~(u64)0; 345 346 } 346 347 347 - u64 __init memblock_alloc_nid(u64 size, u64 align, int nid, 348 - u64 (*nid_range)(u64 start, u64 end, int *nid)) 348 + u64 __init memblock_alloc_nid(u64 size, u64 align, int nid) 349 349 { 350 350 struct memblock_type *mem = &memblock.memory; 351 351 int i; ··· 355 357 356 358 for (i = 0; i < mem->cnt; i++) { 357 359 u64 ret = memblock_alloc_nid_region(&mem->regions[i], 358 - nid_range, 359 360 size, align, nid); 360 361 if (ret != ~(u64)0) 361 362 return ret; ··· 528 531 return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; 529 532 } 530 533 534 + u64 __weak memblock_nid_range(u64 start, u64 end, int *nid) 535 + { 536 + *nid = 0; 537 + 538 + return end; 539 + }