Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memblock: replace BOOTMEM_ALLOC_* with MEMBLOCK variants

Drop BOOTMEM_ALLOC_ACCESSIBLE and BOOTMEM_ALLOC_ANYWHERE in favor of
identical MEMBLOCK definitions.

Link: http://lkml.kernel.org/r/1536927045-23536-29-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Serge Semin <fancer.lancer@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Mike Rapoport and committed by Linus Torvalds.
97ad1087 bda49a81

+19 -16
+1 -1
arch/ia64/mm/discontig.c
···
 453  453
 454  454 	ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
 455  455 				     __pa(MAX_DMA_ADDRESS),
 456       -			     BOOTMEM_ALLOC_ACCESSIBLE,
      456  +			     MEMBLOCK_ALLOC_ACCESSIBLE,
 457  457 				     bestnode);
 458  458
 459  459 	return ptr;
+1 -1
arch/powerpc/kernel/setup_64.c
···
 764  764 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 765  765 {
 766  766 	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
 767       -			      BOOTMEM_ALLOC_ACCESSIBLE,
      767  +			      MEMBLOCK_ALLOC_ACCESSIBLE,
 768  768 				      early_cpu_to_node(cpu));
 769  769
 770  770 }
+1 -1
arch/sparc/kernel/smp_64.c
···
 1595  1595 			 cpu, size, __pa(ptr));
 1596  1596 	} else {
 1597  1597 		ptr = memblock_alloc_try_nid(size, align, goal,
 1598        -				     BOOTMEM_ALLOC_ACCESSIBLE, node);
       1598  +				     MEMBLOCK_ALLOC_ACCESSIBLE, node);
 1599  1599 		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
 1600  1600 			 "%016lx\n", cpu, size, node, __pa(ptr));
 1601  1601 	}
+1 -1
arch/x86/kernel/setup_percpu.c
···
 114  114 			 cpu, size, __pa(ptr));
 115  115 	} else {
 116  116 		ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
 117       -					     BOOTMEM_ALLOC_ACCESSIBLE,
      117  +					     MEMBLOCK_ALLOC_ACCESSIBLE,
 118  118 						     node);
 119  119
 120  120 		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
+2 -2
arch/x86/mm/kasan_init_64.c
···
 29  29 {
 30  30 	if (panic)
 31  31 		return memblock_alloc_try_nid(size, size,
 32      -		__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
     32  +		__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 33  33 	else
 34  34 		return memblock_alloc_try_nid_nopanic(size, size,
 35      -		__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
     35  +		__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 36  36 }
 37  37
 38  38 static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
+2 -1
mm/hugetlb.c
···
 16  16 #include <linux/cpuset.h>
 17  17 #include <linux/mutex.h>
 18  18 #include <linux/bootmem.h>
     19  +#include <linux/memblock.h>
 19  20 #include <linux/sysfs.h>
 20  21 #include <linux/slab.h>
 21  22 #include <linux/mmdebug.h>
···
 2103  2102
 2104  2103 	addr = memblock_alloc_try_nid_raw(
 2105  2104 			huge_page_size(h), huge_page_size(h),
 2106        -		0, BOOTMEM_ALLOC_ACCESSIBLE, node);
       2105  +		0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
 2107  2106 	if (addr) {
 2108  2107 		/*
 2109  2108 		 * Use the beginning of the huge page to store the
+1 -1
mm/kasan/kasan_init.c
···
 84  84 static __init void *early_alloc(size_t size, int node)
 85  85 {
 86  86 	return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
 87      -			      BOOTMEM_ALLOC_ACCESSIBLE, node);
     87  +			      MEMBLOCK_ALLOC_ACCESSIBLE, node);
 88  88 }
 89  89
 90  90 static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
+4 -4
mm/memblock.c
···
 1342  1342  * hold the requested memory.
 1343  1343  *
 1344  1344  * The allocation is performed from memory region limited by
 1345        - * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
       1345  + * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
 1346  1346  *
 1347  1347  * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0.
 1348  1348  *
···
 1429  1429  * @min_addr: the lower bound of the memory region from where the allocation
 1430  1430  *	      is preferred (phys address)
 1431  1431  * @max_addr: the upper bound of the memory region from where the allocation
 1432        - *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
       1432  + *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 1433  1433  *	      allocate only from memory limited by memblock.current_limit value
 1434  1434  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 1435  1435  *
···
 1466  1466  * @min_addr: the lower bound of the memory region from where the allocation
 1467  1467  *	      is preferred (phys address)
 1468  1468  * @max_addr: the upper bound of the memory region from where the allocation
 1469        - *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
       1469  + *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 1470  1470  *	      allocate only from memory limited by memblock.current_limit value
 1471  1471  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 1472  1472  *
···
 1501  1501  * @min_addr: the lower bound of the memory region from where the allocation
 1502  1502  *	      is preferred (phys address)
 1503  1503  * @max_addr: the upper bound of the memory region from where the allocation
 1504        - *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
       1504  + *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 1505  1505  *	      allocate only from memory limited by memblock.current_limit value
 1506  1506  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 1507  1507  *
+1 -1
mm/page_ext.c
···
 163  163
 164  164 	base = memblock_alloc_try_nid_nopanic(
 165  165 			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
 166       -		BOOTMEM_ALLOC_ACCESSIBLE, nid);
      166  +		MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 167  167 	if (!base)
 168  168 		return -ENOMEM;
 169  169 	NODE_DATA(nid)->node_page_ext = base;
+2 -1
mm/sparse-vmemmap.c
···
 21  21 #include <linux/mm.h>
 22  22 #include <linux/mmzone.h>
 23  23 #include <linux/bootmem.h>
     24  +#include <linux/memblock.h>
 24  25 #include <linux/memremap.h>
 25  26 #include <linux/highmem.h>
 26  27 #include <linux/slab.h>
···
 44  43 		unsigned long goal)
 45  44 {
 46  45 	return memblock_alloc_try_nid_raw(size, align, goal,
 47      -				  BOOTMEM_ALLOC_ACCESSIBLE, node);
     46  +				  MEMBLOCK_ALLOC_ACCESSIBLE, node);
 48  47 }
 49  48
 50  49 void * __meminit vmemmap_alloc_block(unsigned long size, int node)
+3 -2
mm/sparse.c
···
 6   6 #include <linux/slab.h>
 7   7 #include <linux/mmzone.h>
 8   8 #include <linux/bootmem.h>
     9  +#include <linux/memblock.h>
 9  10 #include <linux/compiler.h>
 10 11 #include <linux/highmem.h>
 11 12 #include <linux/export.h>
···
 394  393
 395  394 	map = memblock_alloc_try_nid(size,
 396  395 					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
 397       -				  BOOTMEM_ALLOC_ACCESSIBLE, nid);
      396  +				  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 398  397 	return map;
 399  398 }
 400  399 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
···
 408  407 	sparsemap_buf =
 409  408 		memblock_alloc_try_nid_raw(size, PAGE_SIZE,
 410  409 					__pa(MAX_DMA_ADDRESS),
 411       -				BOOTMEM_ALLOC_ACCESSIBLE, nid);
      410  +				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 412  411 	sparsemap_buf_end = sparsemap_buf + size;
 413  412
 414  413