Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memblock: rename free_all_bootmem to memblock_free_all

The conversion is done using

sed -i 's@free_all_bootmem@memblock_free_all@' \
$(git grep -l free_all_bootmem)

Link: http://lkml.kernel.org/r/1536927045-23536-26-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Serge Semin <fancer.lancer@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Mike Rapoport; committed by Linus Torvalds.
c6ffc5ca 53ab85eb

+39 -39
+1 -1
arch/alpha/mm/init.c
··· 282 282 { 283 283 set_max_mapnr(max_low_pfn); 284 284 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 285 - free_all_bootmem(); 285 + memblock_free_all(); 286 286 mem_init_print_info(NULL); 287 287 } 288 288
+1 -1
arch/arc/mm/init.c
··· 218 218 free_highmem_page(pfn_to_page(tmp)); 219 219 #endif 220 220 221 - free_all_bootmem(); 221 + memblock_free_all(); 222 222 mem_init_print_info(NULL); 223 223 } 224 224
+1 -1
arch/arm/mm/init.c
··· 508 508 509 509 /* this will put all unused low memory onto the freelists */ 510 510 free_unused_memmap(); 511 - free_all_bootmem(); 511 + memblock_free_all(); 512 512 513 513 #ifdef CONFIG_SA1111 514 514 /* now that our DMA memory is actually so designated, we can free it */
+1 -1
arch/arm64/mm/init.c
··· 599 599 free_unused_memmap(); 600 600 #endif 601 601 /* this will put all unused low memory onto the freelists */ 602 - free_all_bootmem(); 602 + memblock_free_all(); 603 603 604 604 kexec_reserve_crashkres_pages(); 605 605
+1 -1
arch/c6x/mm/init.c
··· 62 62 high_memory = (void *)(memory_end & PAGE_MASK); 63 63 64 64 /* this will put all memory onto the freelists */ 65 - free_all_bootmem(); 65 + memblock_free_all(); 66 66 67 67 mem_init_print_info(NULL); 68 68 }
+1 -1
arch/h8300/mm/init.c
··· 96 96 max_mapnr = MAP_NR(high_memory); 97 97 98 98 /* this will put all low memory onto the freelists */ 99 - free_all_bootmem(); 99 + memblock_free_all(); 100 100 101 101 mem_init_print_info(NULL); 102 102 }
+1 -1
arch/hexagon/mm/init.c
··· 68 68 void __init mem_init(void) 69 69 { 70 70 /* No idea where this is actually declared. Seems to evade LXR. */ 71 - free_all_bootmem(); 71 + memblock_free_all(); 72 72 mem_init_print_info(NULL); 73 73 74 74 /*
+1 -1
arch/ia64/mm/init.c
··· 627 627 628 628 set_max_mapnr(max_low_pfn); 629 629 high_memory = __va(max_low_pfn * PAGE_SIZE); 630 - free_all_bootmem(); 630 + memblock_free_all(); 631 631 mem_init_print_info(NULL); 632 632 633 633 /*
+1 -1
arch/m68k/mm/init.c
··· 140 140 void __init mem_init(void) 141 141 { 142 142 /* this will put all memory onto the freelists */ 143 - free_all_bootmem(); 143 + memblock_free_all(); 144 144 init_pointer_tables(); 145 145 mem_init_print_info(NULL); 146 146 }
+1 -1
arch/microblaze/mm/init.c
··· 204 204 high_memory = (void *)__va(memory_start + lowmem_size - 1); 205 205 206 206 /* this will put all memory onto the freelists */ 207 - free_all_bootmem(); 207 + memblock_free_all(); 208 208 #ifdef CONFIG_HIGHMEM 209 209 highmem_setup(); 210 210 #endif
+1 -1
arch/mips/loongson64/loongson-3/numa.c
··· 272 272 void __init mem_init(void) 273 273 { 274 274 high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); 275 - free_all_bootmem(); 275 + memblock_free_all(); 276 276 setup_zero_pages(); /* This comes from node 0 */ 277 277 mem_init_print_info(NULL); 278 278 }
+1 -1
arch/mips/mm/init.c
··· 463 463 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 464 464 465 465 maar_init(); 466 - free_all_bootmem(); 466 + memblock_free_all(); 467 467 setup_zero_pages(); /* Setup zeroed pages. */ 468 468 mem_init_free_highmem(); 469 469 mem_init_print_info(NULL);
+1 -1
arch/mips/sgi-ip27/ip27-memory.c
··· 475 475 void __init mem_init(void) 476 476 { 477 477 high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); 478 - free_all_bootmem(); 478 + memblock_free_all(); 479 479 setup_zero_pages(); /* This comes from node 0 */ 480 480 mem_init_print_info(NULL); 481 481 }
+1 -1
arch/nds32/mm/init.c
··· 192 192 free_highmem(); 193 193 194 194 /* this will put all low memory onto the freelists */ 195 - free_all_bootmem(); 195 + memblock_free_all(); 196 196 mem_init_print_info(NULL); 197 197 198 198 pr_info("virtual kernel memory layout:\n"
+1 -1
arch/nios2/mm/init.c
··· 73 73 high_memory = __va(end_mem); 74 74 75 75 /* this will put all memory onto the freelists */ 76 - free_all_bootmem(); 76 + memblock_free_all(); 77 77 mem_init_print_info(NULL); 78 78 } 79 79
+1 -1
arch/openrisc/mm/init.c
··· 213 213 memset((void *)empty_zero_page, 0, PAGE_SIZE); 214 214 215 215 /* this will put all low memory onto the freelists */ 216 - free_all_bootmem(); 216 + memblock_free_all(); 217 217 218 218 mem_init_print_info(NULL); 219 219
+1 -1
arch/parisc/mm/init.c
··· 621 621 622 622 high_memory = __va((max_pfn << PAGE_SHIFT)); 623 623 set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1); 624 - free_all_bootmem(); 624 + memblock_free_all(); 625 625 626 626 #ifdef CONFIG_PA11 627 627 if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
+1 -1
arch/powerpc/mm/mem.c
··· 349 349 350 350 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 351 351 set_max_mapnr(max_pfn); 352 - free_all_bootmem(); 352 + memblock_free_all(); 353 353 354 354 #ifdef CONFIG_HIGHMEM 355 355 {
+1 -1
arch/riscv/mm/init.c
··· 55 55 #endif /* CONFIG_FLATMEM */ 56 56 57 57 high_memory = (void *)(__va(PFN_PHYS(max_low_pfn))); 58 - free_all_bootmem(); 58 + memblock_free_all(); 59 59 60 60 mem_init_print_info(NULL); 61 61 }
+1 -1
arch/s390/mm/init.c
··· 139 139 cmma_init(); 140 140 141 141 /* this will put all low memory onto the freelists */ 142 - free_all_bootmem(); 142 + memblock_free_all(); 143 143 setup_zero_pages(); /* Setup zeroed pages. */ 144 144 145 145 cmma_init_nodat();
+1 -1
arch/sh/mm/init.c
··· 350 350 high_memory = max_t(void *, high_memory, 351 351 __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT)); 352 352 353 - free_all_bootmem(); 353 + memblock_free_all(); 354 354 355 355 /* Set this up early, so we can take care of the zero page */ 356 356 cpu_cache_init();
+1 -1
arch/sparc/mm/init_32.c
··· 277 277 278 278 max_mapnr = last_valid_pfn - pfn_base; 279 279 high_memory = __va(max_low_pfn << PAGE_SHIFT); 280 - free_all_bootmem(); 280 + memblock_free_all(); 281 281 282 282 for (i = 0; sp_banks[i].num_bytes != 0; i++) { 283 283 unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
+2 -2
arch/sparc/mm/init_64.c
··· 2545 2545 { 2546 2546 high_memory = __va(last_valid_pfn << PAGE_SHIFT); 2547 2547 2548 - free_all_bootmem(); 2548 + memblock_free_all(); 2549 2549 2550 2550 /* 2551 2551 * Must be done after boot memory is put on freelist, because here we 2552 2552 * might set fields in deferred struct pages that have not yet been 2553 - * initialized, and free_all_bootmem() initializes all the reserved 2553 + * initialized, and memblock_free_all() initializes all the reserved 2554 2554 * deferred pages for us. 2555 2555 */ 2556 2556 register_page_bootmem_info();
+1 -1
arch/um/kernel/mem.c
··· 51 51 uml_reserved = brk_end; 52 52 53 53 /* this will put all low memory onto the freelists */ 54 - free_all_bootmem(); 54 + memblock_free_all(); 55 55 max_low_pfn = totalram_pages; 56 56 max_pfn = totalram_pages; 57 57 mem_init_print_info(NULL);
+1 -1
arch/unicore32/mm/init.c
··· 286 286 free_unused_memmap(&meminfo); 287 287 288 288 /* this will put all unused low memory onto the freelists */ 289 - free_all_bootmem(); 289 + memblock_free_all(); 290 290 291 291 mem_init_print_info(NULL); 292 292 printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+1 -1
arch/x86/mm/highmem_32.c
··· 111 111 112 112 /* 113 113 * Explicitly reset zone->managed_pages because set_highmem_pages_init() 114 - * is invoked before free_all_bootmem() 114 + * is invoked before memblock_free_all() 115 115 */ 116 116 reset_all_zones_managed_pages(); 117 117 for_each_zone(zone) {
+2 -2
arch/x86/mm/init_32.c
··· 771 771 #endif 772 772 /* 773 773 * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to 774 - * be done before free_all_bootmem(). Memblock use free low memory for 774 + * be done before memblock_free_all(). Memblock use free low memory for 775 775 * temporary data (see find_range_array()) and for this purpose can use 776 776 * pages that was already passed to the buddy allocator, hence marked as 777 777 * not accessible in the page tables when compiled with ··· 781 781 set_highmem_pages_init(); 782 782 783 783 /* this will put all low memory onto the freelists */ 784 - free_all_bootmem(); 784 + memblock_free_all(); 785 785 786 786 after_bootmem = 1; 787 787 x86_init.hyper.init_after_bootmem();
+2 -2
arch/x86/mm/init_64.c
··· 1188 1188 /* clear_bss() already clear the empty_zero_page */ 1189 1189 1190 1190 /* this will put all memory onto the freelists */ 1191 - free_all_bootmem(); 1191 + memblock_free_all(); 1192 1192 after_bootmem = 1; 1193 1193 x86_init.hyper.init_after_bootmem(); 1194 1194 1195 1195 /* 1196 1196 * Must be done after boot memory is put on freelist, because here we 1197 1197 * might set fields in deferred struct pages that have not yet been 1198 - * initialized, and free_all_bootmem() initializes all the reserved 1198 + * initialized, and memblock_free_all() initializes all the reserved 1199 1199 * deferred pages for us. 1200 1200 */ 1201 1201 register_page_bootmem_info();
+1 -1
arch/x86/xen/mmu_pv.c
··· 864 864 * The init_mm pagetable is really pinned as soon as its created, but 865 865 * that's before we have page structures to store the bits. So do all 866 866 * the book-keeping now once struct pages for allocated pages are 867 - * initialized. This happens only after free_all_bootmem() is called. 867 + * initialized. This happens only after memblock_free_all() is called. 868 868 */ 869 869 static void __init xen_after_bootmem(void) 870 870 {
+1 -1
arch/xtensa/mm/init.c
··· 152 152 max_mapnr = max_pfn - ARCH_PFN_OFFSET; 153 153 high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); 154 154 155 - free_all_bootmem(); 155 + memblock_free_all(); 156 156 157 157 mem_init_print_info(NULL); 158 158 pr_info("virtual kernel memory layout:\n"
+1 -1
include/linux/bootmem.h
··· 26 26 */ 27 27 extern unsigned long long max_possible_pfn; 28 28 29 - extern unsigned long free_all_bootmem(void); 29 + extern unsigned long memblock_free_all(void); 30 30 extern void reset_node_managed_pages(pg_data_t *pgdat); 31 31 extern void reset_all_zones_managed_pages(void); 32 32
+1 -1
mm/memblock.c
··· 1360 1360 /* 1361 1361 * Detect any accidental use of these APIs after slab is ready, as at 1362 1362 * this moment memblock may be deinitialized already and its 1363 - * internal data may be destroyed (after execution of free_all_bootmem) 1363 + * internal data may be destroyed (after execution of memblock_free_all) 1364 1364 */ 1365 1365 if (WARN_ON_ONCE(slab_is_available())) 1366 1366 return kzalloc_node(size, GFP_NOWAIT, nid);
+2 -2
mm/nobootmem.c
··· 111 111 } 112 112 113 113 /** 114 - * free_all_bootmem - release free pages to the buddy allocator 114 + * memblock_free_all - release free pages to the buddy allocator 115 115 * 116 116 * Return: the number of pages actually released. 117 117 */ 118 - unsigned long __init free_all_bootmem(void) 118 + unsigned long __init memblock_free_all(void) 119 119 { 120 120 unsigned long pages; 121 121
+1 -1
mm/page_alloc.c
··· 5476 5476 5477 5477 /* 5478 5478 * Initially all pages are reserved - free ones are freed 5479 - * up by free_all_bootmem() once the early boot process is 5479 + * up by memblock_free_all() once the early boot process is 5480 5480 * done. Non-atomic initialization, single-pass. 5481 5481 */ 5482 5482 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+1 -1
mm/page_poison.c
··· 21 21 { 22 22 /* 23 23 * Assumes that debug_pagealloc_enabled is set before 24 - * free_all_bootmem. 24 + * memblock_free_all. 25 25 * Page poisoning is debug page alloc for some arches. If 26 26 * either of those options are enabled, enable poisoning. 27 27 */