Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memblock: rename memblock_free to memblock_phys_free

Since memblock_free() operates on a physical range, make its name
reflect it and rename it to memblock_phys_free(), so it will be a
logical counterpart to memblock_phys_alloc().

The callers are updated with the below semantic patch:

@@
expression addr;
expression size;
@@
- memblock_free(addr, size);
+ memblock_phys_free(addr, size);

Link: https://lkml.kernel.org/r/20210930185031.18648-6-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Juergen Gross <jgross@suse.com>
Cc: Shahab Vahedi <Shahab.Vahedi@synopsys.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Mike Rapoport; committed by Linus Torvalds.
3ecc6834 621d9739

+79 -76
+2 -1
arch/alpha/kernel/core_irongate.c
··· 233 233 unsigned long size; 234 234 235 235 size = initrd_end - initrd_start; 236 - memblock_free(__pa(initrd_start), PAGE_ALIGN(size)); 236 + memblock_phys_free(__pa(initrd_start), 237 + PAGE_ALIGN(size)); 237 238 if (!move_initrd(pci_mem)) 238 239 printk("irongate_init_arch: initrd too big " 239 240 "(%ldK)\ndisabling initrd\n",
+1 -1
arch/arc/mm/init.c
··· 173 173 #ifdef CONFIG_HIGHMEM 174 174 unsigned long tmp; 175 175 176 - memblock_free(high_mem_start, high_mem_sz); 176 + memblock_phys_free(high_mem_start, high_mem_sz); 177 177 for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++) 178 178 free_highmem_page(pfn_to_page(tmp)); 179 179 #endif
+1 -1
arch/arm/mach-hisi/platmcpm.c
··· 339 339 err_sysctrl: 340 340 iounmap(relocation); 341 341 err_reloc: 342 - memblock_free(hip04_boot_method[0], hip04_boot_method[1]); 342 + memblock_phys_free(hip04_boot_method[0], hip04_boot_method[1]); 343 343 err: 344 344 return ret; 345 345 }
+1 -1
arch/arm/mm/init.c
··· 158 158 panic("Failed to steal %pa bytes at %pS\n", 159 159 &size, (void *)_RET_IP_); 160 160 161 - memblock_free(phys, size); 161 + memblock_phys_free(phys, size); 162 162 memblock_remove(phys, size); 163 163 164 164 return phys;
+2 -2
arch/arm64/mm/mmu.c
··· 738 738 cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); 739 739 init_mm.pgd = swapper_pg_dir; 740 740 741 - memblock_free(__pa_symbol(init_pg_dir), 742 - __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir)); 741 + memblock_phys_free(__pa_symbol(init_pg_dir), 742 + __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir)); 743 743 744 744 memblock_allow_resize(); 745 745 }
+1 -1
arch/mips/mm/init.c
··· 529 529 530 530 static void __init pcpu_fc_free(void *ptr, size_t size) 531 531 { 532 - memblock_free(__pa(ptr), size); 532 + memblock_phys_free(__pa(ptr), size); 533 533 } 534 534 535 535 void __init setup_per_cpu_areas(void)
+3 -3
arch/mips/sgi-ip30/ip30-setup.c
··· 69 69 total_mem += size; 70 70 71 71 if (addr >= IP30_REAL_MEMORY_START) 72 - memblock_free(addr, size); 72 + memblock_phys_free(addr, size); 73 73 else if ((addr + size) > IP30_REAL_MEMORY_START) 74 - memblock_free(IP30_REAL_MEMORY_START, 75 - size - IP30_MAX_PROM_MEMORY); 74 + memblock_phys_free(IP30_REAL_MEMORY_START, 75 + size - IP30_MAX_PROM_MEMORY); 76 76 } 77 77 pr_info("Detected %luMB of physical memory.\n", MEM_SHIFT(total_mem)); 78 78 }
+2 -2
arch/powerpc/kernel/dt_cpu_ftrs.c
··· 1095 1095 1096 1096 cpufeatures_setup_finished(); 1097 1097 1098 - memblock_free(__pa(dt_cpu_features), 1099 - sizeof(struct dt_cpu_feature)*nr_dt_cpu_features); 1098 + memblock_phys_free(__pa(dt_cpu_features), 1099 + sizeof(struct dt_cpu_feature) * nr_dt_cpu_features); 1100 1100 1101 1101 return 0; 1102 1102 }
+4 -4
arch/powerpc/kernel/paca.c
··· 322 322 323 323 new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids; 324 324 if (new_ptrs_size < paca_ptrs_size) 325 - memblock_free(__pa(paca_ptrs) + new_ptrs_size, 326 - paca_ptrs_size - new_ptrs_size); 325 + memblock_phys_free(__pa(paca_ptrs) + new_ptrs_size, 326 + paca_ptrs_size - new_ptrs_size); 327 327 328 328 paca_nr_cpu_ids = nr_cpu_ids; 329 329 paca_ptrs_size = new_ptrs_size; ··· 331 331 #ifdef CONFIG_PPC_BOOK3S_64 332 332 if (early_radix_enabled()) { 333 333 /* Ugly fixup, see new_slb_shadow() */ 334 - memblock_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr), 335 - sizeof(struct slb_shadow)); 334 + memblock_phys_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr), 335 + sizeof(struct slb_shadow)); 336 336 paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL; 337 337 } 338 338 #endif
+1 -1
arch/powerpc/kernel/setup-common.c
··· 825 825 set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]); 826 826 } 827 827 828 - memblock_free(__pa(cpu_to_phys_id), nr_cpu_ids * sizeof(u32)); 828 + memblock_phys_free(__pa(cpu_to_phys_id), nr_cpu_ids * sizeof(u32)); 829 829 cpu_to_phys_id = NULL; 830 830 } 831 831 #endif
+1 -1
arch/powerpc/kernel/setup_64.c
··· 812 812 813 813 static void __init pcpu_free_bootmem(void *ptr, size_t size) 814 814 { 815 - memblock_free(__pa(ptr), size); 815 + memblock_phys_free(__pa(ptr), size); 816 816 } 817 817 818 818 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
+1 -1
arch/powerpc/platforms/powernv/pci-ioda.c
··· 2981 2981 if (!phb->hose) { 2982 2982 pr_err(" Can't allocate PCI controller for %pOF\n", 2983 2983 np); 2984 - memblock_free(__pa(phb), sizeof(struct pnv_phb)); 2984 + memblock_phys_free(__pa(phb), sizeof(struct pnv_phb)); 2985 2985 return; 2986 2986 } 2987 2987
+2 -1
arch/powerpc/platforms/pseries/svm.c
··· 56 56 return; 57 57 58 58 59 - memblock_free(__pa(vstart), PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); 59 + memblock_phys_free(__pa(vstart), 60 + PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); 60 61 panic("SVM: Cannot allocate SWIOTLB buffer"); 61 62 } 62 63
+3 -2
arch/riscv/kernel/setup.c
··· 230 230 231 231 /* Clean-up any unused pre-allocated resources */ 232 232 if (res_idx >= 0) 233 - memblock_free(__pa(mem_res), (res_idx + 1) * sizeof(*mem_res)); 233 + memblock_phys_free(__pa(mem_res), 234 + (res_idx + 1) * sizeof(*mem_res)); 234 235 return; 235 236 236 237 error: 237 238 /* Better an empty resource tree than an inconsistent one */ 238 239 release_child_resources(&iomem_resource); 239 - memblock_free(__pa(mem_res), mem_res_sz); 240 + memblock_phys_free(__pa(mem_res), mem_res_sz); 240 241 } 241 242 242 243
+4 -4
arch/s390/kernel/setup.c
··· 693 693 } 694 694 695 695 if (register_memory_notifier(&kdump_mem_nb)) { 696 - memblock_free(crash_base, crash_size); 696 + memblock_phys_free(crash_base, crash_size); 697 697 return; 698 698 } 699 699 ··· 748 748 749 749 get_mem_detect_reserved(&start, &size); 750 750 if (size) 751 - memblock_free(start, size); 751 + memblock_phys_free(start, size); 752 752 } 753 753 754 754 static const char * __init get_mem_info_source(void) ··· 793 793 if (initrd_data.start && initrd_data.size && 794 794 !memblock_is_region_memory(initrd_data.start, initrd_data.size)) { 795 795 pr_err("The initial RAM disk does not fit into the memory\n"); 796 - memblock_free(initrd_data.start, initrd_data.size); 796 + memblock_phys_free(initrd_data.start, initrd_data.size); 797 797 initrd_start = initrd_end = 0; 798 798 } 799 799 #endif ··· 890 890 891 891 if (stsi(vmms, 3, 2, 2) == 0 && vmms->count) 892 892 add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count); 893 - memblock_free((unsigned long) vmms, PAGE_SIZE); 893 + memblock_phys_free((unsigned long)vmms, PAGE_SIZE); 894 894 } 895 895 896 896 /*
+2 -2
arch/s390/kernel/smp.c
··· 723 723 /* Get the CPU registers */ 724 724 smp_save_cpu_regs(sa, addr, is_boot_cpu, page); 725 725 } 726 - memblock_free(page, PAGE_SIZE); 726 + memblock_phys_free(page, PAGE_SIZE); 727 727 diag_amode31_ops.diag308_reset(); 728 728 pcpu_set_smt(0); 729 729 } ··· 880 880 881 881 /* Add CPUs present at boot */ 882 882 __smp_rescan_cpus(info, true); 883 - memblock_free((unsigned long)info, sizeof(*info)); 883 + memblock_phys_free((unsigned long)info, sizeof(*info)); 884 884 } 885 885 886 886 /*
+1 -1
arch/s390/kernel/uv.c
··· 64 64 } 65 65 66 66 if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) { 67 - memblock_free(uv_stor_base, uv_info.uv_base_stor_len); 67 + memblock_phys_free(uv_stor_base, uv_info.uv_base_stor_len); 68 68 goto fail; 69 69 } 70 70
+1 -1
arch/s390/mm/kasan_init.c
··· 399 399 400 400 void __init kasan_free_early_identity(void) 401 401 { 402 - memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos); 402 + memblock_phys_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos); 403 403 }
+1 -1
arch/sh/boards/mach-ap325rxa/setup.c
··· 560 560 if (!phys) 561 561 panic("Failed to allocate CEU memory\n"); 562 562 563 - memblock_free(phys, size); 563 + memblock_phys_free(phys, size); 564 564 memblock_remove(phys, size); 565 565 566 566 ceu_dma_membase = phys;
+2 -2
arch/sh/boards/mach-ecovec24/setup.c
··· 1502 1502 if (!phys) 1503 1503 panic("Failed to allocate CEU0 memory\n"); 1504 1504 1505 - memblock_free(phys, size); 1505 + memblock_phys_free(phys, size); 1506 1506 memblock_remove(phys, size); 1507 1507 ceu0_dma_membase = phys; 1508 1508 ··· 1510 1510 if (!phys) 1511 1511 panic("Failed to allocate CEU1 memory\n"); 1512 1512 1513 - memblock_free(phys, size); 1513 + memblock_phys_free(phys, size); 1514 1514 memblock_remove(phys, size); 1515 1515 ceu1_dma_membase = phys; 1516 1516 }
+1 -1
arch/sh/boards/mach-kfr2r09/setup.c
··· 633 633 if (!phys) 634 634 panic("Failed to allocate CEU memory\n"); 635 635 636 - memblock_free(phys, size); 636 + memblock_phys_free(phys, size); 637 637 memblock_remove(phys, size); 638 638 639 639 ceu_dma_membase = phys;
+1 -1
arch/sh/boards/mach-migor/setup.c
··· 633 633 if (!phys) 634 634 panic("Failed to allocate CEU memory\n"); 635 635 636 - memblock_free(phys, size); 636 + memblock_phys_free(phys, size); 637 637 memblock_remove(phys, size); 638 638 639 639 ceu_dma_membase = phys;
+2 -2
arch/sh/boards/mach-se/7724/setup.c
··· 966 966 if (!phys) 967 967 panic("Failed to allocate CEU0 memory\n"); 968 968 969 - memblock_free(phys, size); 969 + memblock_phys_free(phys, size); 970 970 memblock_remove(phys, size); 971 971 ceu0_dma_membase = phys; 972 972 ··· 974 974 if (!phys) 975 975 panic("Failed to allocate CEU1 memory\n"); 976 976 977 - memblock_free(phys, size); 977 + memblock_phys_free(phys, size); 978 978 memblock_remove(phys, size); 979 979 ceu1_dma_membase = phys; 980 980 }
+1 -1
arch/sparc/kernel/smp_64.c
··· 1567 1567 1568 1568 static void __init pcpu_free_bootmem(void *ptr, size_t size) 1569 1569 { 1570 - memblock_free(__pa(ptr), size); 1570 + memblock_phys_free(__pa(ptr), size); 1571 1571 } 1572 1572 1573 1573 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+1 -1
arch/um/kernel/mem.c
··· 47 47 */ 48 48 brk_end = (unsigned long) UML_ROUND_UP(sbrk(0)); 49 49 map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0); 50 - memblock_free(__pa(brk_end), uml_reserved - brk_end); 50 + memblock_phys_free(__pa(brk_end), uml_reserved - brk_end); 51 51 uml_reserved = brk_end; 52 52 53 53 /* this will put all low memory onto the freelists */
+2 -2
arch/x86/kernel/setup.c
··· 322 322 323 323 relocate_initrd(); 324 324 325 - memblock_free(ramdisk_image, ramdisk_end - ramdisk_image); 325 + memblock_phys_free(ramdisk_image, ramdisk_end - ramdisk_image); 326 326 } 327 327 328 328 #else ··· 521 521 } 522 522 523 523 if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) { 524 - memblock_free(crash_base, crash_size); 524 + memblock_phys_free(crash_base, crash_size); 525 525 return; 526 526 } 527 527
+1 -1
arch/x86/mm/init.c
··· 618 618 */ 619 619 addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start, 620 620 map_end); 621 - memblock_free(addr, PMD_SIZE); 621 + memblock_phys_free(addr, PMD_SIZE); 622 622 real_end = addr + PMD_SIZE; 623 623 624 624 /* step_size need to be small so pgt_buf from BRK could cover it */
+3 -3
arch/x86/xen/mmu_pv.c
··· 1025 1025 for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) 1026 1026 make_lowmem_page_readwrite(vaddr); 1027 1027 1028 - memblock_free(paddr, size); 1028 + memblock_phys_free(paddr, size); 1029 1029 } 1030 1030 1031 1031 static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin) ··· 1151 1151 xen_cleanhighmap(addr, addr + size); 1152 1152 size = PAGE_ALIGN(xen_start_info->nr_pages * 1153 1153 sizeof(unsigned long)); 1154 - memblock_free(__pa(addr), size); 1154 + memblock_phys_free(__pa(addr), size); 1155 1155 } else { 1156 1156 xen_cleanmfnmap(addr); 1157 1157 } ··· 1955 1955 pfn_end = p2m_pfn_end; 1956 1956 } 1957 1957 1958 - memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn)); 1958 + memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn)); 1959 1959 while (pfn < pfn_end) { 1960 1960 if (pfn == p2m_pfn) { 1961 1961 pfn = p2m_pfn_end;
+3 -3
arch/x86/xen/setup.c
··· 153 153 break; 154 154 } 155 155 } 156 - memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns)); 156 + memblock_phys_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns)); 157 157 } 158 158 159 159 /* ··· 719 719 return; 720 720 721 721 xen_relocate_p2m(); 722 - memblock_free(start, size); 722 + memblock_phys_free(start, size); 723 723 } 724 724 725 725 /** ··· 885 885 xen_phys_memcpy(new_area, start, size); 886 886 pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n", 887 887 start, start + size, new_area, new_area + size); 888 - memblock_free(start, size); 888 + memblock_phys_free(start, size); 889 889 boot_params.hdr.ramdisk_image = new_area; 890 890 boot_params.ext_ramdisk_image = new_area >> 32; 891 891 }
+1 -1
drivers/base/arch_numa.c
··· 166 166 167 167 static void __init pcpu_fc_free(void *ptr, size_t size) 168 168 { 169 - memblock_free(__pa(ptr), size); 169 + memblock_phys_free(__pa(ptr), size); 170 170 } 171 171 172 172 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+1 -1
drivers/firmware/efi/memmap.c
··· 35 35 if (slab_is_available()) 36 36 memblock_free_late(phys, size); 37 37 else 38 - memblock_free(phys, size); 38 + memblock_phys_free(phys, size); 39 39 } else if (flags & EFI_MEMMAP_SLAB) { 40 40 struct page *p = pfn_to_page(PHYS_PFN(phys)); 41 41 unsigned int order = get_order(size);
+1 -2
drivers/of/kexec.c
··· 171 171 if (ret) 172 172 return ret; 173 173 174 - return memblock_free(addr, size); 175 - 174 + return memblock_phys_free(addr, size); 176 175 } 177 176 178 177 /**
+3 -2
drivers/of/of_reserved_mem.c
··· 46 46 if (nomap) { 47 47 err = memblock_mark_nomap(base, size); 48 48 if (err) 49 - memblock_free(base, size); 49 + memblock_phys_free(base, size); 50 50 kmemleak_ignore_phys(base); 51 51 } 52 52 ··· 284 284 if (nomap) 285 285 memblock_clear_nomap(rmem->base, rmem->size); 286 286 else 287 - memblock_free(rmem->base, rmem->size); 287 + memblock_phys_free(rmem->base, 288 + rmem->size); 288 289 } 289 290 } 290 291 }
+1 -1
drivers/s390/char/sclp_early.c
··· 139 139 } 140 140 sclp_fill_core_info(info, sccb); 141 141 out: 142 - memblock_free((unsigned long)sccb, length); 142 + memblock_phys_free((unsigned long)sccb, length); 143 143 return rc; 144 144 } 145 145
+5 -5
drivers/usb/early/xhci-dbc.c
··· 185 185 if (!seg) 186 186 return; 187 187 188 - memblock_free(seg->dma, PAGE_SIZE); 188 + memblock_phys_free(seg->dma, PAGE_SIZE); 189 189 ring->segment = NULL; 190 190 } 191 191 ··· 665 665 xdbc_free_ring(&xdbc.in_ring); 666 666 667 667 if (xdbc.table_dma) 668 - memblock_free(xdbc.table_dma, PAGE_SIZE); 668 + memblock_phys_free(xdbc.table_dma, PAGE_SIZE); 669 669 670 670 if (xdbc.out_dma) 671 - memblock_free(xdbc.out_dma, PAGE_SIZE); 671 + memblock_phys_free(xdbc.out_dma, PAGE_SIZE); 672 672 673 673 xdbc.table_base = NULL; 674 674 xdbc.out_buf = NULL; ··· 987 987 xdbc_free_ring(&xdbc.evt_ring); 988 988 xdbc_free_ring(&xdbc.out_ring); 989 989 xdbc_free_ring(&xdbc.in_ring); 990 - memblock_free(xdbc.table_dma, PAGE_SIZE); 991 - memblock_free(xdbc.out_dma, PAGE_SIZE); 990 + memblock_phys_free(xdbc.table_dma, PAGE_SIZE); 991 + memblock_phys_free(xdbc.out_dma, PAGE_SIZE); 992 992 writel(0, &xdbc.xdbc_reg->control); 993 993 early_iounmap(xdbc.xhci_base, xdbc.xhci_length); 994 994
+1 -1
drivers/xen/swiotlb-xen.c
··· 241 241 */ 242 242 rc = xen_swiotlb_fixup(start, nslabs); 243 243 if (rc) { 244 - memblock_free(__pa(start), PAGE_ALIGN(bytes)); 244 + memblock_phys_free(__pa(start), PAGE_ALIGN(bytes)); 245 245 if (nslabs > 1024 && repeat--) { 246 246 /* Min is 2MB */ 247 247 nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
+1 -1
include/linux/memblock.h
··· 103 103 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); 104 104 int memblock_add(phys_addr_t base, phys_addr_t size); 105 105 int memblock_remove(phys_addr_t base, phys_addr_t size); 106 - int memblock_free(phys_addr_t base, phys_addr_t size); 106 + int memblock_phys_free(phys_addr_t base, phys_addr_t size); 107 107 int memblock_reserve(phys_addr_t base, phys_addr_t size); 108 108 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 109 109 int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
+1 -1
init/initramfs.c
··· 607 607 unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE); 608 608 unsigned long aligned_end = ALIGN(end, PAGE_SIZE); 609 609 610 - memblock_free(__pa(aligned_start), aligned_end - aligned_start); 610 + memblock_phys_free(__pa(aligned_start), aligned_end - aligned_start); 611 611 #endif 612 612 613 613 free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
+1 -1
kernel/dma/swiotlb.c
··· 247 247 return; 248 248 249 249 fail_free_mem: 250 - memblock_free(__pa(tlb), bytes); 250 + memblock_phys_free(__pa(tlb), bytes); 251 251 fail: 252 252 pr_warn("Cannot allocate buffer"); 253 253 }
+1 -1
lib/cpumask.c
··· 188 188 */ 189 189 void __init free_bootmem_cpumask_var(cpumask_var_t mask) 190 190 { 191 - memblock_free(__pa(mask), cpumask_size()); 191 + memblock_phys_free(__pa(mask), cpumask_size()); 192 192 } 193 193 #endif 194 194
+1 -1
mm/cma.c
··· 378 378 return 0; 379 379 380 380 free_mem: 381 - memblock_free(base, size); 381 + memblock_phys_free(base, size); 382 382 err: 383 383 pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M); 384 384 return ret;
+4 -4
mm/memblock.c
··· 806 806 void __init_memblock memblock_free_ptr(void *ptr, size_t size) 807 807 { 808 808 if (ptr) 809 - memblock_free(__pa(ptr), size); 809 + memblock_phys_free(__pa(ptr), size); 810 810 } 811 811 812 812 /** 813 - * memblock_free - free boot memory block 813 + * memblock_phys_free - free boot memory block 814 814 * @base: phys starting address of the boot memory block 815 815 * @size: size of the boot memory block in bytes 816 816 * 817 817 * Free boot memory block previously allocated by memblock_alloc_xx() API. 818 818 * The freeing memory will not be released to the buddy allocator. 819 819 */ 820 - int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) 820 + int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size) 821 821 { 822 822 phys_addr_t end = base + size - 1; 823 823 ··· 1937 1937 * memmap array. 1938 1938 */ 1939 1939 if (pg < pgend) 1940 - memblock_free(pg, pgend - pg); 1940 + memblock_phys_free(pg, pgend - pg); 1941 1941 } 1942 1942 1943 1943 /*
+1 -1
mm/memory_hotplug.c
··· 2204 2204 arch_remove_memory(start, size, altmap); 2205 2205 2206 2206 if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) { 2207 - memblock_free(start, size); 2207 + memblock_phys_free(start, size); 2208 2208 memblock_remove(start, size); 2209 2209 } 2210 2210
+4 -4
mm/percpu.c
··· 2472 2472 */ 2473 2473 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 2474 2474 { 2475 - memblock_free(__pa(ai), ai->__ai_size); 2475 + memblock_phys_free(__pa(ai), ai->__ai_size); 2476 2476 } 2477 2477 2478 2478 /** ··· 3134 3134 out_free: 3135 3135 pcpu_free_alloc_info(ai); 3136 3136 if (areas) 3137 - memblock_free(__pa(areas), areas_size); 3137 + memblock_phys_free(__pa(areas), areas_size); 3138 3138 return rc; 3139 3139 } 3140 3140 #endif /* BUILD_EMBED_FIRST_CHUNK */ ··· 3256 3256 free_fn(page_address(pages[j]), PAGE_SIZE); 3257 3257 rc = -ENOMEM; 3258 3258 out_free_ar: 3259 - memblock_free(__pa(pages), pages_size); 3259 + memblock_phys_free(__pa(pages), pages_size); 3260 3260 pcpu_free_alloc_info(ai); 3261 3261 return rc; 3262 3262 } ··· 3286 3286 3287 3287 static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 3288 3288 { 3289 - memblock_free(__pa(ptr), size); 3289 + memblock_phys_free(__pa(ptr), size); 3290 3290 } 3291 3291 3292 3292 void __init setup_per_cpu_areas(void)
+1 -1
mm/sparse.c
··· 451 451 static inline void __meminit sparse_buffer_free(unsigned long size) 452 452 { 453 453 WARN_ON(!sparsemap_buf || size == 0); 454 - memblock_free(__pa(sparsemap_buf), size); 454 + memblock_phys_free(__pa(sparsemap_buf), size); 455 455 } 456 456 457 457 static void __init sparse_buffer_init(unsigned long size, int nid)