Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memblock: stop using implicit alignment to SMP_CACHE_BYTES

When memblock allocation APIs are called with align = 0, the alignment
is implicitly set to SMP_CACHE_BYTES.

Implicit alignment is done deep in the memblock allocator and it can
come as a surprise. Not that such an alignment would be wrong even
when used incorrectly but it is better to be explicit for the sake of
clarity and the principle of the least surprise.

Replace all such uses of memblock APIs with the 'align' parameter
explicitly set to SMP_CACHE_BYTES and stop implicit alignment assignment
in the memblock internal allocation functions.

For the case when memblock APIs are used via helper functions, e.g.
iommu_arena_new_node() in Alpha, the helper functions were detected with
Coccinelle's help and then manually examined and updated where
appropriate.

The direct memblock APIs users were updated using the semantic patch below:

@@
expression size, min_addr, max_addr, nid;
@@
(
|
- memblock_alloc_try_nid_raw(size, 0, min_addr, max_addr, nid)
+ memblock_alloc_try_nid_raw(size, SMP_CACHE_BYTES, min_addr, max_addr,
nid)
|
- memblock_alloc_try_nid_nopanic(size, 0, min_addr, max_addr, nid)
+ memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES, min_addr, max_addr,
nid)
|
- memblock_alloc_try_nid(size, 0, min_addr, max_addr, nid)
+ memblock_alloc_try_nid(size, SMP_CACHE_BYTES, min_addr, max_addr, nid)
|
- memblock_alloc(size, 0)
+ memblock_alloc(size, SMP_CACHE_BYTES)
|
- memblock_alloc_raw(size, 0)
+ memblock_alloc_raw(size, SMP_CACHE_BYTES)
|
- memblock_alloc_from(size, 0, min_addr)
+ memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr)
|
- memblock_alloc_nopanic(size, 0)
+ memblock_alloc_nopanic(size, SMP_CACHE_BYTES)
|
- memblock_alloc_low(size, 0)
+ memblock_alloc_low(size, SMP_CACHE_BYTES)
|
- memblock_alloc_low_nopanic(size, 0)
+ memblock_alloc_low_nopanic(size, SMP_CACHE_BYTES)
|
- memblock_alloc_from_nopanic(size, 0, min_addr)
+ memblock_alloc_from_nopanic(size, SMP_CACHE_BYTES, min_addr)
|
- memblock_alloc_node(size, 0, nid)
+ memblock_alloc_node(size, SMP_CACHE_BYTES, nid)
)

[mhocko@suse.com: changelog update]
[akpm@linux-foundation.org: coding-style fixes]
[rppt@linux.ibm.com: fix missed uses of implicit alignment]
Link: http://lkml.kernel.org/r/20181016133656.GA10925@rapoport-lnx
Link: http://lkml.kernel.org/r/1538687224-17535-1-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Suggested-by: Michal Hocko <mhocko@suse.com>
Acked-by: Paul Burton <paul.burton@mips.com> [MIPS]
Acked-by: Michael Ellerman <mpe@ellerman.id.au> [powerpc]
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Mike Rapoport and committed by
Linus Torvalds
7e1c4e27 530d4c0c

+120 -96
+2 -1
arch/alpha/kernel/core_apecs.c
··· 346 346 * Window 1 is direct access 1GB at 1GB 347 347 * Window 2 is scatter-gather 8MB at 8MB (for isa) 348 348 */ 349 - hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); 349 + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 350 + SMP_CACHE_BYTES); 350 351 hose->sg_pci = NULL; 351 352 __direct_map_base = 0x40000000; 352 353 __direct_map_size = 0x40000000;
+2 -1
arch/alpha/kernel/core_lca.c
··· 275 275 * Note that we do not try to save any of the DMA window CSRs 276 276 * before setting them, since we cannot read those CSRs on LCA. 277 277 */ 278 - hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); 278 + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 279 + SMP_CACHE_BYTES); 279 280 hose->sg_pci = NULL; 280 281 __direct_map_base = 0x40000000; 281 282 __direct_map_size = 0x40000000;
+2 -2
arch/alpha/kernel/core_marvel.c
··· 82 82 char *name; 83 83 84 84 sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port); 85 - name = memblock_alloc(strlen(tmp) + 1, 0); 85 + name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES); 86 86 strcpy(name, tmp); 87 87 88 88 return name; ··· 117 117 return NULL; 118 118 } 119 119 120 - io7 = memblock_alloc(sizeof(*io7), 0); 120 + io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES); 121 121 io7->pe = pe; 122 122 raw_spin_lock_init(&io7->irq_lock); 123 123
+4 -2
arch/alpha/kernel/core_mcpcia.c
··· 364 364 * Window 1 is scatter-gather (up to) 1GB at 1GB (for pci) 365 365 * Window 2 is direct access 2GB at 2GB 366 366 */ 367 - hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); 367 + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 368 + SMP_CACHE_BYTES); 368 369 hose->sg_pci = iommu_arena_new(hose, 0x40000000, 369 - size_for_memory(0x40000000), 0); 370 + size_for_memory(0x40000000), 371 + SMP_CACHE_BYTES); 370 372 371 373 __direct_map_base = 0x80000000; 372 374 __direct_map_size = 0x80000000;
+1 -1
arch/alpha/kernel/core_t2.c
··· 351 351 352 352 /* Note we can only do 1 SG window, as the other is for direct, so 353 353 do an ISA SG area, especially for the floppy. */ 354 - hose->sg_isa = iommu_arena_new(hose, base, length, 0); 354 + hose->sg_isa = iommu_arena_new(hose, base, length, SMP_CACHE_BYTES); 355 355 hose->sg_pci = NULL; 356 356 357 357 temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
+4 -2
arch/alpha/kernel/core_titan.c
··· 316 316 * Window 1 is direct access 1GB at 2GB 317 317 * Window 2 is scatter-gather 1GB at 3GB 318 318 */ 319 - hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); 319 + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 320 + SMP_CACHE_BYTES); 320 321 hose->sg_isa->align_entry = 8; /* 64KB for ISA */ 321 322 322 - hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0); 323 + hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 324 + SMP_CACHE_BYTES); 323 325 hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */ 324 326 325 327 port->wsba[0].csr = hose->sg_isa->dma_base | 3;
+4 -2
arch/alpha/kernel/core_tsunami.c
··· 319 319 * NOTE: we need the align_entry settings for Acer devices on ES40, 320 320 * specifically floppy and IDE when memory is larger than 2GB. 321 321 */ 322 - hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); 322 + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 323 + SMP_CACHE_BYTES); 323 324 /* Initially set for 4 PTEs, but will be overridden to 64K for ISA. */ 324 325 hose->sg_isa->align_entry = 4; 325 326 326 327 hose->sg_pci = iommu_arena_new(hose, 0x40000000, 327 - size_for_memory(0x40000000), 0); 328 + size_for_memory(0x40000000), 329 + SMP_CACHE_BYTES); 328 330 hose->sg_pci->align_entry = 4; /* Tsunami caches 4 PTEs at a time */ 329 331 330 332 __direct_map_base = 0x80000000;
+4 -2
arch/alpha/kernel/core_wildfire.c
··· 111 111 * ??? We ought to scale window 3 memory. 112 112 * 113 113 */ 114 - hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); 115 - hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0); 114 + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 115 + SMP_CACHE_BYTES); 116 + hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 117 + SMP_CACHE_BYTES); 116 118 117 119 pci = WILDFIRE_pci(qbbno, hoseno); 118 120
+2 -2
arch/alpha/kernel/pci-noop.c
··· 33 33 { 34 34 struct pci_controller *hose; 35 35 36 - hose = memblock_alloc(sizeof(*hose), 0); 36 + hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); 37 37 38 38 *hose_tail = hose; 39 39 hose_tail = &hose->next; ··· 44 44 struct resource * __init 45 45 alloc_resource(void) 46 46 { 47 - return memblock_alloc(sizeof(struct resource), 0); 47 + return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); 48 48 } 49 49 50 50 SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,
+2 -2
arch/alpha/kernel/pci.c
··· 392 392 { 393 393 struct pci_controller *hose; 394 394 395 - hose = memblock_alloc(sizeof(*hose), 0); 395 + hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); 396 396 397 397 *hose_tail = hose; 398 398 hose_tail = &hose->next; ··· 403 403 struct resource * __init 404 404 alloc_resource(void) 405 405 { 406 - return memblock_alloc(sizeof(struct resource), 0); 406 + return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); 407 407 } 408 408 409 409
+2 -2
arch/alpha/kernel/pci_iommu.c
··· 79 79 printk("%s: couldn't allocate arena from node %d\n" 80 80 " falling back to system-wide allocation\n", 81 81 __func__, nid); 82 - arena = memblock_alloc(sizeof(*arena), 0); 82 + arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES); 83 83 } 84 84 85 85 arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid); ··· 92 92 93 93 #else /* CONFIG_DISCONTIGMEM */ 94 94 95 - arena = memblock_alloc(sizeof(*arena), 0); 95 + arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES); 96 96 arena->ptes = memblock_alloc_from(mem_size, align, 0); 97 97 98 98 #endif /* CONFIG_DISCONTIGMEM */
+2 -2
arch/arm/kernel/setup.c
··· 856 856 */ 857 857 boot_alias_start = phys_to_idmap(start); 858 858 if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) { 859 - res = memblock_alloc(sizeof(*res), 0); 859 + res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); 860 860 res->name = "System RAM (boot alias)"; 861 861 res->start = boot_alias_start; 862 862 res->end = phys_to_idmap(end); ··· 864 864 request_resource(&iomem_resource, res); 865 865 } 866 866 867 - res = memblock_alloc(sizeof(*res), 0); 867 + res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); 868 868 res->name = "System RAM"; 869 869 res->start = start; 870 870 res->end = end;
+5 -3
arch/arm/mach-omap2/omap_hwmod.c
··· 726 726 u64 size; 727 727 int i; 728 728 729 - provider = memblock_alloc(sizeof(*provider), 0); 729 + provider = memblock_alloc(sizeof(*provider), SMP_CACHE_BYTES); 730 730 if (!provider) 731 731 return -ENOMEM; 732 732 ··· 736 736 of_property_count_elems_of_size(np, "reg", sizeof(u32)) / 2; 737 737 738 738 provider->addr = 739 - memblock_alloc(sizeof(void *) * provider->num_addrs, 0); 739 + memblock_alloc(sizeof(void *) * provider->num_addrs, 740 + SMP_CACHE_BYTES); 740 741 if (!provider->addr) 741 742 return -ENOMEM; 742 743 743 744 provider->size = 744 - memblock_alloc(sizeof(u32) * provider->num_addrs, 0); 745 + memblock_alloc(sizeof(u32) * provider->num_addrs, 746 + SMP_CACHE_BYTES); 745 747 if (!provider->size) 746 748 return -ENOMEM; 747 749
+1 -1
arch/arm64/kernel/setup.c
··· 218 218 num_standard_resources = memblock.memory.cnt; 219 219 standard_resources = memblock_alloc_low(num_standard_resources * 220 220 sizeof(*standard_resources), 221 - 0); 221 + SMP_CACHE_BYTES); 222 222 223 223 for_each_memblock(memory, region) { 224 224 res = &standard_resources[i++];
+2 -2
arch/ia64/kernel/mca.c
··· 361 361 362 362 #define IA64_LOG_ALLOCATE(it, size) \ 363 363 {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \ 364 - (ia64_err_rec_t *)memblock_alloc(size, 0); \ 364 + (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); \ 365 365 ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \ 366 - (ia64_err_rec_t *)memblock_alloc(size, 0);} 366 + (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);} 367 367 #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock) 368 368 #define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s) 369 369 #define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
+4 -2
arch/ia64/mm/tlb.c
··· 59 59 void __init 60 60 mmu_context_init (void) 61 61 { 62 - ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, 0); 63 - ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, 0); 62 + ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, 63 + SMP_CACHE_BYTES); 64 + ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, 65 + SMP_CACHE_BYTES); 64 66 } 65 67 66 68 /*
+3 -1
arch/ia64/sn/kernel/io_common.c
··· 391 391 if (node >= num_online_nodes()) /* Headless/memless IO nodes */ 392 392 node = 0; 393 393 394 - hubdev_info = (struct hubdev_info *)memblock_alloc_node(size, 0, node); 394 + hubdev_info = (struct hubdev_info *)memblock_alloc_node(size, 395 + SMP_CACHE_BYTES, 396 + node); 395 397 396 398 npda->pdinfo = (void *)hubdev_info; 397 399 }
+3 -2
arch/ia64/sn/kernel/setup.c
··· 511 511 */ 512 512 for_each_online_node(cnode) { 513 513 nodepdaindr[cnode] = 514 - memblock_alloc_node(sizeof(nodepda_t), 0, cnode); 514 + memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, 515 + cnode); 515 516 memset(nodepdaindr[cnode]->phys_cpuid, -1, 516 517 sizeof(nodepdaindr[cnode]->phys_cpuid)); 517 518 spin_lock_init(&nodepdaindr[cnode]->ptc_lock); ··· 523 522 */ 524 523 for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) 525 524 nodepdaindr[cnode] = 526 - memblock_alloc_node(sizeof(nodepda_t), 0, 0); 525 + memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, 0); 527 526 528 527 /* 529 528 * Now copy the array of nodepda pointers to each nodepda.
+1 -1
arch/m68k/sun3/sun3dvma.c
··· 268 268 list_add(&(hole->list), &hole_list); 269 269 270 270 iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long), 271 - 0); 271 + SMP_CACHE_BYTES); 272 272 273 273 dvma_unmap_iommu(DVMA_START, DVMA_SIZE); 274 274
+1 -1
arch/microblaze/mm/init.c
··· 376 376 if (mem_init_done) 377 377 p = kzalloc(size, mask); 378 378 else { 379 - p = memblock_alloc(size, 0); 379 + p = memblock_alloc(size, SMP_CACHE_BYTES); 380 380 if (p) 381 381 memset(p, 0, size); 382 382 }
+1 -1
arch/mips/kernel/setup.c
··· 916 916 if (end >= HIGHMEM_START) 917 917 end = HIGHMEM_START - 1; 918 918 919 - res = memblock_alloc(sizeof(struct resource), 0); 919 + res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); 920 920 921 921 res->start = start; 922 922 res->end = end;
+1 -1
arch/powerpc/kernel/paca.c
··· 198 198 paca_nr_cpu_ids = nr_cpu_ids; 199 199 200 200 paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids; 201 - paca_ptrs = __va(memblock_phys_alloc(paca_ptrs_size, 0)); 201 + paca_ptrs = __va(memblock_phys_alloc(paca_ptrs_size, SMP_CACHE_BYTES)); 202 202 memset(paca_ptrs, 0x88, paca_ptrs_size); 203 203 } 204 204
+2 -1
arch/powerpc/kernel/pci_32.c
··· 203 203 struct property* of_prop; 204 204 struct device_node *dn; 205 205 206 - of_prop = memblock_alloc(sizeof(struct property) + 256, 0); 206 + of_prop = memblock_alloc(sizeof(struct property) + 256, 207 + SMP_CACHE_BYTES); 207 208 dn = of_find_node_by_path("/"); 208 209 if (dn) { 209 210 memset(of_prop, -1, sizeof(struct property) + 256);
+1 -1
arch/powerpc/lib/alloc.c
··· 14 14 if (slab_is_available()) 15 15 p = kzalloc(size, mask); 16 16 else { 17 - p = memblock_alloc(size, 0); 17 + p = memblock_alloc(size, SMP_CACHE_BYTES); 18 18 } 19 19 return p; 20 20 }
+4 -3
arch/powerpc/mm/mmu_context_nohash.c
··· 461 461 /* 462 462 * Allocate the maps used by context management 463 463 */ 464 - context_map = memblock_alloc(CTX_MAP_SIZE, 0); 465 - context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1), 0); 464 + context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); 465 + context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1), 466 + SMP_CACHE_BYTES); 466 467 #ifdef CONFIG_SMP 467 - stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, 0); 468 + stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); 468 469 469 470 cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE, 470 471 "powerpc/mmu/ctx:prepare",
+1 -1
arch/powerpc/platforms/powermac/nvram.c
··· 513 513 printk(KERN_ERR "nvram: no address\n"); 514 514 return -EINVAL; 515 515 } 516 - nvram_image = memblock_alloc(NVRAM_SIZE, 0); 516 + nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES); 517 517 nvram_data = ioremap(addr, NVRAM_SIZE*2); 518 518 nvram_naddrs = 1; /* Make sure we get the correct case */ 519 519
+3 -3
arch/powerpc/platforms/powernv/pci-ioda.c
··· 3769 3769 phb_id = be64_to_cpup(prop64); 3770 3770 pr_debug(" PHB-ID : 0x%016llx\n", phb_id); 3771 3771 3772 - phb = memblock_alloc(sizeof(*phb), 0); 3772 + phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES); 3773 3773 3774 3774 /* Allocate PCI controller */ 3775 3775 phb->hose = hose = pcibios_alloc_controller(np); ··· 3815 3815 else 3816 3816 phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE; 3817 3817 3818 - phb->diag_data = memblock_alloc(phb->diag_data_size, 0); 3818 + phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES); 3819 3819 3820 3820 /* Parse 32-bit and IO ranges (if any) */ 3821 3821 pci_process_bridge_OF_ranges(hose, np, !hose->global_number); ··· 3874 3874 } 3875 3875 pemap_off = size; 3876 3876 size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe); 3877 - aux = memblock_alloc(size, 0); 3877 + aux = memblock_alloc(size, SMP_CACHE_BYTES); 3878 3878 phb->ioda.pe_alloc = aux; 3879 3879 phb->ioda.m64_segmap = aux + m64map_off; 3880 3880 phb->ioda.m32_segmap = aux + m32map_off;
+1 -1
arch/powerpc/sysdev/msi_bitmap.c
··· 128 128 if (bmp->bitmap_from_slab) 129 129 bmp->bitmap = kzalloc(size, GFP_KERNEL); 130 130 else { 131 - bmp->bitmap = memblock_alloc(size, 0); 131 + bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES); 132 132 /* the bitmap won't be freed from memblock allocator */ 133 133 kmemleak_not_leak(bmp->bitmap); 134 134 }
+1 -1
arch/um/drivers/net_kern.c
··· 650 650 return 1; 651 651 } 652 652 653 - new = memblock_alloc(sizeof(*new), 0); 653 + new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); 654 654 655 655 INIT_LIST_HEAD(&new->list); 656 656 new->index = n;
+1 -1
arch/um/drivers/vector_kern.c
··· 1580 1580 str, error); 1581 1581 return 1; 1582 1582 } 1583 - new = memblock_alloc(sizeof(*new), 0); 1583 + new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); 1584 1584 INIT_LIST_HEAD(&new->list); 1585 1585 new->unit = n; 1586 1586 new->arguments = str;
+1 -1
arch/um/kernel/initrd.c
··· 36 36 return 0; 37 37 } 38 38 39 - area = memblock_alloc(size, 0); 39 + area = memblock_alloc(size, SMP_CACHE_BYTES); 40 40 41 41 if (load_initrd(initrd, area, size) == -1) 42 42 return 0;
+1 -1
arch/unicore32/kernel/setup.c
··· 206 206 if (mi->bank[i].size == 0) 207 207 continue; 208 208 209 - res = memblock_alloc_low(sizeof(*res), 0); 209 + res = memblock_alloc_low(sizeof(*res), SMP_CACHE_BYTES); 210 210 res->name = "System RAM"; 211 211 res->start = mi->bank[i].start; 212 212 res->end = mi->bank[i].start + mi->bank[i].size - 1;
+1 -1
arch/x86/kernel/acpi/boot.c
··· 934 934 */ 935 935 #define HPET_RESOURCE_NAME_SIZE 9 936 936 hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE, 937 - 0); 937 + SMP_CACHE_BYTES); 938 938 939 939 hpet_res->name = (void *)&hpet_res[1]; 940 940 hpet_res->flags = IORESOURCE_MEM;
+1 -1
arch/x86/kernel/apic/io_apic.c
··· 2578 2578 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); 2579 2579 n *= nr_ioapics; 2580 2580 2581 - mem = memblock_alloc(n, 0); 2581 + mem = memblock_alloc(n, SMP_CACHE_BYTES); 2582 2582 res = (void *)mem; 2583 2583 2584 2584 mem += sizeof(struct resource) * nr_ioapics;
+2 -1
arch/x86/kernel/e820.c
··· 1093 1093 struct resource *res; 1094 1094 u64 end; 1095 1095 1096 - res = memblock_alloc(sizeof(*res) * e820_table->nr_entries, 0); 1096 + res = memblock_alloc(sizeof(*res) * e820_table->nr_entries, 1097 + SMP_CACHE_BYTES); 1097 1098 e820_res = res; 1098 1099 1099 1100 for (i = 0; i < e820_table->nr_entries; i++) {
+1 -1
arch/x86/platform/olpc/olpc_dt.c
··· 141 141 * fast enough on the platforms we care about while minimizing 142 142 * wasted bootmem) and hand off chunks of it to callers. 143 143 */ 144 - res = memblock_alloc(chunk_size, 0); 144 + res = memblock_alloc(chunk_size, SMP_CACHE_BYTES); 145 145 BUG_ON(!res); 146 146 prom_early_allocated += chunk_size; 147 147 memset(res, 0, chunk_size);
+1 -1
arch/xtensa/platforms/iss/network.c
··· 646 646 return 1; 647 647 } 648 648 649 - new = memblock_alloc(sizeof(*new), 0); 649 + new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); 650 650 if (new == NULL) { 651 651 pr_err("Alloc_bootmem failed\n"); 652 652 return 1;
+1 -1
drivers/clk/ti/clk.c
··· 342 342 { 343 343 struct clk_iomap *io; 344 344 345 - io = memblock_alloc(sizeof(*io), 0); 345 + io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES); 346 346 347 347 io->mem = mem; 348 348
+1 -1
drivers/firmware/efi/memmap.c
··· 15 15 16 16 static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size) 17 17 { 18 - return memblock_phys_alloc(size, 0); 18 + return memblock_phys_alloc(size, SMP_CACHE_BYTES); 19 19 } 20 20 21 21 static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
+2 -1
drivers/firmware/memmap.c
··· 333 333 { 334 334 struct firmware_map_entry *entry; 335 335 336 - entry = memblock_alloc(sizeof(struct firmware_map_entry), 0); 336 + entry = memblock_alloc(sizeof(struct firmware_map_entry), 337 + SMP_CACHE_BYTES); 337 338 if (WARN_ON(!entry)) 338 339 return -ENOMEM; 339 340
+1 -1
drivers/macintosh/smu.c
··· 492 492 goto fail_np; 493 493 } 494 494 495 - smu = memblock_alloc(sizeof(struct smu_device), 0); 495 + smu = memblock_alloc(sizeof(struct smu_device), SMP_CACHE_BYTES); 496 496 497 497 spin_lock_init(&smu->lock); 498 498 INIT_LIST_HEAD(&smu->cmd_list);
+1
drivers/of/of_reserved_mem.c
··· 36 36 * panic()s on allocation failure. 37 37 */ 38 38 end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end; 39 + align = !align ? SMP_CACHE_BYTES : align; 39 40 base = __memblock_alloc_base(size, align, end); 40 41 if (!base) 41 42 return -ENOMEM;
+2 -1
include/linux/memblock.h
··· 406 406 static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size, 407 407 int nid) 408 408 { 409 - return memblock_alloc_try_nid_nopanic(size, 0, MEMBLOCK_LOW_LIMIT, 409 + return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES, 410 + MEMBLOCK_LOW_LIMIT, 410 411 MEMBLOCK_ALLOC_ACCESSIBLE, nid); 411 412 } 412 413
+8 -5
init/main.c
··· 375 375 static void __init setup_command_line(char *command_line) 376 376 { 377 377 saved_command_line = 378 - memblock_alloc(strlen(boot_command_line) + 1, 0); 378 + memblock_alloc(strlen(boot_command_line) + 1, SMP_CACHE_BYTES); 379 379 initcall_command_line = 380 - memblock_alloc(strlen(boot_command_line) + 1, 0); 381 - static_command_line = memblock_alloc(strlen(command_line) + 1, 0); 380 + memblock_alloc(strlen(boot_command_line) + 1, SMP_CACHE_BYTES); 381 + static_command_line = memblock_alloc(strlen(command_line) + 1, 382 + SMP_CACHE_BYTES); 382 383 strcpy(saved_command_line, boot_command_line); 383 384 strcpy(static_command_line, command_line); 384 385 } ··· 774 773 str_entry = strsep(&str, ","); 775 774 if (str_entry) { 776 775 pr_debug("blacklisting initcall %s\n", str_entry); 777 - entry = memblock_alloc(sizeof(*entry), 0); 778 - entry->buf = memblock_alloc(strlen(str_entry) + 1, 0); 776 + entry = memblock_alloc(sizeof(*entry), 777 + SMP_CACHE_BYTES); 778 + entry->buf = memblock_alloc(strlen(str_entry) + 1, 779 + SMP_CACHE_BYTES); 779 780 strcpy(entry->buf, str_entry); 780 781 list_add(&entry->next, &blacklisted_initcalls); 781 782 }
+2 -1
kernel/power/snapshot.c
··· 963 963 BUG_ON(!region); 964 964 } else { 965 965 /* This allocation cannot fail */ 966 - region = memblock_alloc(sizeof(struct nosave_region), 0); 966 + region = memblock_alloc(sizeof(struct nosave_region), 967 + SMP_CACHE_BYTES); 967 968 } 968 969 region->start_pfn = start_pfn; 969 970 region->end_pfn = end_pfn;
+1 -1
lib/cpumask.c
··· 163 163 */ 164 164 void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) 165 165 { 166 - *mask = memblock_alloc(cpumask_size(), 0); 166 + *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES); 167 167 } 168 168 169 169 /**
-8
mm/memblock.c
··· 1247 1247 { 1248 1248 phys_addr_t found; 1249 1249 1250 - if (!align) 1251 - align = SMP_CACHE_BYTES; 1252 - 1253 1250 found = memblock_find_in_range_node(size, align, start, end, nid, 1254 1251 flags); 1255 1252 if (found && !memblock_reserve(found, size)) { ··· 1340 1343 * The allocation is performed from memory region limited by 1341 1344 * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE. 1342 1345 * 1343 - * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0. 1344 - * 1345 1346 * The phys address of allocated boot memory block is converted to virtual and 1346 1347 * allocated memory is reset to 0. 1347 1348 * ··· 1368 1373 */ 1369 1374 if (WARN_ON_ONCE(slab_is_available())) 1370 1375 return kzalloc_node(size, GFP_NOWAIT, nid); 1371 - 1372 - if (!align) 1373 - align = SMP_CACHE_BYTES; 1374 1376 1375 1377 if (max_addr > memblock.current_limit) 1376 1378 max_addr = memblock.current_limit;
+4 -2
mm/page_alloc.c
··· 7710 7710 size = bucketsize << log2qty; 7711 7711 if (flags & HASH_EARLY) { 7712 7712 if (flags & HASH_ZERO) 7713 - table = memblock_alloc_nopanic(size, 0); 7713 + table = memblock_alloc_nopanic(size, 7714 + SMP_CACHE_BYTES); 7714 7715 else 7715 - table = memblock_alloc_raw(size, 0); 7716 + table = memblock_alloc_raw(size, 7717 + SMP_CACHE_BYTES); 7716 7718 } else if (hashdist) { 7717 7719 table = __vmalloc(size, gfp_flags, PAGE_KERNEL); 7718 7720 } else {
+20 -18
mm/percpu.c
··· 1102 1102 1103 1103 /* allocate chunk */ 1104 1104 chunk = memblock_alloc(sizeof(struct pcpu_chunk) + 1105 - BITS_TO_LONGS(region_size >> PAGE_SHIFT), 1106 - 0); 1105 + BITS_TO_LONGS(region_size >> PAGE_SHIFT), 1106 + SMP_CACHE_BYTES); 1107 1107 1108 1108 INIT_LIST_HEAD(&chunk->list); 1109 1109 ··· 1114 1114 chunk->nr_pages = region_size >> PAGE_SHIFT; 1115 1115 region_bits = pcpu_chunk_map_bits(chunk); 1116 1116 1117 - chunk->alloc_map = memblock_alloc(BITS_TO_LONGS(region_bits) * 1118 - sizeof(chunk->alloc_map[0]), 0); 1119 - chunk->bound_map = memblock_alloc(BITS_TO_LONGS(region_bits + 1) * 1120 - sizeof(chunk->bound_map[0]), 0); 1121 - chunk->md_blocks = memblock_alloc(pcpu_chunk_nr_blocks(chunk) * 1122 - sizeof(chunk->md_blocks[0]), 0); 1117 + chunk->alloc_map = memblock_alloc(BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]), 1118 + SMP_CACHE_BYTES); 1119 + chunk->bound_map = memblock_alloc(BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]), 1120 + SMP_CACHE_BYTES); 1121 + chunk->md_blocks = memblock_alloc(pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]), 1122 + SMP_CACHE_BYTES); 1123 1123 pcpu_init_md_blocks(chunk); 1124 1124 1125 1125 /* manage populated page bitmap */ ··· 2075 2075 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 2076 2076 2077 2077 /* process group information and build config tables accordingly */ 2078 - group_offsets = memblock_alloc(ai->nr_groups * 2079 - sizeof(group_offsets[0]), 0); 2080 - group_sizes = memblock_alloc(ai->nr_groups * 2081 - sizeof(group_sizes[0]), 0); 2082 - unit_map = memblock_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); 2083 - unit_off = memblock_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); 2078 + group_offsets = memblock_alloc(ai->nr_groups * sizeof(group_offsets[0]), 2079 + SMP_CACHE_BYTES); 2080 + group_sizes = memblock_alloc(ai->nr_groups * sizeof(group_sizes[0]), 2081 + SMP_CACHE_BYTES); 2082 + unit_map = memblock_alloc(nr_cpu_ids * sizeof(unit_map[0]), 2083 + SMP_CACHE_BYTES); 
2084 + unit_off = memblock_alloc(nr_cpu_ids * sizeof(unit_off[0]), 2085 + SMP_CACHE_BYTES); 2084 2086 2085 2087 for (cpu = 0; cpu < nr_cpu_ids; cpu++) 2086 2088 unit_map[cpu] = UINT_MAX; ··· 2146 2144 * empty chunks. 2147 2145 */ 2148 2146 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 2149 - pcpu_slot = memblock_alloc( 2150 - pcpu_nr_slots * sizeof(pcpu_slot[0]), 0); 2147 + pcpu_slot = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]), 2148 + SMP_CACHE_BYTES); 2151 2149 for (i = 0; i < pcpu_nr_slots; i++) 2152 2150 INIT_LIST_HEAD(&pcpu_slot[i]); 2153 2151 ··· 2460 2458 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2461 2459 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 2462 2460 2463 - areas = memblock_alloc_nopanic(areas_size, 0); 2461 + areas = memblock_alloc_nopanic(areas_size, SMP_CACHE_BYTES); 2464 2462 if (!areas) { 2465 2463 rc = -ENOMEM; 2466 2464 goto out_free; ··· 2601 2599 /* unaligned allocations can't be freed, round up to page size */ 2602 2600 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 2603 2601 sizeof(pages[0])); 2604 - pages = memblock_alloc(pages_size, 0); 2602 + pages = memblock_alloc(pages_size, SMP_CACHE_BYTES); 2605 2603 2606 2604 /* allocate pages */ 2607 2605 j = 0;
+2 -1
mm/sparse.c
··· 68 68 if (slab_is_available()) 69 69 section = kzalloc_node(array_size, GFP_KERNEL, nid); 70 70 else 71 - section = memblock_alloc_node(array_size, 0, nid); 71 + section = memblock_alloc_node(array_size, SMP_CACHE_BYTES, 72 + nid); 72 73 73 74 return section; 74 75 }