Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

treewide: add checks for the return value of memblock_alloc*()

Add check for the return value of memblock_alloc*() functions and call
panic() in case of error. The panic message repeats the one used by
panicking memblock allocators with adjustment of parameters to include
only relevant ones.

The replacement was mostly automated with semantic patches like the one
below with manual massaging of format strings.

@@
expression ptr, size, align;
@@
ptr = memblock_alloc(size, align);
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, size, align);

[anders.roxell@linaro.org: use '%pa' with 'phys_addr_t' type]
Link: http://lkml.kernel.org/r/20190131161046.21886-1-anders.roxell@linaro.org
[rppt@linux.ibm.com: fix format strings for panics after memblock_alloc]
Link: http://lkml.kernel.org/r/1548950940-15145-1-git-send-email-rppt@linux.ibm.com
[rppt@linux.ibm.com: don't panic if the allocation in sparse_buffer_init fails]
Link: http://lkml.kernel.org/r/20190131074018.GD28876@rapoport-lnx
[akpm@linux-foundation.org: fix xtensa printk warning]
Link: http://lkml.kernel.org/r/1548057848-15136-20-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
Reviewed-by: Guo Ren <ren_guo@c-sky.com> [c-sky]
Acked-by: Paul Burton <paul.burton@mips.com> [MIPS]
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com> [s390]
Reviewed-by: Juergen Gross <jgross@suse.com> [Xen]
Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k]
Acked-by: Max Filippov <jcmvbkbc@gmail.com> [xtensa]
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Rob Herring <robh@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Mike Rapoport, committed by Linus Torvalds
8a7f97b9 a0bf842e

+411 -32
+3
arch/alpha/kernel/core_cia.c
··· 332 332 333 333 /* Use minimal 1K map. */ 334 334 ppte = memblock_alloc(CIA_BROKEN_TBIA_SIZE, 32768); 335 + if (!ppte) 336 + panic("%s: Failed to allocate %u bytes align=0x%x\n", 337 + __func__, CIA_BROKEN_TBIA_SIZE, 32768); 335 338 pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1; 336 339 337 340 for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
+6
arch/alpha/kernel/core_marvel.c
··· 83 83 84 84 sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port); 85 85 name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES); 86 + if (!name) 87 + panic("%s: Failed to allocate %zu bytes\n", __func__, 88 + strlen(tmp) + 1); 86 89 strcpy(name, tmp); 87 90 88 91 return name; ··· 121 118 } 122 119 123 120 io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES); 121 + if (!io7) 122 + panic("%s: Failed to allocate %zu bytes\n", __func__, 123 + sizeof(*io7)); 124 124 io7->pe = pe; 125 125 raw_spin_lock_init(&io7->irq_lock); 126 126
+11 -2
arch/alpha/kernel/pci-noop.c
··· 34 34 struct pci_controller *hose; 35 35 36 36 hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); 37 + if (!hose) 38 + panic("%s: Failed to allocate %zu bytes\n", __func__, 39 + sizeof(*hose)); 37 40 38 41 *hose_tail = hose; 39 42 hose_tail = &hose->next; ··· 47 44 struct resource * __init 48 45 alloc_resource(void) 49 46 { 50 - return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); 47 + void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); 48 + 49 + if (!ptr) 50 + panic("%s: Failed to allocate %zu bytes\n", __func__, 51 + sizeof(struct resource)); 52 + 53 + return ptr; 51 54 } 52 55 53 56 SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus, ··· 63 54 64 55 /* from hose or from bus.devfn */ 65 56 if (which & IOBASE_FROM_HOSE) { 66 - for (hose = hose_head; hose; hose = hose->next) 57 + for (hose = hose_head; hose; hose = hose->next) 67 58 if (hose->index == bus) 68 59 break; 69 60 if (!hose)
+10 -1
arch/alpha/kernel/pci.c
··· 393 393 struct pci_controller *hose; 394 394 395 395 hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); 396 + if (!hose) 397 + panic("%s: Failed to allocate %zu bytes\n", __func__, 398 + sizeof(*hose)); 396 399 397 400 *hose_tail = hose; 398 401 hose_tail = &hose->next; ··· 406 403 struct resource * __init 407 404 alloc_resource(void) 408 405 { 409 - return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); 406 + void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); 407 + 408 + if (!ptr) 409 + panic("%s: Failed to allocate %zu bytes\n", __func__, 410 + sizeof(struct resource)); 411 + 412 + return ptr; 410 413 } 411 414 412 415
+12
arch/alpha/kernel/pci_iommu.c
··· 80 80 " falling back to system-wide allocation\n", 81 81 __func__, nid); 82 82 arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES); 83 + if (!arena) 84 + panic("%s: Failed to allocate %zu bytes\n", __func__, 85 + sizeof(*arena)); 83 86 } 84 87 85 88 arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid); ··· 91 88 " falling back to system-wide allocation\n", 92 89 __func__, nid); 93 90 arena->ptes = memblock_alloc(mem_size, align); 91 + if (!arena->ptes) 92 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 93 + __func__, mem_size, align); 94 94 } 95 95 96 96 #else /* CONFIG_DISCONTIGMEM */ 97 97 98 98 arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES); 99 + if (!arena) 100 + panic("%s: Failed to allocate %zu bytes\n", __func__, 101 + sizeof(*arena)); 99 102 arena->ptes = memblock_alloc(mem_size, align); 103 + if (!arena->ptes) 104 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 105 + __func__, mem_size, align); 100 106 101 107 #endif /* CONFIG_DISCONTIGMEM */ 102 108
+4
arch/arc/mm/highmem.c
··· 124 124 pmd_k = pmd_offset(pud_k, kvaddr); 125 125 126 126 pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); 127 + if (!pte_k) 128 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 129 + __func__, PAGE_SIZE, PAGE_SIZE); 130 + 127 131 pmd_populate_kernel(&init_mm, pmd_k, pte_k); 128 132 return pte_k; 129 133 }
+6
arch/arm/kernel/setup.c
··· 867 867 boot_alias_start = phys_to_idmap(start); 868 868 if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) { 869 869 res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); 870 + if (!res) 871 + panic("%s: Failed to allocate %zu bytes\n", 872 + __func__, sizeof(*res)); 870 873 res->name = "System RAM (boot alias)"; 871 874 res->start = boot_alias_start; 872 875 res->end = phys_to_idmap(end); ··· 878 875 } 879 876 880 877 res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); 878 + if (!res) 879 + panic("%s: Failed to allocate %zu bytes\n", __func__, 880 + sizeof(*res)); 881 881 res->name = "System RAM"; 882 882 res->start = start; 883 883 res->end = end;
+13 -1
arch/arm/mm/mmu.c
··· 721 721 722 722 static void __init *early_alloc(unsigned long sz) 723 723 { 724 - return memblock_alloc(sz, sz); 724 + void *ptr = memblock_alloc(sz, sz); 725 + 726 + if (!ptr) 727 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 728 + __func__, sz, sz); 729 + 730 + return ptr; 725 731 } 726 732 727 733 static void *__init late_alloc(unsigned long sz) ··· 1000 994 return; 1001 995 1002 996 svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm)); 997 + if (!svm) 998 + panic("%s: Failed to allocate %zu bytes align=0x%zx\n", 999 + __func__, sizeof(*svm) * nr, __alignof__(*svm)); 1003 1000 1004 1001 for (md = io_desc; nr; md++, nr--) { 1005 1002 create_mapping(md); ··· 1025 1016 struct static_vm *svm; 1026 1017 1027 1018 svm = memblock_alloc(sizeof(*svm), __alignof__(*svm)); 1019 + if (!svm) 1020 + panic("%s: Failed to allocate %zu bytes align=0x%zx\n", 1021 + __func__, sizeof(*svm), __alignof__(*svm)); 1028 1022 1029 1023 vm = &svm->vm; 1030 1024 vm->addr = (void *)addr;
+5 -3
arch/arm64/kernel/setup.c
··· 208 208 struct memblock_region *region; 209 209 struct resource *res; 210 210 unsigned long i = 0; 211 + size_t res_size; 211 212 212 213 kernel_code.start = __pa_symbol(_text); 213 214 kernel_code.end = __pa_symbol(__init_begin - 1); ··· 216 215 kernel_data.end = __pa_symbol(_end - 1); 217 216 218 217 num_standard_resources = memblock.memory.cnt; 219 - standard_resources = memblock_alloc_low(num_standard_resources * 220 - sizeof(*standard_resources), 221 - SMP_CACHE_BYTES); 218 + res_size = num_standard_resources * sizeof(*standard_resources); 219 + standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES); 220 + if (!standard_resources) 221 + panic("%s: Failed to allocate %zu bytes\n", __func__, res_size); 222 222 223 223 for_each_memblock(memory, region) { 224 224 res = &standard_resources[i++];
+10
arch/arm64/mm/kasan_init.c
··· 40 40 void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, 41 41 __pa(MAX_DMA_ADDRESS), 42 42 MEMBLOCK_ALLOC_KASAN, node); 43 + if (!p) 44 + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n", 45 + __func__, PAGE_SIZE, PAGE_SIZE, node, 46 + __pa(MAX_DMA_ADDRESS)); 47 + 43 48 return __pa(p); 44 49 } 45 50 ··· 53 48 void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE, 54 49 __pa(MAX_DMA_ADDRESS), 55 50 MEMBLOCK_ALLOC_KASAN, node); 51 + if (!p) 52 + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n", 53 + __func__, PAGE_SIZE, PAGE_SIZE, node, 54 + __pa(MAX_DMA_ADDRESS)); 55 + 56 56 return __pa(p); 57 57 } 58 58
+4
arch/c6x/mm/dma-coherent.c
··· 138 138 139 139 dma_bitmap = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long), 140 140 sizeof(long)); 141 + if (!dma_bitmap) 142 + panic("%s: Failed to allocate %zu bytes align=0x%zx\n", 143 + __func__, BITS_TO_LONGS(dma_pages) * sizeof(long), 144 + sizeof(long)); 141 145 } 142 146 143 147 static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+3
arch/c6x/mm/init.c
··· 40 40 41 41 empty_zero_page = (unsigned long) memblock_alloc(PAGE_SIZE, 42 42 PAGE_SIZE); 43 + if (!empty_zero_page) 44 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 45 + __func__, PAGE_SIZE, PAGE_SIZE); 43 46 44 47 /* 45 48 * Set up user data space
+5
arch/csky/mm/highmem.c
··· 141 141 for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { 142 142 if (pmd_none(*pmd)) { 143 143 pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); 144 + if (!pte) 145 + panic("%s: Failed to allocate %lu bytes align=%lx\n", 146 + __func__, PAGE_SIZE, 147 + PAGE_SIZE); 148 + 144 149 set_pmd(pmd, __pmd(__pa(pte))); 145 150 BUG_ON(pte != pte_offset_kernel(pmd, 0)); 146 151 }
+3
arch/h8300/mm/init.c
··· 68 68 * to a couple of allocated pages. 69 69 */ 70 70 empty_zero_page = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE); 71 + if (!empty_zero_page) 72 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 73 + __func__, PAGE_SIZE, PAGE_SIZE); 71 74 72 75 /* 73 76 * Set up SFC/DFC registers (user data space).
+4
arch/m68k/atari/stram.c
··· 97 97 pr_debug("atari_stram pool: kernel in ST-RAM, using alloc_bootmem!\n"); 98 98 stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size, 99 99 PAGE_SIZE); 100 + if (!stram_pool.start) 101 + panic("%s: Failed to allocate %lu bytes align=%lx\n", 102 + __func__, pool_size, PAGE_SIZE); 103 + 100 104 stram_pool.end = stram_pool.start + pool_size - 1; 101 105 request_resource(&iomem_resource, &stram_pool); 102 106 stram_virt_offset = 0;
+3
arch/m68k/mm/init.c
··· 94 94 high_memory = (void *) end_mem; 95 95 96 96 empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 97 + if (!empty_zero_page) 98 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 99 + __func__, PAGE_SIZE, PAGE_SIZE); 97 100 98 101 /* 99 102 * Set up SFC/DFC registers (user data space).
+6
arch/m68k/mm/mcfmmu.c
··· 44 44 int i; 45 45 46 46 empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE); 47 + if (!empty_zero_page) 48 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 49 + __func__, PAGE_SIZE, PAGE_SIZE); 47 50 48 51 pg_dir = swapper_pg_dir; 49 52 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); ··· 54 51 size = num_pages * sizeof(pte_t); 55 52 size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1); 56 53 next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE); 54 + if (!next_pgtable) 55 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 56 + __func__, size, PAGE_SIZE); 57 57 58 58 bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK; 59 59 pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
+9
arch/m68k/mm/motorola.c
··· 55 55 pte_t *ptablep; 56 56 57 57 ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); 58 + if (!ptablep) 59 + panic("%s: Failed to allocate %lu bytes align=%lx\n", 60 + __func__, PAGE_SIZE, PAGE_SIZE); 58 61 59 62 clear_page(ptablep); 60 63 __flush_page_to_ram(ptablep); ··· 99 96 if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) { 100 97 last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE, 101 98 PAGE_SIZE); 99 + if (!last_pgtable) 100 + panic("%s: Failed to allocate %lu bytes align=%lx\n", 101 + __func__, PAGE_SIZE, PAGE_SIZE); 102 102 103 103 clear_page(last_pgtable); 104 104 __flush_page_to_ram(last_pgtable); ··· 284 278 * to a couple of allocated pages 285 279 */ 286 280 empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 281 + if (!empty_zero_page) 282 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 283 + __func__, PAGE_SIZE, PAGE_SIZE); 287 284 288 285 /* 289 286 * Set up SFC/DFC registers
+6
arch/m68k/mm/sun3mmu.c
··· 46 46 unsigned long size; 47 47 48 48 empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 49 + if (!empty_zero_page) 50 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 51 + __func__, PAGE_SIZE, PAGE_SIZE); 49 52 50 53 address = PAGE_OFFSET; 51 54 pg_dir = swapper_pg_dir; ··· 59 56 size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1); 60 57 61 58 next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE); 59 + if (!next_pgtable) 60 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 61 + __func__, size, PAGE_SIZE); 62 62 bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK; 63 63 64 64 /* Map whole memory from PAGE_OFFSET (0x0E000000) */
+3
arch/m68k/sun3/sun3dvma.c
··· 269 269 270 270 iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long), 271 271 SMP_CACHE_BYTES); 272 + if (!iommu_use) 273 + panic("%s: Failed to allocate %zu bytes\n", __func__, 274 + IOMMU_TOTAL_ENTRIES * sizeof(unsigned long)); 272 275 273 276 dvma_unmap_iommu(DVMA_START, DVMA_SIZE); 274 277
+6 -2
arch/microblaze/mm/init.c
··· 374 374 { 375 375 void *p; 376 376 377 - if (mem_init_done) 377 + if (mem_init_done) { 378 378 p = kzalloc(size, mask); 379 - else 379 + } else { 380 380 p = memblock_alloc(size, SMP_CACHE_BYTES); 381 + if (!p) 382 + panic("%s: Failed to allocate %zu bytes\n", 383 + __func__, size); 384 + } 381 385 382 386 return p; 383 387 }
+3
arch/mips/cavium-octeon/dma-octeon.c
··· 245 245 swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT; 246 246 247 247 octeon_swiotlb = memblock_alloc_low(swiotlbsize, PAGE_SIZE); 248 + if (!octeon_swiotlb) 249 + panic("%s: Failed to allocate %zu bytes align=%lx\n", 250 + __func__, swiotlbsize, PAGE_SIZE); 248 251 249 252 if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM) 250 253 panic("Cannot allocate SWIOTLB buffer");
+3
arch/mips/kernel/setup.c
··· 919 919 end = HIGHMEM_START - 1; 920 920 921 921 res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); 922 + if (!res) 923 + panic("%s: Failed to allocate %zu bytes\n", __func__, 924 + sizeof(struct resource)); 922 925 923 926 res->start = start; 924 927 res->end = end;
+3
arch/mips/kernel/traps.c
··· 2294 2294 2295 2295 ebase = (unsigned long) 2296 2296 memblock_alloc(size, 1 << fls(size)); 2297 + if (!ebase) 2298 + panic("%s: Failed to allocate %lu bytes align=0x%x\n", 2299 + __func__, size, 1 << fls(size)); 2297 2300 2298 2301 /* 2299 2302 * Try to ensure ebase resides in KSeg0 if possible.
+5
arch/mips/mm/init.c
··· 252 252 if (pmd_none(*pmd)) { 253 253 pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, 254 254 PAGE_SIZE); 255 + if (!pte) 256 + panic("%s: Failed to allocate %lu bytes align=%lx\n", 257 + __func__, PAGE_SIZE, 258 + PAGE_SIZE); 259 + 255 260 set_pmd(pmd, __pmd((unsigned long)pte)); 256 261 BUG_ON(pte != pte_offset_kernel(pmd, 0)); 257 262 }
+12
arch/nds32/mm/init.c
··· 79 79 80 80 /* Alloc one page for holding PTE's... */ 81 81 pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 82 + if (!pte) 83 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 84 + __func__, PAGE_SIZE, PAGE_SIZE); 82 85 set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE)); 83 86 84 87 /* Fill the newly allocated page with PTE'S */ ··· 114 111 pud = pud_offset(pgd, vaddr); 115 112 pmd = pmd_offset(pud, vaddr); 116 113 fixmap_pmd_p = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 114 + if (!fixmap_pmd_p) 115 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 116 + __func__, PAGE_SIZE, PAGE_SIZE); 117 117 set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE)); 118 118 119 119 #ifdef CONFIG_HIGHMEM ··· 129 123 pud = pud_offset(pgd, vaddr); 130 124 pmd = pmd_offset(pud, vaddr); 131 125 pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 126 + if (!pte) 127 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 128 + __func__, PAGE_SIZE, PAGE_SIZE); 132 129 set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE)); 133 130 pkmap_page_table = pte; 134 131 #endif /* CONFIG_HIGHMEM */ ··· 157 148 158 149 /* allocate space for empty_zero_page */ 159 150 zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 151 + if (!zero_page) 152 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 153 + __func__, PAGE_SIZE, PAGE_SIZE); 160 154 zone_sizes_init(); 161 155 162 156 empty_zero_page = virt_to_page(zero_page);
+6 -2
arch/openrisc/mm/ioremap.c
··· 122 122 { 123 123 pte_t *pte; 124 124 125 - if (likely(mem_init_done)) 125 + if (likely(mem_init_done)) { 126 126 pte = (pte_t *)get_zeroed_page(GFP_KERNEL); 127 - else 127 + } else { 128 128 pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 129 + if (!pte) 130 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 131 + __func__, PAGE_SIZE, PAGE_SIZE); 132 + } 129 133 130 134 return pte; 131 135 }
+5
arch/powerpc/kernel/dt_cpu_ftrs.c
··· 1005 1005 of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes, 1006 1006 &nr_dt_cpu_features); 1007 1007 dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE); 1008 + if (!dt_cpu_features) 1009 + panic("%s: Failed to allocate %zu bytes align=0x%lx\n", 1010 + __func__, 1011 + sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, 1012 + PAGE_SIZE); 1008 1013 1009 1014 cpufeatures_setup_start(isa); 1010 1015
+3
arch/powerpc/kernel/pci_32.c
··· 205 205 206 206 of_prop = memblock_alloc(sizeof(struct property) + 256, 207 207 SMP_CACHE_BYTES); 208 + if (!of_prop) 209 + panic("%s: Failed to allocate %zu bytes\n", __func__, 210 + sizeof(struct property) + 256); 208 211 dn = of_find_node_by_path("/"); 209 212 if (dn) { 210 213 memset(of_prop, -1, sizeof(struct property) + 256);
+3
arch/powerpc/kernel/setup-common.c
··· 461 461 462 462 cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32), 463 463 __alignof__(u32)); 464 + if (!cpu_to_phys_id) 465 + panic("%s: Failed to allocate %zu bytes align=0x%zx\n", 466 + __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32)); 464 467 465 468 for_each_node_by_type(dn, "cpu") { 466 469 const __be32 *intserv;
+4
arch/powerpc/kernel/setup_64.c
··· 905 905 l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2, 906 906 l1d_size, MEMBLOCK_LOW_LIMIT, 907 907 limit, NUMA_NO_NODE); 908 + if (!l1d_flush_fallback_area) 909 + panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n", 910 + __func__, l1d_size * 2, l1d_size, &limit); 911 + 908 912 909 913 for_each_possible_cpu(cpu) { 910 914 struct paca_struct *paca = paca_ptrs[cpu];
+3
arch/powerpc/lib/alloc.c
··· 15 15 p = kzalloc(size, mask); 16 16 else { 17 17 p = memblock_alloc(size, SMP_CACHE_BYTES); 18 + if (!p) 19 + panic("%s: Failed to allocate %zu bytes\n", __func__, 20 + size); 18 21 } 19 22 return p; 20 23 }
+3
arch/powerpc/mm/hash_utils_64.c
··· 915 915 linear_map_hash_slots = memblock_alloc_try_nid( 916 916 linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT, 917 917 ppc64_rma_size, NUMA_NO_NODE); 918 + if (!linear_map_hash_slots) 919 + panic("%s: Failed to allocate %lu bytes max_addr=%pa\n", 920 + __func__, linear_map_hash_count, &ppc64_rma_size); 918 921 } 919 922 #endif /* CONFIG_DEBUG_PAGEALLOC */ 920 923
+9
arch/powerpc/mm/mmu_context_nohash.c
··· 461 461 * Allocate the maps used by context management 462 462 */ 463 463 context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); 464 + if (!context_map) 465 + panic("%s: Failed to allocate %zu bytes\n", __func__, 466 + CTX_MAP_SIZE); 464 467 context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1), 465 468 SMP_CACHE_BYTES); 469 + if (!context_mm) 470 + panic("%s: Failed to allocate %zu bytes\n", __func__, 471 + sizeof(void *) * (LAST_CONTEXT + 1)); 466 472 #ifdef CONFIG_SMP 467 473 stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); 474 + if (!stale_map[boot_cpuid]) 475 + panic("%s: Failed to allocate %zu bytes\n", __func__, 476 + CTX_MAP_SIZE); 468 477 469 478 cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE, 470 479 "powerpc/mmu/ctx:prepare",
+10 -2
arch/powerpc/mm/pgtable-book3e.c
··· 57 57 58 58 static __ref void *early_alloc_pgtable(unsigned long size) 59 59 { 60 - return memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT, 61 - __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE); 60 + void *ptr; 61 + 62 + ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT, 63 + __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE); 64 + 65 + if (!ptr) 66 + panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n", 67 + __func__, size, size, __pa(MAX_DMA_ADDRESS)); 68 + 69 + return ptr; 62 70 } 63 71 64 72 /*
+3
arch/powerpc/mm/pgtable-book3s64.c
··· 197 197 BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large."); 198 198 /* Initialize the Partition Table with no entries */ 199 199 partition_tb = memblock_alloc(patb_size, patb_size); 200 + if (!partition_tb) 201 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 202 + __func__, patb_size, patb_size); 200 203 201 204 /* 202 205 * update partition table control register,
+8 -1
arch/powerpc/mm/pgtable-radix.c
··· 53 53 { 54 54 phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT; 55 55 phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE; 56 + void *ptr; 56 57 57 58 if (region_start) 58 59 min_addr = region_start; 59 60 if (region_end) 60 61 max_addr = region_end; 61 62 62 - return memblock_alloc_try_nid(size, size, min_addr, max_addr, nid); 63 + ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid); 64 + 65 + if (!ptr) 66 + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n", 67 + __func__, size, size, nid, &min_addr, &max_addr); 68 + 69 + return ptr; 63 70 } 64 71 65 72 static int early_map_kernel_page(unsigned long ea, unsigned long pa,
+3
arch/powerpc/mm/ppc_mmu_32.c
··· 340 340 */ 341 341 if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); 342 342 Hash = memblock_alloc(Hash_size, Hash_size); 343 + if (!Hash) 344 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 345 + __func__, Hash_size, Hash_size); 343 346 _SDR1 = __pa(Hash) | SDR1_LOW_BITS; 344 347 345 348 Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
+3
arch/powerpc/platforms/pasemi/iommu.c
··· 211 211 iob_l2_base = memblock_alloc_try_nid_raw(1UL << 21, 1UL << 21, 212 212 MEMBLOCK_LOW_LIMIT, 0x80000000, 213 213 NUMA_NO_NODE); 214 + if (!iob_l2_base) 215 + panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%x\n", 216 + __func__, 1UL << 21, 1UL << 21, 0x80000000); 214 217 215 218 pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base); 216 219
+3
arch/powerpc/platforms/powermac/nvram.c
··· 519 519 return -EINVAL; 520 520 } 521 521 nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES); 522 + if (!nvram_image) 523 + panic("%s: Failed to allocate %u bytes\n", __func__, 524 + NVRAM_SIZE); 522 525 nvram_data = ioremap(addr, NVRAM_SIZE*2); 523 526 nvram_naddrs = 1; /* Make sure we get the correct case */ 524 527
+3
arch/powerpc/platforms/powernv/opal.c
··· 171 171 * Allocate a buffer to hold the MC recoverable ranges. 172 172 */ 173 173 mc_recoverable_range = memblock_alloc(size, __alignof__(u64)); 174 + if (!mc_recoverable_range) 175 + panic("%s: Failed to allocate %u bytes align=0x%lx\n", 176 + __func__, size, __alignof__(u64)); 174 177 175 178 for (i = 0; i < mc_recoverable_range_len; i++) { 176 179 mc_recoverable_range[i].start_addr =
+8
arch/powerpc/platforms/powernv/pci-ioda.c
··· 3657 3657 pr_debug(" PHB-ID : 0x%016llx\n", phb_id); 3658 3658 3659 3659 phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES); 3660 + if (!phb) 3661 + panic("%s: Failed to allocate %zu bytes\n", __func__, 3662 + sizeof(*phb)); 3660 3663 3661 3664 /* Allocate PCI controller */ 3662 3665 phb->hose = hose = pcibios_alloc_controller(np); ··· 3706 3703 phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE; 3707 3704 3708 3705 phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES); 3706 + if (!phb->diag_data) 3707 + panic("%s: Failed to allocate %u bytes\n", __func__, 3708 + phb->diag_data_size); 3709 3709 3710 3710 /* Parse 32-bit and IO ranges (if any) */ 3711 3711 pci_process_bridge_OF_ranges(hose, np, !hose->global_number); ··· 3768 3762 pemap_off = size; 3769 3763 size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe); 3770 3764 aux = memblock_alloc(size, SMP_CACHE_BYTES); 3765 + if (!aux) 3766 + panic("%s: Failed to allocate %lu bytes\n", __func__, size); 3771 3767 phb->ioda.pe_alloc = aux; 3772 3768 phb->ioda.m64_segmap = aux + m64map_off; 3773 3769 phb->ioda.m32_segmap = aux + m32map_off;
+3
arch/powerpc/platforms/ps3/setup.c
··· 127 127 return; 128 128 129 129 p->address = memblock_alloc(p->size, p->align); 130 + if (!p->address) 131 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 132 + __func__, p->size, p->align); 130 133 131 134 printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size, 132 135 p->address);
+3
arch/powerpc/sysdev/msi_bitmap.c
··· 129 129 bmp->bitmap = kzalloc(size, GFP_KERNEL); 130 130 else { 131 131 bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES); 132 + if (!bmp->bitmap) 133 + panic("%s: Failed to allocate %u bytes\n", __func__, 134 + size); 132 135 /* the bitmap won't be freed from memblock allocator */ 133 136 kmemleak_not_leak(bmp->bitmap); 134 137 }
+13
arch/s390/kernel/setup.c
··· 378 378 */ 379 379 BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE); 380 380 lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc)); 381 + if (!lc) 382 + panic("%s: Failed to allocate %zu bytes align=%zx\n", 383 + __func__, sizeof(*lc), sizeof(*lc)); 384 + 381 385 lc->restart_psw.mask = PSW_KERNEL_BITS; 382 386 lc->restart_psw.addr = (unsigned long) restart_int_handler; 383 387 lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; ··· 423 419 * all CPUs in cast *one* of them does a PSW restart. 424 420 */ 425 421 restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE); 422 + if (!restart_stack) 423 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 424 + __func__, THREAD_SIZE, THREAD_SIZE); 426 425 restart_stack += STACK_INIT_OFFSET; 427 426 428 427 /* ··· 502 495 503 496 for_each_memblock(memory, reg) { 504 497 res = memblock_alloc(sizeof(*res), 8); 498 + if (!res) 499 + panic("%s: Failed to allocate %zu bytes align=0x%x\n", 500 + __func__, sizeof(*res), 8); 505 501 res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM; 506 502 507 503 res->name = "System RAM"; ··· 519 509 continue; 520 510 if (std_res->end > res->end) { 521 511 sub_res = memblock_alloc(sizeof(*sub_res), 8); 512 + if (!sub_res) 513 + panic("%s: Failed to allocate %zu bytes align=0x%x\n", 514 + __func__, sizeof(*sub_res), 8); 522 515 *sub_res = *std_res; 523 516 sub_res->end = res->end; 524 517 std_res->start = res->end + 1;
+4 -1
arch/s390/kernel/smp.c
··· 658 658 /* Allocate a page as dumping area for the store status sigps */ 659 659 page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31); 660 660 if (!page) 661 - panic("ERROR: Failed to allocate %x bytes below %lx\n", 661 + panic("ERROR: Failed to allocate %lx bytes below %lx\n", 662 662 PAGE_SIZE, 1UL << 31); 663 663 664 664 /* Set multi-threading state to the previous system. */ ··· 770 770 771 771 /* Get CPU information */ 772 772 info = memblock_alloc(sizeof(*info), 8); 773 + if (!info) 774 + panic("%s: Failed to allocate %zu bytes align=0x%x\n", 775 + __func__, sizeof(*info), 8); 773 776 smp_get_core_info(info, 1); 774 777 /* Find boot CPU type */ 775 778 if (sclp.has_core_type) {
+6
arch/s390/kernel/topology.c
··· 520 520 nr_masks = max(nr_masks, 1); 521 521 for (i = 0; i < nr_masks; i++) { 522 522 mask->next = memblock_alloc(sizeof(*mask->next), 8); 523 + if (!mask->next) 524 + panic("%s: Failed to allocate %zu bytes align=0x%x\n", 525 + __func__, sizeof(*mask->next), 8); 523 526 mask = mask->next; 524 527 } 525 528 } ··· 541 538 if (!MACHINE_HAS_TOPOLOGY) 542 539 goto out; 543 540 tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 541 + if (!tl_info) 542 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 543 + __func__, PAGE_SIZE, PAGE_SIZE); 544 544 info = tl_info; 545 545 store_topology(info); 546 546 pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
+3
arch/s390/numa/mode_emu.c
··· 313 313 int i; 314 314 315 315 emu_cores = memblock_alloc(sizeof(*emu_cores), 8); 316 + if (!emu_cores) 317 + panic("%s: Failed to allocate %zu bytes align=0x%x\n", 318 + __func__, sizeof(*emu_cores), 8); 316 319 for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++) 317 320 emu_cores->to_node_id[i] = NODE_ID_FREE; 318 321 }
+5 -1
arch/s390/numa/numa.c
··· 92 92 } while (cur_base < end_of_dram); 93 93 94 94 /* Allocate and fill out node_data */ 95 - for (nid = 0; nid < MAX_NUMNODES; nid++) 95 + for (nid = 0; nid < MAX_NUMNODES; nid++) { 96 96 NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8); 97 + if (!NODE_DATA(nid)) 98 + panic("%s: Failed to allocate %zu bytes align=0x%x\n", 99 + __func__, sizeof(pg_data_t), 8); 100 + } 97 101 98 102 for_each_online_node(nid) { 99 103 unsigned long start_pfn, end_pfn;
+6
arch/sh/mm/init.c
··· 128 128 pmd_t *pmd; 129 129 130 130 pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 131 + if (!pmd) 132 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 133 + __func__, PAGE_SIZE, PAGE_SIZE); 131 134 pud_populate(&init_mm, pud, pmd); 132 135 BUG_ON(pmd != pmd_offset(pud, 0)); 133 136 } ··· 144 141 pte_t *pte; 145 142 146 143 pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 144 + if (!pte) 145 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 146 + __func__, PAGE_SIZE, PAGE_SIZE); 147 147 pmd_populate_kernel(&init_mm, pmd, pte); 148 148 BUG_ON(pte != pte_offset_kernel(pmd, 0)); 149 149 }
+4
arch/sh/mm/numa.c
··· 43 43 /* Node-local pgdat */ 44 44 NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data), 45 45 SMP_CACHE_BYTES, nid); 46 + if (!NODE_DATA(nid)) 47 + panic("%s: Failed to allocate %zu bytes align=0x%x nid=%d\n", 48 + __func__, sizeof(struct pglist_data), SMP_CACHE_BYTES, 49 + nid); 46 50 47 51 NODE_DATA(nid)->node_start_pfn = start_pfn; 48 52 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+3
arch/um/drivers/net_kern.c
··· 649 649 } 650 650 651 651 new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); 652 + if (!new) 653 + panic("%s: Failed to allocate %zu bytes\n", __func__, 654 + sizeof(*new)); 652 655 653 656 INIT_LIST_HEAD(&new->list); 654 657 new->index = n;
+3
arch/um/drivers/vector_kern.c
··· 1576 1576 return 1; 1577 1577 } 1578 1578 new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); 1579 + if (!new) 1580 + panic("%s: Failed to allocate %zu bytes\n", __func__, 1581 + sizeof(*new)); 1579 1582 INIT_LIST_HEAD(&new->list); 1580 1583 new->unit = n; 1581 1584 new->arguments = str;
+2
arch/um/kernel/initrd.c
··· 37 37 } 38 38 39 39 area = memblock_alloc(size, SMP_CACHE_BYTES); 40 + if (!area) 41 + panic("%s: Failed to allocate %llu bytes\n", __func__, size); 40 42 41 43 if (load_initrd(initrd, area, size) == -1) 42 44 return 0;
+16
arch/um/kernel/mem.c
··· 66 66 if (pmd_none(*pmd)) { 67 67 pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, 68 68 PAGE_SIZE); 69 + if (!pte) 70 + panic("%s: Failed to allocate %lu bytes align=%lx\n", 71 + __func__, PAGE_SIZE, PAGE_SIZE); 72 + 69 73 set_pmd(pmd, __pmd(_KERNPG_TABLE + 70 74 (unsigned long) __pa(pte))); 71 75 if (pte != pte_offset_kernel(pmd, 0)) ··· 81 77 { 82 78 #ifdef CONFIG_3_LEVEL_PGTABLES 83 79 pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); 80 + if (!pmd_table) 81 + panic("%s: Failed to allocate %lu bytes align=%lx\n", 82 + __func__, PAGE_SIZE, PAGE_SIZE); 83 + 84 84 set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table))); 85 85 if (pmd_table != pmd_offset(pud, 0)) 86 86 BUG(); ··· 134 126 135 127 fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir); 136 128 v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE); 129 + if (!v) 130 + panic("%s: Failed to allocate %lu bytes align=%lx\n", 131 + __func__, size, PAGE_SIZE); 132 + 137 133 memcpy((void *) v , (void *) FIXADDR_USER_START, size); 138 134 p = __pa(v); 139 135 for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE, ··· 158 146 159 147 empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE, 160 148 PAGE_SIZE); 149 + if (!empty_zero_page) 150 + panic("%s: Failed to allocate %lu bytes align=%lx\n", 151 + __func__, PAGE_SIZE, PAGE_SIZE); 152 + 161 153 for (i = 0; i < ARRAY_SIZE(zones_size); i++) 162 154 zones_size[i] = 0; 163 155
+4
arch/unicore32/kernel/setup.c
··· 207 207 continue; 208 208 209 209 res = memblock_alloc_low(sizeof(*res), SMP_CACHE_BYTES); 210 + if (!res) 211 + panic("%s: Failed to allocate %zu bytes align=%x\n", 212 + __func__, sizeof(*res), SMP_CACHE_BYTES); 213 + 210 214 res->name = "System RAM"; 211 215 res->start = mi->bank[i].start; 212 216 res->end = mi->bank[i].start + mi->bank[i].size - 1;
+13 -2
arch/unicore32/mm/mmu.c
··· 145 145 unsigned long prot) 146 146 { 147 147 if (pmd_none(*pmd)) { 148 - pte_t *pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), 149 - PTRS_PER_PTE * sizeof(pte_t)); 148 + size_t size = PTRS_PER_PTE * sizeof(pte_t); 149 + pte_t *pte = memblock_alloc(size, size); 150 + 151 + if (!pte) 152 + panic("%s: Failed to allocate %zu bytes align=%zx\n", 153 + __func__, size, size); 154 + 150 155 __pmd_populate(pmd, __pa(pte) | prot); 151 156 } 152 157 BUG_ON(pmd_bad(*pmd)); ··· 354 349 * Allocate the vector page early. 355 350 */ 356 351 vectors = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 352 + if (!vectors) 353 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 354 + __func__, PAGE_SIZE, PAGE_SIZE); 357 355 358 356 for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) 359 357 pmd_clear(pmd_off_k(addr)); ··· 434 426 435 427 /* allocate the zero page. */ 436 428 zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 429 + if (!zero_page) 430 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 431 + __func__, PAGE_SIZE, PAGE_SIZE); 437 432 438 433 bootmem_init(); 439 434
+3
arch/x86/kernel/acpi/boot.c
··· 935 935 #define HPET_RESOURCE_NAME_SIZE 9 936 936 hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE, 937 937 SMP_CACHE_BYTES); 938 + if (!hpet_res) 939 + panic("%s: Failed to allocate %zu bytes\n", __func__, 940 + sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE); 938 941 939 942 hpet_res->name = (void *)&hpet_res[1]; 940 943 hpet_res->flags = IORESOURCE_MEM;
+5
arch/x86/kernel/apic/io_apic.c
··· 2581 2581 n *= nr_ioapics; 2582 2582 2583 2583 mem = memblock_alloc(n, SMP_CACHE_BYTES); 2584 + if (!mem) 2585 + panic("%s: Failed to allocate %lu bytes\n", __func__, n); 2584 2586 res = (void *)mem; 2585 2587 2586 2588 mem += sizeof(struct resource) * nr_ioapics; ··· 2627 2625 #endif 2628 2626 ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE, 2629 2627 PAGE_SIZE); 2628 + if (!ioapic_phys) 2629 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 2630 + __func__, PAGE_SIZE, PAGE_SIZE); 2630 2631 ioapic_phys = __pa(ioapic_phys); 2631 2632 } 2632 2633 set_fixmap_nocache(idx, ioapic_phys);
+3
arch/x86/kernel/e820.c
··· 1092 1092 1093 1093 res = memblock_alloc(sizeof(*res) * e820_table->nr_entries, 1094 1094 SMP_CACHE_BYTES); 1095 + if (!res) 1096 + panic("%s: Failed to allocate %zu bytes\n", __func__, 1097 + sizeof(*res) * e820_table->nr_entries); 1095 1098 e820_res = res; 1096 1099 1097 1100 for (i = 0; i < e820_table->nr_entries; i++) {
+3
arch/x86/platform/olpc/olpc_dt.c
··· 141 141 * wasted bootmem) and hand off chunks of it to callers. 142 142 */ 143 143 res = memblock_alloc(chunk_size, SMP_CACHE_BYTES); 144 + if (!res) 145 + panic("%s: Failed to allocate %zu bytes\n", __func__, 146 + chunk_size); 144 147 BUG_ON(!res); 145 148 prom_early_allocated += chunk_size; 146 149 memset(res, 0, chunk_size);
+9 -2
arch/x86/xen/p2m.c
··· 181 181 182 182 static void * __ref alloc_p2m_page(void) 183 183 { 184 - if (unlikely(!slab_is_available())) 185 - return memblock_alloc(PAGE_SIZE, PAGE_SIZE); 184 + if (unlikely(!slab_is_available())) { 185 + void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE); 186 + 187 + if (!ptr) 188 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 189 + __func__, PAGE_SIZE, PAGE_SIZE); 190 + 191 + return ptr; 192 + } 186 193 187 194 return (void *)__get_free_page(GFP_KERNEL); 188 195 }
+4
arch/xtensa/mm/kasan_init.c
··· 45 45 pmd_t *pmd = pmd_offset(pgd, vaddr); 46 46 pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); 47 47 48 + if (!pte) 49 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 50 + __func__, n_pages * sizeof(pte_t), PAGE_SIZE); 51 + 48 52 pr_debug("%s: %p - %p\n", __func__, start, end); 49 53 50 54 for (i = j = 0; i < n_pmds; ++i) {
+3
arch/xtensa/mm/mmu.c
··· 32 32 __func__, vaddr, n_pages); 33 33 34 34 pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE); 35 + if (!pte) 36 + panic("%s: Failed to allocate %zu bytes align=%lx\n", 37 + __func__, n_pages * sizeof(pte_t), PAGE_SIZE); 35 38 36 39 for (i = 0; i < n_pages; ++i) 37 40 pte_clear(NULL, 0, pte + i);
+3
drivers/clk/ti/clk.c
··· 351 351 struct clk_iomap *io; 352 352 353 353 io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES); 354 + if (!io) 355 + panic("%s: Failed to allocate %zu bytes\n", __func__, 356 + sizeof(*io)); 354 357 355 358 io->mem = mem; 356 359
+3
drivers/macintosh/smu.c
··· 493 493 } 494 494 495 495 smu = memblock_alloc(sizeof(struct smu_device), SMP_CACHE_BYTES); 496 + if (!smu) 497 + panic("%s: Failed to allocate %zu bytes\n", __func__, 498 + sizeof(struct smu_device)); 496 499 497 500 spin_lock_init(&smu->lock); 498 501 INIT_LIST_HEAD(&smu->cmd_list);
+7 -1
drivers/of/fdt.c
··· 1181 1181 1182 1182 static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) 1183 1183 { 1184 - return memblock_alloc(size, align); 1184 + void *ptr = memblock_alloc(size, align); 1185 + 1186 + if (!ptr) 1187 + panic("%s: Failed to allocate %llu bytes align=0x%llx\n", 1188 + __func__, size, align); 1189 + 1190 + return ptr; 1185 1191 } 1186 1192 1187 1193 bool __init early_init_dt_verify(void *params)
+7 -1
drivers/of/unittest.c
··· 2241 2241 2242 2242 static void * __init dt_alloc_memory(u64 size, u64 align) 2243 2243 { 2244 - return memblock_alloc(size, align); 2244 + void *ptr = memblock_alloc(size, align); 2245 + 2246 + if (!ptr) 2247 + panic("%s: Failed to allocate %llu bytes align=0x%llx\n", 2248 + __func__, size, align); 2249 + 2250 + return ptr; 2245 2251 } 2246 2252 2247 2253 /*
+5 -2
drivers/xen/swiotlb-xen.c
··· 214 214 /* 215 215 * Get IO TLB memory from any location. 216 216 */ 217 - if (early) 217 + if (early) { 218 218 xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes), 219 219 PAGE_SIZE); 220 - else { 220 + if (!xen_io_tlb_start) 221 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 222 + __func__, PAGE_ALIGN(bytes), PAGE_SIZE); 223 + } else { 221 224 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) 222 225 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) 223 226 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+2 -2
kernel/dma/swiotlb.c
··· 215 215 alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(int)); 216 216 io_tlb_list = memblock_alloc(alloc_size, PAGE_SIZE); 217 217 if (!io_tlb_list) 218 - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 218 + panic("%s: Failed to allocate %zu bytes align=0x%lx\n", 219 219 __func__, alloc_size, PAGE_SIZE); 220 220 221 221 alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)); 222 222 io_tlb_orig_addr = memblock_alloc(alloc_size, PAGE_SIZE); 223 223 if (!io_tlb_orig_addr) 224 - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 224 + panic("%s: Failed to allocate %zu bytes align=0x%lx\n", 225 225 __func__, alloc_size, PAGE_SIZE); 226 226 227 227 for (i = 0; i < io_tlb_nslabs; i++) {
+3
kernel/power/snapshot.c
··· 965 965 /* This allocation cannot fail */ 966 966 region = memblock_alloc(sizeof(struct nosave_region), 967 967 SMP_CACHE_BYTES); 968 + if (!region) 969 + panic("%s: Failed to allocate %zu bytes\n", __func__, 970 + sizeof(struct nosave_region)); 968 971 } 969 972 region->start_pfn = start_pfn; 970 973 region->end_pfn = end_pfn;
+3
lib/cpumask.c
··· 165 165 void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) 166 166 { 167 167 *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES); 168 + if (!*mask) 169 + panic("%s: Failed to allocate %u bytes\n", __func__, 170 + cpumask_size()); 168 171 } 169 172 170 173 /**
+8 -2
mm/kasan/init.c
··· 83 83 84 84 static __init void *early_alloc(size_t size, int node) 85 85 { 86 - return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), 87 - MEMBLOCK_ALLOC_ACCESSIBLE, node); 86 + void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), 87 + MEMBLOCK_ALLOC_ACCESSIBLE, node); 88 + 89 + if (!ptr) 90 + panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n", 91 + __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS)); 92 + 93 + return ptr; 88 94 } 89 95 90 96 static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
+17 -4
mm/sparse.c
··· 65 65 unsigned long array_size = SECTIONS_PER_ROOT * 66 66 sizeof(struct mem_section); 67 67 68 - if (slab_is_available()) 68 + if (slab_is_available()) { 69 69 section = kzalloc_node(array_size, GFP_KERNEL, nid); 70 - else 70 + } else { 71 71 section = memblock_alloc_node(array_size, SMP_CACHE_BYTES, 72 72 nid); 73 + if (!section) 74 + panic("%s: Failed to allocate %lu bytes nid=%d\n", 75 + __func__, array_size, nid); 76 + } 73 77 74 78 return section; 75 79 } ··· 222 218 size = sizeof(struct mem_section*) * NR_SECTION_ROOTS; 223 219 align = 1 << (INTERNODE_CACHE_SHIFT); 224 220 mem_section = memblock_alloc(size, align); 221 + if (!mem_section) 222 + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 223 + __func__, size, align); 225 224 } 226 225 #endif 227 226 ··· 411 404 { 412 405 unsigned long size = section_map_size(); 413 406 struct page *map = sparse_buffer_alloc(size); 407 + phys_addr_t addr = __pa(MAX_DMA_ADDRESS); 414 408 415 409 if (map) 416 410 return map; 417 411 418 412 map = memblock_alloc_try_nid(size, 419 - PAGE_SIZE, __pa(MAX_DMA_ADDRESS), 413 + PAGE_SIZE, addr, 420 414 MEMBLOCK_ALLOC_ACCESSIBLE, nid); 415 + if (!map) 416 + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n", 417 + __func__, size, PAGE_SIZE, nid, &addr); 418 + 421 419 return map; 422 420 } 423 421 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ ··· 432 420 433 421 static void __init sparse_buffer_init(unsigned long size, int nid) 434 422 { 423 + phys_addr_t addr = __pa(MAX_DMA_ADDRESS); 435 424 WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */ 436 425 sparsemap_buf = 437 426 memblock_alloc_try_nid_raw(size, PAGE_SIZE, 438 - __pa(MAX_DMA_ADDRESS), 427 + addr, 439 428 MEMBLOCK_ALLOC_ACCESSIBLE, nid); 440 429 sparsemap_buf_end = sparsemap_buf + size; 441 430 }