Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: remove the pgprot argument to __vmalloc

The pgprot argument to __vmalloc is always PAGE_KERNEL now, so remove it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Michael Kelley <mikelley@microsoft.com> [hyperv]
Acked-by: Gao Xiang <xiang@kernel.org> [erofs]
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Wei Liu <wei.liu@kernel.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200414131348.444715-22-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Christoph Hellwig; committed by Linus Torvalds.
88dca4ca d28ff991

+47 -59
+1 -2
arch/x86/hyperv/hv_init.c
··· 97 97 * not be stopped in the case of CPU offlining and the VM will hang. 98 98 */ 99 99 if (!*hvp) { 100 - *hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO, 101 - PAGE_KERNEL); 100 + *hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO); 102 101 } 103 102 104 103 if (*hvp) {
+1 -2
arch/x86/include/asm/kvm_host.h
··· 1279 1279 #define __KVM_HAVE_ARCH_VM_ALLOC 1280 1280 static inline struct kvm *kvm_arch_alloc_vm(void) 1281 1281 { 1282 - return __vmalloc(kvm_x86_ops.vm_size, 1283 - GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL); 1282 + return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO); 1284 1283 } 1285 1284 void kvm_arch_free_vm(struct kvm *kvm); 1286 1285
+1 -2
arch/x86/kvm/svm/sev.c
··· 336 336 /* Avoid using vmalloc for smaller buffers. */ 337 337 size = npages * sizeof(struct page *); 338 338 if (size > PAGE_SIZE) 339 - pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO, 340 - PAGE_KERNEL); 339 + pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO); 341 340 else 342 341 pages = kmalloc(size, GFP_KERNEL_ACCOUNT); 343 342
+1 -3
drivers/block/drbd/drbd_bitmap.c
··· 396 396 bytes = sizeof(struct page *)*want; 397 397 new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN); 398 398 if (!new_pages) { 399 - new_pages = __vmalloc(bytes, 400 - GFP_NOIO | __GFP_ZERO, 401 - PAGE_KERNEL); 399 + new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_ZERO); 402 400 if (!new_pages) 403 401 return NULL; 404 402 }
+2 -2
drivers/gpu/drm/etnaviv/etnaviv_dump.c
··· 154 154 file_size += sizeof(*iter.hdr) * n_obj; 155 155 156 156 /* Allocate the file in vmalloc memory, it's likely to be big */ 157 - iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, 158 - PAGE_KERNEL); 157 + iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | 158 + __GFP_NORETRY); 159 159 if (!iter.start) { 160 160 mutex_unlock(&gpu->mmu_context->lock); 161 161 dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
+2 -3
drivers/lightnvm/pblk-init.c
··· 145 145 int ret = 0; 146 146 147 147 map_size = pblk_trans_map_size(pblk); 148 - pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN 149 - | __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM, 150 - PAGE_KERNEL); 148 + pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN | 149 + __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM); 151 150 if (!pblk->trans_map) { 152 151 pblk_err(pblk, "failed to allocate L2P (need %zu of memory)\n", 153 152 map_size);
+2 -2
drivers/md/dm-bufio.c
··· 400 400 */ 401 401 if (gfp_mask & __GFP_NORETRY) { 402 402 unsigned noio_flag = memalloc_noio_save(); 403 - void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); 403 + void *ptr = __vmalloc(c->block_size, gfp_mask); 404 404 405 405 memalloc_noio_restore(noio_flag); 406 406 return ptr; 407 407 } 408 408 409 - return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); 409 + return __vmalloc(c->block_size, gfp_mask); 410 410 } 411 411 412 412 /*
+2 -2
drivers/mtd/ubi/io.c
··· 1297 1297 if (!ubi_dbg_chk_io(ubi)) 1298 1298 return 0; 1299 1299 1300 - buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL); 1300 + buf1 = __vmalloc(len, GFP_NOFS); 1301 1301 if (!buf1) { 1302 1302 ubi_err(ubi, "cannot allocate memory to check writes"); 1303 1303 return 0; ··· 1361 1361 if (!ubi_dbg_chk_io(ubi)) 1362 1362 return 0; 1363 1363 1364 - buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL); 1364 + buf = __vmalloc(len, GFP_NOFS); 1365 1365 if (!buf) { 1366 1366 ubi_err(ubi, "cannot allocate memory to check for 0xFFs"); 1367 1367 return 0;
+1 -2
drivers/scsi/sd_zbc.c
··· 136 136 137 137 while (bufsize >= SECTOR_SIZE) { 138 138 buf = __vmalloc(bufsize, 139 - GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY, 140 - PAGE_KERNEL); 139 + GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY); 141 140 if (buf) { 142 141 *buflen = bufsize; 143 142 return buf;
+4 -5
fs/gfs2/dir.c
··· 354 354 355 355 hc = kmalloc(hsize, GFP_NOFS | __GFP_NOWARN); 356 356 if (hc == NULL) 357 - hc = __vmalloc(hsize, GFP_NOFS, PAGE_KERNEL); 357 + hc = __vmalloc(hsize, GFP_NOFS); 358 358 359 359 if (hc == NULL) 360 360 return ERR_PTR(-ENOMEM); ··· 1166 1166 1167 1167 hc2 = kmalloc_array(hsize_bytes, 2, GFP_NOFS | __GFP_NOWARN); 1168 1168 if (hc2 == NULL) 1169 - hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL); 1169 + hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS); 1170 1170 1171 1171 if (!hc2) 1172 1172 return -ENOMEM; ··· 1327 1327 if (size < KMALLOC_MAX_SIZE) 1328 1328 ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN); 1329 1329 if (!ptr) 1330 - ptr = __vmalloc(size, GFP_NOFS, PAGE_KERNEL); 1330 + ptr = __vmalloc(size, GFP_NOFS); 1331 1331 return ptr; 1332 1332 } 1333 1333 ··· 1987 1987 1988 1988 ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN); 1989 1989 if (ht == NULL) 1990 - ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO, 1991 - PAGE_KERNEL); 1990 + ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO); 1992 1991 if (!ht) 1993 1992 return -ENOMEM; 1994 1993
+1 -1
fs/gfs2/quota.c
··· 1365 1365 sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN); 1366 1366 if (sdp->sd_quota_bitmap == NULL) 1367 1367 sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS | 1368 - __GFP_ZERO, PAGE_KERNEL); 1368 + __GFP_ZERO); 1369 1369 if (!sdp->sd_quota_bitmap) 1370 1370 return error; 1371 1371
+1 -1
fs/nfs/blocklayout/extent_tree.c
··· 582 582 if (!arg->layoutupdate_pages) 583 583 return -ENOMEM; 584 584 585 - start_p = __vmalloc(buffer_size, GFP_NOFS, PAGE_KERNEL); 585 + start_p = __vmalloc(buffer_size, GFP_NOFS); 586 586 if (!start_p) { 587 587 kfree(arg->layoutupdate_pages); 588 588 return -ENOMEM;
+1 -1
fs/ntfs/malloc.h
··· 34 34 /* return (void *)__get_free_page(gfp_mask); */ 35 35 } 36 36 if (likely((size >> PAGE_SHIFT) < totalram_pages())) 37 - return __vmalloc(size, gfp_mask, PAGE_KERNEL); 37 + return __vmalloc(size, gfp_mask); 38 38 return NULL; 39 39 } 40 40
+1 -1
fs/ubifs/debug.c
··· 815 815 816 816 pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum); 817 817 818 - buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); 818 + buf = __vmalloc(c->leb_size, GFP_NOFS); 819 819 if (!buf) { 820 820 ubifs_err(c, "cannot allocate memory for dumping LEB %d", lnum); 821 821 return;
+1 -1
fs/ubifs/lprops.c
··· 1095 1095 return LPT_SCAN_CONTINUE; 1096 1096 } 1097 1097 1098 - buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); 1098 + buf = __vmalloc(c->leb_size, GFP_NOFS); 1099 1099 if (!buf) 1100 1100 return -ENOMEM; 1101 1101
+2 -2
fs/ubifs/lpt_commit.c
··· 1596 1596 if (!dbg_is_chk_lprops(c)) 1597 1597 return 0; 1598 1598 1599 - buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); 1599 + buf = p = __vmalloc(c->leb_size, GFP_NOFS); 1600 1600 if (!buf) { 1601 1601 ubifs_err(c, "cannot allocate memory for ltab checking"); 1602 1602 return 0; ··· 1845 1845 void *buf, *p; 1846 1846 1847 1847 pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum); 1848 - buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); 1848 + buf = p = __vmalloc(c->leb_size, GFP_NOFS); 1849 1849 if (!buf) { 1850 1850 ubifs_err(c, "cannot allocate memory to dump LPT"); 1851 1851 return;
+1 -1
fs/ubifs/orphan.c
··· 977 977 if (c->no_orphs) 978 978 return 0; 979 979 980 - buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); 980 + buf = __vmalloc(c->leb_size, GFP_NOFS); 981 981 if (!buf) { 982 982 ubifs_err(c, "cannot allocate memory to check orphans"); 983 983 return 0;
+1 -1
fs/xfs/kmem.c
··· 48 48 if (flags & KM_NOFS) 49 49 nofs_flag = memalloc_nofs_save(); 50 50 51 - ptr = __vmalloc(size, lflags, PAGE_KERNEL); 51 + ptr = __vmalloc(size, lflags); 52 52 53 53 if (flags & KM_NOFS) 54 54 memalloc_nofs_restore(nofs_flag);
+1 -1
include/linux/vmalloc.h
··· 110 110 extern void *vmalloc_exec(unsigned long size); 111 111 extern void *vmalloc_32(unsigned long size); 112 112 extern void *vmalloc_32_user(unsigned long size); 113 - extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); 113 + extern void *__vmalloc(unsigned long size, gfp_t gfp_mask); 114 114 extern void *__vmalloc_node_range(unsigned long size, unsigned long align, 115 115 unsigned long start, unsigned long end, gfp_t gfp_mask, 116 116 pgprot_t prot, unsigned long vm_flags, int node,
+3 -3
kernel/bpf/core.c
··· 82 82 struct bpf_prog *fp; 83 83 84 84 size = round_up(size, PAGE_SIZE); 85 - fp = __vmalloc(size, gfp_flags, PAGE_KERNEL); 85 + fp = __vmalloc(size, gfp_flags); 86 86 if (fp == NULL) 87 87 return NULL; 88 88 ··· 232 232 if (ret) 233 233 return NULL; 234 234 235 - fp = __vmalloc(size, gfp_flags, PAGE_KERNEL); 235 + fp = __vmalloc(size, gfp_flags); 236 236 if (fp == NULL) { 237 237 __bpf_prog_uncharge(fp_old->aux->user, delta); 238 238 } else { ··· 1089 1089 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; 1090 1090 struct bpf_prog *fp; 1091 1091 1092 - fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL); 1092 + fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags); 1093 1093 if (fp != NULL) { 1094 1094 /* aux->prog still points to the fp_other one, so 1095 1095 * when promoting the clone to the real program,
+1 -1
kernel/groups.c
··· 20 20 len = sizeof(struct group_info) + sizeof(kgid_t) * gidsetsize; 21 21 gi = kmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_NOWARN|__GFP_NORETRY); 22 22 if (!gi) 23 - gi = __vmalloc(len, GFP_KERNEL_ACCOUNT, PAGE_KERNEL); 23 + gi = __vmalloc(len, GFP_KERNEL_ACCOUNT); 24 24 if (!gi) 25 25 return NULL; 26 26
+1 -2
kernel/module.c
··· 2946 2946 return err; 2947 2947 2948 2948 /* Suck in entire file: we'll want most of it. */ 2949 - info->hdr = __vmalloc(info->len, 2950 - GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL); 2949 + info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN); 2951 2950 if (!info->hdr) 2952 2951 return -ENOMEM; 2953 2952
+7 -8
mm/nommu.c
··· 140 140 } 141 141 EXPORT_SYMBOL(vfree); 142 142 143 - void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 143 + void *__vmalloc(unsigned long size, gfp_t gfp_mask) 144 144 { 145 145 /* 146 146 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc() ··· 152 152 153 153 void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags) 154 154 { 155 - return __vmalloc(size, flags, PAGE_KERNEL); 155 + return __vmalloc(size, flags); 156 156 } 157 157 158 158 static void *__vmalloc_user_flags(unsigned long size, gfp_t flags) 159 159 { 160 160 void *ret; 161 161 162 - ret = __vmalloc(size, flags, PAGE_KERNEL); 162 + ret = __vmalloc(size, flags); 163 163 if (ret) { 164 164 struct vm_area_struct *vma; 165 165 ··· 230 230 */ 231 231 void *vmalloc(unsigned long size) 232 232 { 233 - return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); 233 + return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM); 234 234 } 235 235 EXPORT_SYMBOL(vmalloc); 236 236 ··· 248 248 */ 249 249 void *vzalloc(unsigned long size) 250 250 { 251 - return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, 252 - PAGE_KERNEL); 251 + return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); 253 252 } 254 253 EXPORT_SYMBOL(vzalloc); 255 254 ··· 301 302 302 303 void *vmalloc_exec(unsigned long size) 303 304 { 304 - return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC); 305 + return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM); 305 306 } 306 307 307 308 /** ··· 313 314 */ 314 315 void *vmalloc_32(unsigned long size) 315 316 { 316 - return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL); 317 + return __vmalloc(size, GFP_KERNEL); 317 318 } 318 319 EXPORT_SYMBOL(vmalloc_32); 319 320
+1 -1
mm/page_alloc.c
··· 8244 8244 table = memblock_alloc_raw(size, 8245 8245 SMP_CACHE_BYTES); 8246 8246 } else if (get_order(size) >= MAX_ORDER || hashdist) { 8247 - table = __vmalloc(size, gfp_flags, PAGE_KERNEL); 8247 + table = __vmalloc(size, gfp_flags); 8248 8248 virt = true; 8249 8249 } else { 8250 8250 /*
+1 -1
mm/percpu.c
··· 482 482 if (size <= PAGE_SIZE) 483 483 return kzalloc(size, gfp); 484 484 else 485 - return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL); 485 + return __vmalloc(size, gfp | __GFP_ZERO); 486 486 } 487 487 488 488 /**
+2 -2
mm/vmalloc.c
··· 2564 2564 gfp_mask, prot, 0, node, caller); 2565 2565 } 2566 2566 2567 - void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 2567 + void *__vmalloc(unsigned long size, gfp_t gfp_mask) 2568 2568 { 2569 - return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE, 2569 + return __vmalloc_node(size, 1, gfp_mask, PAGE_KERNEL, NUMA_NO_NODE, 2570 2570 __builtin_return_address(0)); 2571 2571 } 2572 2572 EXPORT_SYMBOL(__vmalloc);
+2 -4
net/bridge/netfilter/ebtables.c
··· 1095 1095 tmp.name[sizeof(tmp.name) - 1] = 0; 1096 1096 1097 1097 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; 1098 - newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT, 1099 - PAGE_KERNEL); 1098 + newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT); 1100 1099 if (!newinfo) 1101 1100 return -ENOMEM; 1102 1101 1103 1102 if (countersize) 1104 1103 memset(newinfo->counters, 0, countersize); 1105 1104 1106 - newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT, 1107 - PAGE_KERNEL); 1105 + newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT); 1108 1106 if (!newinfo->entries) { 1109 1107 ret = -ENOMEM; 1110 1108 goto free_newinfo;
+1 -1
sound/core/memalloc.c
··· 143 143 break; 144 144 case SNDRV_DMA_TYPE_VMALLOC: 145 145 gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM); 146 - dmab->area = __vmalloc(size, gfp, PAGE_KERNEL); 146 + dmab->area = __vmalloc(size, gfp); 147 147 dmab->addr = 0; 148 148 break; 149 149 #ifdef CONFIG_HAS_DMA
+1 -1
sound/core/pcm_memory.c
··· 460 460 return 0; /* already large enough */ 461 461 vfree(runtime->dma_area); 462 462 } 463 - runtime->dma_area = __vmalloc(size, gfp_flags, PAGE_KERNEL); 463 + runtime->dma_area = __vmalloc(size, gfp_flags); 464 464 if (!runtime->dma_area) 465 465 return -ENOMEM; 466 466 runtime->dma_bytes = size;