Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

treewide: use kv[mz]alloc* rather than opencoded variants

There are many code paths open-coding kvmalloc. Let's use the helper
instead. The main difference from kvmalloc is that those users
usually do not consider all the aspects of the memory allocator. E.g.
allocation requests <= 32kB (with 4kB pages) basically never fail
and instead invoke the OOM killer to satisfy the allocation. This sounds too
disruptive for something that has a reasonable fallback - the vmalloc.
On the other hand, those requests might fall back to vmalloc even when the
memory allocator would succeed after several more reclaim/compaction
attempts previously. There is no guarantee something like that happens
though.

This patch converts many of those places to kv[mz]alloc* helpers because
they are more conservative.

Link: http://lkml.kernel.org/r/20170306103327.2766-2-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> # Xen bits
Acked-by: Kees Cook <keescook@chromium.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Andreas Dilger <andreas.dilger@intel.com> # Lustre
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com> # KVM/s390
Acked-by: Dan Williams <dan.j.williams@intel.com> # nvdimm
Acked-by: David Sterba <dsterba@suse.com> # btrfs
Acked-by: Ilya Dryomov <idryomov@gmail.com> # Ceph
Acked-by: Tariq Toukan <tariqt@mellanox.com> # mlx4
Acked-by: Leon Romanovsky <leonro@mellanox.com> # mlx5
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Anton Vorontsov <anton@enomsg.org>
Cc: Colin Cross <ccross@android.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Kent Overstreet <kent.overstreet@gmail.com>
Cc: Santosh Raspatur <santosh@chelsio.com>
Cc: Hariprasad S <hariprasad@chelsio.com>
Cc: Yishai Hadas <yishaih@mellanox.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: "Yan, Zheng" <zyan@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Michal Hocko; committed by Linus Torvalds.
752ade68 81be3dee

+128 -350
+2 -8
arch/s390/kvm/kvm-s390.c
··· 1166 1166 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) 1167 1167 return -EINVAL; 1168 1168 1169 - keys = kmalloc_array(args->count, sizeof(uint8_t), 1170 - GFP_KERNEL | __GFP_NOWARN); 1171 - if (!keys) 1172 - keys = vmalloc(sizeof(uint8_t) * args->count); 1169 + keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL); 1173 1170 if (!keys) 1174 1171 return -ENOMEM; 1175 1172 ··· 1208 1211 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) 1209 1212 return -EINVAL; 1210 1213 1211 - keys = kmalloc_array(args->count, sizeof(uint8_t), 1212 - GFP_KERNEL | __GFP_NOWARN); 1213 - if (!keys) 1214 - keys = vmalloc(sizeof(uint8_t) * args->count); 1214 + keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL); 1215 1215 if (!keys) 1216 1216 return -ENOMEM; 1217 1217
+1 -3
crypto/lzo.c
··· 32 32 { 33 33 void *ctx; 34 34 35 - ctx = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN); 36 - if (!ctx) 37 - ctx = vmalloc(LZO1X_MEM_COMPRESS); 35 + ctx = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); 38 36 if (!ctx) 39 37 return ERR_PTR(-ENOMEM); 40 38
+2 -6
drivers/acpi/apei/erst.c
··· 513 513 if (i < erst_record_id_cache.len) 514 514 goto retry; 515 515 if (erst_record_id_cache.len >= erst_record_id_cache.size) { 516 - int new_size, alloc_size; 516 + int new_size; 517 517 u64 *new_entries; 518 518 519 519 new_size = erst_record_id_cache.size * 2; ··· 524 524 pr_warn(FW_WARN "too many record IDs!\n"); 525 525 return 0; 526 526 } 527 - alloc_size = new_size * sizeof(entries[0]); 528 - if (alloc_size < PAGE_SIZE) 529 - new_entries = kmalloc(alloc_size, GFP_KERNEL); 530 - else 531 - new_entries = vmalloc(alloc_size); 527 + new_entries = kvmalloc(new_size * sizeof(entries[0]), GFP_KERNEL); 532 528 if (!new_entries) 533 529 return -ENOMEM; 534 530 memcpy(new_entries, entries,
+1 -7
drivers/char/agp/generic.c
··· 88 88 89 89 void agp_alloc_page_array(size_t size, struct agp_memory *mem) 90 90 { 91 - mem->pages = NULL; 92 - 93 - if (size <= 2*PAGE_SIZE) 94 - mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); 95 - if (mem->pages == NULL) { 96 - mem->pages = vmalloc(size); 97 - } 91 + mem->pages = kvmalloc(size, GFP_KERNEL); 98 92 } 99 93 EXPORT_SYMBOL(agp_alloc_page_array); 100 94
+1 -3
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 568 568 569 569 size *= nmemb; 570 570 571 - mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); 572 - if (!mem) 573 - mem = vmalloc(size); 571 + mem = kvmalloc(size, GFP_KERNEL); 574 572 if (!mem) 575 573 return ERR_PTR(-ENOMEM); 576 574
+2 -10
drivers/md/bcache/util.h
··· 43 43 (heap)->used = 0; \ 44 44 (heap)->size = (_size); \ 45 45 _bytes = (heap)->size * sizeof(*(heap)->data); \ 46 - (heap)->data = NULL; \ 47 - if (_bytes < KMALLOC_MAX_SIZE) \ 48 - (heap)->data = kmalloc(_bytes, (gfp)); \ 49 - if ((!(heap)->data) && ((gfp) & GFP_KERNEL)) \ 50 - (heap)->data = vmalloc(_bytes); \ 46 + (heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \ 51 47 (heap)->data; \ 52 48 }) 53 49 ··· 132 136 \ 133 137 (fifo)->mask = _allocated_size - 1; \ 134 138 (fifo)->front = (fifo)->back = 0; \ 135 - (fifo)->data = NULL; \ 136 139 \ 137 - if (_bytes < KMALLOC_MAX_SIZE) \ 138 - (fifo)->data = kmalloc(_bytes, (gfp)); \ 139 - if ((!(fifo)->data) && ((gfp) & GFP_KERNEL)) \ 140 - (fifo)->data = vmalloc(_bytes); \ 140 + (fifo)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \ 141 141 (fifo)->data; \ 142 142 }) 143 143
-3
drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
··· 41 41 42 42 #define VALIDATE_TID 1 43 43 44 - void *cxgb_alloc_mem(unsigned long size); 45 - void cxgb_free_mem(void *addr); 46 - 47 44 /* 48 45 * Map an ATID or STID to their entries in the corresponding TID tables. 49 46 */
+4 -25
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
··· 1152 1152 } 1153 1153 1154 1154 /* 1155 - * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. 1156 - * The allocated memory is cleared. 1157 - */ 1158 - void *cxgb_alloc_mem(unsigned long size) 1159 - { 1160 - void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 1161 - 1162 - if (!p) 1163 - p = vzalloc(size); 1164 - return p; 1165 - } 1166 - 1167 - /* 1168 - * Free memory allocated through t3_alloc_mem(). 1169 - */ 1170 - void cxgb_free_mem(void *addr) 1171 - { 1172 - kvfree(addr); 1173 - } 1174 - 1175 - /* 1176 1155 * Allocate and initialize the TID tables. Returns 0 on success. 1177 1156 */ 1178 1157 static int init_tid_tabs(struct tid_info *t, unsigned int ntids, ··· 1161 1182 unsigned long size = ntids * sizeof(*t->tid_tab) + 1162 1183 natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab); 1163 1184 1164 - t->tid_tab = cxgb_alloc_mem(size); 1185 + t->tid_tab = kvzalloc(size, GFP_KERNEL); 1165 1186 if (!t->tid_tab) 1166 1187 return -ENOMEM; 1167 1188 ··· 1197 1218 1198 1219 static void free_tid_maps(struct tid_info *t) 1199 1220 { 1200 - cxgb_free_mem(t->tid_tab); 1221 + kvfree(t->tid_tab); 1201 1222 } 1202 1223 1203 1224 static inline void add_adapter(struct adapter *adap) ··· 1272 1293 return 0; 1273 1294 1274 1295 out_free_l2t: 1275 - t3_free_l2t(l2td); 1296 + kvfree(l2td); 1276 1297 out_free: 1277 1298 kfree(t); 1278 1299 return err; ··· 1281 1302 static void clean_l2_data(struct rcu_head *head) 1282 1303 { 1283 1304 struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); 1284 - t3_free_l2t(d); 1305 + kvfree(d); 1285 1306 } 1286 1307 1287 1308
+1 -7
drivers/net/ethernet/chelsio/cxgb3/l2t.c
··· 444 444 struct l2t_data *d; 445 445 int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry); 446 446 447 - d = cxgb_alloc_mem(size); 447 + d = kvzalloc(size, GFP_KERNEL); 448 448 if (!d) 449 449 return NULL; 450 450 ··· 462 462 } 463 463 return d; 464 464 } 465 - 466 - void t3_free_l2t(struct l2t_data *d) 467 - { 468 - cxgb_free_mem(d); 469 - } 470 -
-1
drivers/net/ethernet/chelsio/cxgb3/l2t.h
··· 115 115 struct l2t_entry *e); 116 116 void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e); 117 117 struct l2t_data *t3_init_l2t(unsigned int l2t_capacity); 118 - void t3_free_l2t(struct l2t_data *d); 119 118 120 119 int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb); 121 120
+6 -6
drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
··· 290 290 if (clipt_size < CLIPT_MIN_HASH_BUCKETS) 291 291 return NULL; 292 292 293 - ctbl = t4_alloc_mem(sizeof(*ctbl) + 294 - clipt_size*sizeof(struct list_head)); 293 + ctbl = kvzalloc(sizeof(*ctbl) + 294 + clipt_size*sizeof(struct list_head), GFP_KERNEL); 295 295 if (!ctbl) 296 296 return NULL; 297 297 ··· 305 305 for (i = 0; i < ctbl->clipt_size; ++i) 306 306 INIT_LIST_HEAD(&ctbl->hash_list[i]); 307 307 308 - cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry)); 308 + cl_list = kvzalloc(clipt_size*sizeof(struct clip_entry), GFP_KERNEL); 309 309 if (!cl_list) { 310 - t4_free_mem(ctbl); 310 + kvfree(ctbl); 311 311 return NULL; 312 312 } 313 313 ctbl->cl_list = (void *)cl_list; ··· 326 326 327 327 if (ctbl) { 328 328 if (ctbl->cl_list) 329 - t4_free_mem(ctbl->cl_list); 330 - t4_free_mem(ctbl); 329 + kvfree(ctbl->cl_list); 330 + kvfree(ctbl); 331 331 } 332 332 } 333 333 EXPORT_SYMBOL(t4_cleanup_clip_tbl);
-3
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
··· 1184 1184 void t4_os_portmod_changed(const struct adapter *adap, int port_id); 1185 1185 void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); 1186 1186 1187 - void *t4_alloc_mem(size_t size); 1188 - 1189 1187 void t4_free_sge_resources(struct adapter *adap); 1190 1188 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); 1191 1189 irq_handler_t t4_intr_handler(struct adapter *adap); ··· 1555 1557 int rateunit, int ratemode, int channel, int class, 1556 1558 int minrate, int maxrate, int weight, int pktsize); 1557 1559 void t4_sge_decode_idma_state(struct adapter *adapter, int state); 1558 - void t4_free_mem(void *addr); 1559 1560 void t4_idma_monitor_init(struct adapter *adapter, 1560 1561 struct sge_idma_monitor_state *idma); 1561 1562 void t4_idma_monitor(struct adapter *adapter,
+5 -5
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
··· 2634 2634 if (count > avail - pos) 2635 2635 count = avail - pos; 2636 2636 2637 - data = t4_alloc_mem(count); 2637 + data = kvzalloc(count, GFP_KERNEL); 2638 2638 if (!data) 2639 2639 return -ENOMEM; 2640 2640 ··· 2642 2642 ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ); 2643 2643 spin_unlock(&adap->win0_lock); 2644 2644 if (ret) { 2645 - t4_free_mem(data); 2645 + kvfree(data); 2646 2646 return ret; 2647 2647 } 2648 2648 ret = copy_to_user(buf, data, count); 2649 2649 2650 - t4_free_mem(data); 2650 + kvfree(data); 2651 2651 if (ret) 2652 2652 return -EFAULT; 2653 2653 ··· 2753 2753 adap->sge.egr_sz, adap->sge.blocked_fl); 2754 2754 len += sprintf(buf + len, "\n"); 2755 2755 size = simple_read_from_buffer(ubuf, count, ppos, buf, len); 2756 - t4_free_mem(buf); 2756 + kvfree(buf); 2757 2757 return size; 2758 2758 } 2759 2759 ··· 2773 2773 return err; 2774 2774 2775 2775 bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz); 2776 - t4_free_mem(t); 2776 + kvfree(t); 2777 2777 return count; 2778 2778 } 2779 2779
+4 -4
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
··· 969 969 { 970 970 int i, err = 0; 971 971 struct adapter *adapter = netdev2adap(dev); 972 - u8 *buf = t4_alloc_mem(EEPROMSIZE); 972 + u8 *buf = kvzalloc(EEPROMSIZE, GFP_KERNEL); 973 973 974 974 if (!buf) 975 975 return -ENOMEM; ··· 980 980 981 981 if (!err) 982 982 memcpy(data, buf + e->offset, e->len); 983 - t4_free_mem(buf); 983 + kvfree(buf); 984 984 return err; 985 985 } 986 986 ··· 1009 1009 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { 1010 1010 /* RMW possibly needed for first or last words. 1011 1011 */ 1012 - buf = t4_alloc_mem(aligned_len); 1012 + buf = kvzalloc(aligned_len, GFP_KERNEL); 1013 1013 if (!buf) 1014 1014 return -ENOMEM; 1015 1015 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); ··· 1037 1037 err = t4_seeprom_wp(adapter, true); 1038 1038 out: 1039 1039 if (buf != data) 1040 - t4_free_mem(buf); 1040 + kvfree(buf); 1041 1041 return err; 1042 1042 } 1043 1043
+5 -26
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 880 880 return err; 881 881 } 882 882 883 - /* 884 - * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. 885 - * The allocated memory is cleared. 886 - */ 887 - void *t4_alloc_mem(size_t size) 888 - { 889 - void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 890 - 891 - if (!p) 892 - p = vzalloc(size); 893 - return p; 894 - } 895 - 896 - /* 897 - * Free memory allocated through alloc_mem(). 898 - */ 899 - void t4_free_mem(void *addr) 900 - { 901 - kvfree(addr); 902 - } 903 - 904 883 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, 905 884 void *accel_priv, select_queue_fallback_t fallback) 906 885 { ··· 1278 1299 max_ftids * sizeof(*t->ftid_tab) + 1279 1300 ftid_bmap_size * sizeof(long); 1280 1301 1281 - t->tid_tab = t4_alloc_mem(size); 1302 + t->tid_tab = kvzalloc(size, GFP_KERNEL); 1282 1303 if (!t->tid_tab) 1283 1304 return -ENOMEM; 1284 1305 ··· 3424 3445 /* allocate memory to read the header of the firmware on the 3425 3446 * card 3426 3447 */ 3427 - card_fw = t4_alloc_mem(sizeof(*card_fw)); 3448 + card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL); 3428 3449 3429 3450 /* Get FW from from /lib/firmware/ */ 3430 3451 ret = request_firmware(&fw, fw_info->fw_mod_name, ··· 3444 3465 3445 3466 /* Cleaning up */ 3446 3467 release_firmware(fw); 3447 - t4_free_mem(card_fw); 3468 + kvfree(card_fw); 3448 3469 3449 3470 if (ret < 0) 3450 3471 goto bye; ··· 4449 4470 { 4450 4471 unsigned int i; 4451 4472 4452 - t4_free_mem(adapter->l2t); 4473 + kvfree(adapter->l2t); 4453 4474 t4_cleanup_sched(adapter); 4454 - t4_free_mem(adapter->tids.tid_tab); 4475 + kvfree(adapter->tids.tid_tab); 4455 4476 cxgb4_cleanup_tc_u32(adapter); 4456 4477 kfree(adapter->sge.egr_map); 4457 4478 kfree(adapter->sge.ingr_map);
+7 -7
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
··· 432 432 for (i = 0; i < t->size; i++) { 433 433 struct cxgb4_link *link = &t->table[i]; 434 434 435 - t4_free_mem(link->tid_map); 435 + kvfree(link->tid_map); 436 436 } 437 - t4_free_mem(adap->tc_u32); 437 + kvfree(adap->tc_u32); 438 438 } 439 439 440 440 struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap) ··· 446 446 if (!max_tids) 447 447 return NULL; 448 448 449 - t = t4_alloc_mem(sizeof(*t) + 450 - (max_tids * sizeof(struct cxgb4_link))); 449 + t = kvzalloc(sizeof(*t) + 450 + (max_tids * sizeof(struct cxgb4_link)), GFP_KERNEL); 451 451 if (!t) 452 452 return NULL; 453 453 ··· 458 458 unsigned int bmap_size; 459 459 460 460 bmap_size = BITS_TO_LONGS(max_tids); 461 - link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size); 461 + link->tid_map = kvzalloc(sizeof(unsigned long) * bmap_size, GFP_KERNEL); 462 462 if (!link->tid_map) 463 463 goto out_no_mem; 464 464 bitmap_zero(link->tid_map, max_tids); ··· 471 471 struct cxgb4_link *link = &t->table[i]; 472 472 473 473 if (link->tid_map) 474 - t4_free_mem(link->tid_map); 474 + kvfree(link->tid_map); 475 475 } 476 476 477 477 if (t) 478 - t4_free_mem(t); 478 + kvfree(t); 479 479 480 480 return NULL; 481 481 }
+1 -1
drivers/net/ethernet/chelsio/cxgb4/l2t.c
··· 646 646 if (l2t_size < L2T_MIN_HASH_BUCKETS) 647 647 return NULL; 648 648 649 - d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry)); 649 + d = kvzalloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry), GFP_KERNEL); 650 650 if (!d) 651 651 return NULL; 652 652
+6 -6
drivers/net/ethernet/chelsio/cxgb4/sched.c
··· 177 177 } 178 178 179 179 list_del(&qe->list); 180 - t4_free_mem(qe); 180 + kvfree(qe); 181 181 if (atomic_dec_and_test(&e->refcnt)) { 182 182 e->state = SCHED_STATE_UNUSED; 183 183 memset(&e->info, 0, sizeof(e->info)); ··· 201 201 if (p->queue < 0 || p->queue >= pi->nqsets) 202 202 return -ERANGE; 203 203 204 - qe = t4_alloc_mem(sizeof(struct sched_queue_entry)); 204 + qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL); 205 205 if (!qe) 206 206 return -ENOMEM; 207 207 ··· 211 211 /* Unbind queue from any existing class */ 212 212 err = t4_sched_queue_unbind(pi, p); 213 213 if (err) { 214 - t4_free_mem(qe); 214 + kvfree(qe); 215 215 goto out; 216 216 } 217 217 ··· 224 224 spin_lock(&e->lock); 225 225 err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true); 226 226 if (err) { 227 - t4_free_mem(qe); 227 + kvfree(qe); 228 228 spin_unlock(&e->lock); 229 229 goto out; 230 230 } ··· 512 512 struct sched_table *s; 513 513 unsigned int i; 514 514 515 - s = t4_alloc_mem(sizeof(*s) + sched_size * sizeof(struct sched_class)); 515 + s = kvzalloc(sizeof(*s) + sched_size * sizeof(struct sched_class), GFP_KERNEL); 516 516 if (!s) 517 517 return NULL; 518 518 ··· 548 548 t4_sched_class_free(pi, e); 549 549 write_unlock(&s->rw_lock); 550 550 } 551 - t4_free_mem(s); 551 + kvfree(s); 552 552 } 553 553 }
+3 -6
drivers/net/ethernet/mellanox/mlx4/en_tx.c
··· 70 70 ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS; 71 71 72 72 tmp = size * sizeof(struct mlx4_en_tx_info); 73 - ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node); 73 + ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node); 74 74 if (!ring->tx_info) { 75 - ring->tx_info = vmalloc(tmp); 76 - if (!ring->tx_info) { 77 - err = -ENOMEM; 78 - goto err_ring; 79 - } 75 + err = -ENOMEM; 76 + goto err_ring; 80 77 } 81 78 82 79 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
+3 -6
drivers/net/ethernet/mellanox/mlx4/mr.c
··· 115 115 116 116 for (i = 0; i <= buddy->max_order; ++i) { 117 117 s = BITS_TO_LONGS(1 << (buddy->max_order - i)); 118 - buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); 119 - if (!buddy->bits[i]) { 120 - buddy->bits[i] = vzalloc(s * sizeof(long)); 121 - if (!buddy->bits[i]) 122 - goto err_out_free; 123 - } 118 + buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO); 119 + if (!buddy->bits[i]) 120 + goto err_out_free; 124 121 } 125 122 126 123 set_bit(0, buddy->bits[buddy->max_order]);
+1 -4
drivers/nvdimm/dimm_devs.c
··· 106 106 return -ENXIO; 107 107 } 108 108 109 - ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL); 110 - if (!ndd->data) 111 - ndd->data = vmalloc(ndd->nsarea.config_size); 112 - 109 + ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL); 113 110 if (!ndd->data) 114 111 return -ENOMEM; 115 112
+1 -10
drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
··· 45 45 void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size, 46 46 gfp_t flags) 47 47 { 48 - void *ret; 49 - 50 - ret = kzalloc_node(size, flags | __GFP_NOWARN, 51 - cfs_cpt_spread_node(cptab, cpt)); 52 - if (!ret) { 53 - WARN_ON(!(flags & (__GFP_FS | __GFP_HIGH))); 54 - ret = vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt)); 55 - } 56 - 57 - return ret; 48 + return kvzalloc_node(size, flags, cfs_cpt_spread_node(cptab, cpt)); 58 49 } 59 50 EXPORT_SYMBOL(libcfs_kvzalloc_cpt);
+1 -13
drivers/xen/evtchn.c
··· 87 87 bool enabled; 88 88 }; 89 89 90 - static evtchn_port_t *evtchn_alloc_ring(unsigned int size) 91 - { 92 - evtchn_port_t *ring; 93 - size_t s = size * sizeof(*ring); 94 - 95 - ring = kmalloc(s, GFP_KERNEL); 96 - if (!ring) 97 - ring = vmalloc(s); 98 - 99 - return ring; 100 - } 101 - 102 90 static void evtchn_free_ring(evtchn_port_t *ring) 103 91 { 104 92 kvfree(ring); ··· 322 334 else 323 335 new_size = 2 * u->ring_size; 324 336 325 - new_ring = evtchn_alloc_ring(new_size); 337 + new_ring = kvmalloc(new_size * sizeof(*new_ring), GFP_KERNEL); 326 338 if (!new_ring) 327 339 return -ENOMEM; 328 340
+3 -6
fs/btrfs/ctree.c
··· 5392 5392 goto out; 5393 5393 } 5394 5394 5395 - tmp_buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN); 5395 + tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); 5396 5396 if (!tmp_buf) { 5397 - tmp_buf = vmalloc(fs_info->nodesize); 5398 - if (!tmp_buf) { 5399 - ret = -ENOMEM; 5400 - goto out; 5401 - } 5397 + ret = -ENOMEM; 5398 + goto out; 5402 5399 } 5403 5400 5404 5401 left_path->search_commit_root = 1;
+3 -6
fs/btrfs/ioctl.c
··· 3539 3539 u64 last_dest_end = destoff; 3540 3540 3541 3541 ret = -ENOMEM; 3542 - buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN); 3543 - if (!buf) { 3544 - buf = vmalloc(fs_info->nodesize); 3545 - if (!buf) 3546 - return ret; 3547 - } 3542 + buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); 3543 + if (!buf) 3544 + return ret; 3548 3545 3549 3546 path = btrfs_alloc_path(); 3550 3547 if (!path) {
+9 -18
fs/btrfs/send.c
··· 6360 6360 sctx->clone_roots_cnt = arg->clone_sources_count; 6361 6361 6362 6362 sctx->send_max_size = BTRFS_SEND_BUF_SIZE; 6363 - sctx->send_buf = kmalloc(sctx->send_max_size, GFP_KERNEL | __GFP_NOWARN); 6363 + sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL); 6364 6364 if (!sctx->send_buf) { 6365 - sctx->send_buf = vmalloc(sctx->send_max_size); 6366 - if (!sctx->send_buf) { 6367 - ret = -ENOMEM; 6368 - goto out; 6369 - } 6365 + ret = -ENOMEM; 6366 + goto out; 6370 6367 } 6371 6368 6372 - sctx->read_buf = kmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL | __GFP_NOWARN); 6369 + sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL); 6373 6370 if (!sctx->read_buf) { 6374 - sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE); 6375 - if (!sctx->read_buf) { 6376 - ret = -ENOMEM; 6377 - goto out; 6378 - } 6371 + ret = -ENOMEM; 6372 + goto out; 6379 6373 } 6380 6374 6381 6375 sctx->pending_dir_moves = RB_ROOT; ··· 6390 6396 alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources); 6391 6397 6392 6398 if (arg->clone_sources_count) { 6393 - clone_sources_tmp = kmalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN); 6399 + clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL); 6394 6400 if (!clone_sources_tmp) { 6395 - clone_sources_tmp = vmalloc(alloc_size); 6396 - if (!clone_sources_tmp) { 6397 - ret = -ENOMEM; 6398 - goto out; 6399 - } 6401 + ret = -ENOMEM; 6402 + goto out; 6400 6403 } 6401 6404 6402 6405 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
+3 -6
fs/ceph/file.c
··· 74 74 align = (unsigned long)(it->iov->iov_base + it->iov_offset) & 75 75 (PAGE_SIZE - 1); 76 76 npages = calc_pages_for(align, nbytes); 77 - pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL); 78 - if (!pages) { 79 - pages = vmalloc(sizeof(*pages) * npages); 80 - if (!pages) 81 - return ERR_PTR(-ENOMEM); 82 - } 77 + pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL); 78 + if (!pages) 79 + return ERR_PTR(-ENOMEM); 83 80 84 81 for (idx = 0; idx < npages; ) { 85 82 size_t start;
+1 -4
fs/select.c
··· 633 633 goto out_nofds; 634 634 635 635 alloc_size = 6 * size; 636 - bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN); 637 - if (!bits && alloc_size > PAGE_SIZE) 638 - bits = vmalloc(alloc_size); 639 - 636 + bits = kvmalloc(alloc_size, GFP_KERNEL); 640 637 if (!bits) 641 638 goto out_nofds; 642 639 }
+9 -18
fs/xattr.c
··· 431 431 if (size) { 432 432 if (size > XATTR_SIZE_MAX) 433 433 return -E2BIG; 434 - kvalue = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); 435 - if (!kvalue) { 436 - kvalue = vmalloc(size); 437 - if (!kvalue) 438 - return -ENOMEM; 439 - } 434 + kvalue = kvmalloc(size, GFP_KERNEL); 435 + if (!kvalue) 436 + return -ENOMEM; 440 437 if (copy_from_user(kvalue, value, size)) { 441 438 error = -EFAULT; 442 439 goto out; ··· 525 528 if (size) { 526 529 if (size > XATTR_SIZE_MAX) 527 530 size = XATTR_SIZE_MAX; 528 - kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 529 - if (!kvalue) { 530 - kvalue = vzalloc(size); 531 - if (!kvalue) 532 - return -ENOMEM; 533 - } 531 + kvalue = kvzalloc(size, GFP_KERNEL); 532 + if (!kvalue) 533 + return -ENOMEM; 534 534 } 535 535 536 536 error = vfs_getxattr(d, kname, kvalue, size); ··· 605 611 if (size) { 606 612 if (size > XATTR_LIST_MAX) 607 613 size = XATTR_LIST_MAX; 608 - klist = kmalloc(size, __GFP_NOWARN | GFP_KERNEL); 609 - if (!klist) { 610 - klist = vmalloc(size); 611 - if (!klist) 612 - return -ENOMEM; 613 - } 614 + klist = kvmalloc(size, GFP_KERNEL); 615 + if (!klist) 616 + return -ENOMEM; 614 617 } 615 618 616 619 error = vfs_listxattr(d, klist, size);
+1 -6
include/linux/mlx5/driver.h
··· 892 892 893 893 static inline void *mlx5_vzalloc(unsigned long size) 894 894 { 895 - void *rtn; 896 - 897 - rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 898 - if (!rtn) 899 - rtn = vzalloc(size); 900 - return rtn; 895 + return kvzalloc(size, GFP_KERNEL); 901 896 } 902 897 903 898 static inline u32 mlx5_base_mkey(const u32 key)
+8
include/linux/mm.h
··· 532 532 return kvmalloc(size, flags | __GFP_ZERO); 533 533 } 534 534 535 + static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags) 536 + { 537 + if (size != 0 && n > SIZE_MAX / size) 538 + return NULL; 539 + 540 + return kvmalloc(n * size, flags); 541 + } 542 + 535 543 extern void kvfree(const void *addr); 536 544 537 545 static inline atomic_t *compound_mapcount_ptr(struct page *page)
+1 -4
lib/iov_iter.c
··· 1028 1028 1029 1029 static struct page **get_pages_array(size_t n) 1030 1030 { 1031 - struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL); 1032 - if (!p) 1033 - p = vmalloc(n * sizeof(struct page *)); 1034 - return p; 1031 + return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL); 1035 1032 } 1036 1033 1037 1034 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
+1 -4
mm/frame_vector.c
··· 200 200 * Avoid higher order allocations, use vmalloc instead. It should 201 201 * be rare anyway. 202 202 */ 203 - if (size <= PAGE_SIZE) 204 - vec = kmalloc(size, GFP_KERNEL); 205 - else 206 - vec = vmalloc(size); 203 + vec = kvmalloc(size, GFP_KERNEL); 207 204 if (!vec) 208 205 return NULL; 209 206 vec->nr_allocated = nr_frames;
+1 -5
net/ipv4/inet_hashtables.c
··· 678 678 /* no more locks than number of hash buckets */ 679 679 nblocks = min(nblocks, hashinfo->ehash_mask + 1); 680 680 681 - hashinfo->ehash_locks = kmalloc_array(nblocks, locksz, 682 - GFP_KERNEL | __GFP_NOWARN); 683 - if (!hashinfo->ehash_locks) 684 - hashinfo->ehash_locks = vmalloc(nblocks * locksz); 685 - 681 + hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL); 686 682 if (!hashinfo->ehash_locks) 687 683 return -ENOMEM; 688 684
+1 -4
net/ipv4/tcp_metrics.c
··· 1011 1011 tcp_metrics_hash_log = order_base_2(slots); 1012 1012 size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log; 1013 1013 1014 - tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 1015 - if (!tcp_metrics_hash) 1016 - tcp_metrics_hash = vzalloc(size); 1017 - 1014 + tcp_metrics_hash = kvzalloc(size, GFP_KERNEL); 1018 1015 if (!tcp_metrics_hash) 1019 1016 return -ENOMEM; 1020 1017
+1 -4
net/mpls/af_mpls.c
··· 2005 2005 unsigned index; 2006 2006 2007 2007 if (size) { 2008 - labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); 2009 - if (!labels) 2010 - labels = vzalloc(size); 2011 - 2008 + labels = kvzalloc(size, GFP_KERNEL); 2012 2009 if (!labels) 2013 2010 goto nolabels; 2014 2011 }
+4 -17
net/netfilter/x_tables.c
··· 763 763 */ 764 764 unsigned int *xt_alloc_entry_offsets(unsigned int size) 765 765 { 766 - unsigned int *off; 766 + return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO); 767 767 768 - off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN); 769 - 770 - if (off) 771 - return off; 772 - 773 - if (size < (SIZE_MAX / sizeof(unsigned int))) 774 - off = vmalloc(size * sizeof(unsigned int)); 775 - 776 - return off; 777 768 } 778 769 EXPORT_SYMBOL(xt_alloc_entry_offsets); 779 770 ··· 1107 1116 1108 1117 size = sizeof(void **) * nr_cpu_ids; 1109 1118 if (size > PAGE_SIZE) 1110 - i->jumpstack = vzalloc(size); 1119 + i->jumpstack = kvzalloc(size, GFP_KERNEL); 1111 1120 else 1112 1121 i->jumpstack = kzalloc(size, GFP_KERNEL); 1113 1122 if (i->jumpstack == NULL) ··· 1129 1138 */ 1130 1139 size = sizeof(void *) * i->stacksize * 2u; 1131 1140 for_each_possible_cpu(cpu) { 1132 - if (size > PAGE_SIZE) 1133 - i->jumpstack[cpu] = vmalloc_node(size, 1134 - cpu_to_node(cpu)); 1135 - else 1136 - i->jumpstack[cpu] = kmalloc_node(size, 1137 - GFP_KERNEL, cpu_to_node(cpu)); 1141 + i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL, 1142 + cpu_to_node(cpu)); 1138 1143 if (i->jumpstack[cpu] == NULL) 1139 1144 /* 1140 1145 * Freeing will be done later on by the callers. The
+1 -4
net/netfilter/xt_recent.c
··· 388 388 } 389 389 390 390 sz = sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size; 391 - if (sz <= PAGE_SIZE) 392 - t = kzalloc(sz, GFP_KERNEL); 393 - else 394 - t = vzalloc(sz); 391 + t = kvzalloc(sz, GFP_KERNEL); 395 392 if (t == NULL) { 396 393 ret = -ENOMEM; 397 394 goto out;
+1 -4
net/sched/sch_choke.c
··· 376 376 if (mask != q->tab_mask) { 377 377 struct sk_buff **ntab; 378 378 379 - ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), 380 - GFP_KERNEL | __GFP_NOWARN); 381 - if (!ntab) 382 - ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *)); 379 + ntab = kvmalloc_array((mask + 1), sizeof(struct sk_buff *), GFP_KERNEL | __GFP_ZERO); 383 380 if (!ntab) 384 381 return -ENOMEM; 385 382
+6 -20
net/sched/sch_fq_codel.c
··· 446 446 return 0; 447 447 } 448 448 449 - static void *fq_codel_zalloc(size_t sz) 450 - { 451 - void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); 452 - 453 - if (!ptr) 454 - ptr = vzalloc(sz); 455 - return ptr; 456 - } 457 - 458 - static void fq_codel_free(void *addr) 459 - { 460 - kvfree(addr); 461 - } 462 - 463 449 static void fq_codel_destroy(struct Qdisc *sch) 464 450 { 465 451 struct fq_codel_sched_data *q = qdisc_priv(sch); 466 452 467 453 tcf_destroy_chain(&q->filter_list); 468 - fq_codel_free(q->backlogs); 469 - fq_codel_free(q->flows); 454 + kvfree(q->backlogs); 455 + kvfree(q->flows); 470 456 } 471 457 472 458 static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) ··· 479 493 } 480 494 481 495 if (!q->flows) { 482 - q->flows = fq_codel_zalloc(q->flows_cnt * 483 - sizeof(struct fq_codel_flow)); 496 + q->flows = kvzalloc(q->flows_cnt * 497 + sizeof(struct fq_codel_flow), GFP_KERNEL); 484 498 if (!q->flows) 485 499 return -ENOMEM; 486 - q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32)); 500 + q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL); 487 501 if (!q->backlogs) { 488 - fq_codel_free(q->flows); 502 + kvfree(q->flows); 489 503 return -ENOMEM; 490 504 } 491 505 for (i = 0; i < q->flows_cnt; i++) {
+9 -24
net/sched/sch_hhf.c
··· 467 467 rtnl_kfree_skbs(skb, skb); 468 468 } 469 469 470 - static void *hhf_zalloc(size_t sz) 471 - { 472 - void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); 473 - 474 - if (!ptr) 475 - ptr = vzalloc(sz); 476 - 477 - return ptr; 478 - } 479 - 480 - static void hhf_free(void *addr) 481 - { 482 - kvfree(addr); 483 - } 484 - 485 470 static void hhf_destroy(struct Qdisc *sch) 486 471 { 487 472 int i; 488 473 struct hhf_sched_data *q = qdisc_priv(sch); 489 474 490 475 for (i = 0; i < HHF_ARRAYS_CNT; i++) { 491 - hhf_free(q->hhf_arrays[i]); 492 - hhf_free(q->hhf_valid_bits[i]); 476 + kvfree(q->hhf_arrays[i]); 477 + kvfree(q->hhf_valid_bits[i]); 493 478 } 494 479 495 480 for (i = 0; i < HH_FLOWS_CNT; i++) { ··· 488 503 kfree(flow); 489 504 } 490 505 } 491 - hhf_free(q->hh_flows); 506 + kvfree(q->hh_flows); 492 507 } 493 508 494 509 static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = { ··· 594 609 595 610 if (!q->hh_flows) { 596 611 /* Initialize heavy-hitter flow table. */ 597 - q->hh_flows = hhf_zalloc(HH_FLOWS_CNT * 598 - sizeof(struct list_head)); 612 + q->hh_flows = kvzalloc(HH_FLOWS_CNT * 613 + sizeof(struct list_head), GFP_KERNEL); 599 614 if (!q->hh_flows) 600 615 return -ENOMEM; 601 616 for (i = 0; i < HH_FLOWS_CNT; i++) ··· 609 624 610 625 /* Initialize heavy-hitter filter arrays. */ 611 626 for (i = 0; i < HHF_ARRAYS_CNT; i++) { 612 - q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN * 613 - sizeof(u32)); 627 + q->hhf_arrays[i] = kvzalloc(HHF_ARRAYS_LEN * 628 + sizeof(u32), GFP_KERNEL); 614 629 if (!q->hhf_arrays[i]) { 615 630 /* Note: hhf_destroy() will be called 616 631 * by our caller. ··· 622 637 623 638 /* Initialize valid bits of heavy-hitter filter arrays. 
*/ 624 639 for (i = 0; i < HHF_ARRAYS_CNT; i++) { 625 - q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN / 626 - BITS_PER_BYTE); 640 + q->hhf_valid_bits[i] = kvzalloc(HHF_ARRAYS_LEN / 641 + BITS_PER_BYTE, GFP_KERNEL); 627 642 if (!q->hhf_valid_bits[i]) { 628 643 /* Note: hhf_destroy() will be called 629 644 * by our caller.
+1 -5
net/sched/sch_netem.c
··· 702 702 spinlock_t *root_lock; 703 703 struct disttable *d; 704 704 int i; 705 - size_t s; 706 705 707 706 if (n > NETEM_DIST_MAX) 708 707 return -EINVAL; 709 708 710 - s = sizeof(struct disttable) + n * sizeof(s16); 711 - d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN); 712 - if (!d) 713 - d = vmalloc(s); 709 + d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL); 714 710 if (!d) 715 711 return -ENOMEM; 716 712
+1 -5
net/sched/sch_sfq.c
··· 685 685 686 686 static void *sfq_alloc(size_t sz) 687 687 { 688 - void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN); 689 - 690 - if (!ptr) 691 - ptr = vmalloc(sz); 692 - return ptr; 688 + return kvmalloc(sz, GFP_KERNEL); 693 689 } 694 690 695 691 static void sfq_free(void *addr)
+6 -16
security/keys/keyctl.c
··· 101 101 102 102 if (_payload) { 103 103 ret = -ENOMEM; 104 - payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN); 105 - if (!payload) { 106 - if (plen <= PAGE_SIZE) 107 - goto error2; 108 - payload = vmalloc(plen); 109 - if (!payload) 110 - goto error2; 111 - } 104 + payload = kvmalloc(plen, GFP_KERNEL); 105 + if (!payload) 106 + goto error2; 112 107 113 108 ret = -EFAULT; 114 109 if (copy_from_user(payload, _payload, plen) != 0) ··· 1066 1071 1067 1072 if (from) { 1068 1073 ret = -ENOMEM; 1069 - payload = kmalloc(plen, GFP_KERNEL); 1070 - if (!payload) { 1071 - if (plen <= PAGE_SIZE) 1072 - goto error; 1073 - payload = vmalloc(plen); 1074 - if (!payload) 1075 - goto error; 1076 - } 1074 + payload = kvmalloc(plen, GFP_KERNEL); 1075 + if (!payload) 1076 + goto error; 1077 1077 1078 1078 ret = -EFAULT; 1079 1079 if (!copy_from_iter_full(payload, plen, from))