Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: kill kvpmalloc()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

+49 -115
+4 -4
fs/bcachefs/btree_cache.c
··· 60 60 61 61 clear_btree_node_just_written(b); 62 62 63 - kvpfree(b->data, btree_buf_bytes(b)); 63 + kvfree(b->data); 64 64 b->data = NULL; 65 65 #ifdef __KERNEL__ 66 66 kvfree(b->aux_data); ··· 94 94 { 95 95 BUG_ON(b->data || b->aux_data); 96 96 97 - b->data = kvpmalloc(btree_buf_bytes(b), gfp); 97 + b->data = kvmalloc(btree_buf_bytes(b), gfp); 98 98 if (!b->data) 99 99 return -BCH_ERR_ENOMEM_btree_node_mem_alloc; 100 100 #ifdef __KERNEL__ ··· 107 107 b->aux_data = NULL; 108 108 #endif 109 109 if (!b->aux_data) { 110 - kvpfree(b->data, btree_buf_bytes(b)); 110 + kvfree(b->data); 111 111 b->data = NULL; 112 112 return -BCH_ERR_ENOMEM_btree_node_mem_alloc; 113 113 } ··· 408 408 if (c->verify_data) 409 409 list_move(&c->verify_data->list, &bc->live); 410 410 411 - kvpfree(c->verify_ondisk, c->opts.btree_node_size); 411 + kvfree(c->verify_ondisk); 412 412 413 413 for (i = 0; i < btree_id_nr_alive(c); i++) { 414 414 struct btree_root *r = bch2_btree_id_root(c, i);
+2 -4
fs/bcachefs/btree_gc.c
··· 1193 1193 genradix_free(&c->gc_stripes); 1194 1194 1195 1195 for_each_member_device(c, ca) { 1196 - kvpfree(rcu_dereference_protected(ca->buckets_gc, 1), 1197 - sizeof(struct bucket_array) + 1198 - ca->mi.nbuckets * sizeof(struct bucket)); 1196 + kvfree(rcu_dereference_protected(ca->buckets_gc, 1)); 1199 1197 ca->buckets_gc = NULL; 1200 1198 1201 1199 free_percpu(ca->usage_gc); ··· 1492 1494 static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only) 1493 1495 { 1494 1496 for_each_member_device(c, ca) { 1495 - struct bucket_array *buckets = kvpmalloc(sizeof(struct bucket_array) + 1497 + struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) + 1496 1498 ca->mi.nbuckets * sizeof(struct bucket), 1497 1499 GFP_KERNEL|__GFP_ZERO); 1498 1500 if (!buckets) {
+2 -2
fs/bcachefs/btree_io.c
··· 103 103 if (used_mempool) 104 104 mempool_free(p, &c->btree_bounce_pool); 105 105 else 106 - vpfree(p, size); 106 + kvfree(p); 107 107 } 108 108 109 109 static void *btree_bounce_alloc(struct bch_fs *c, size_t size, ··· 115 115 BUG_ON(size > c->opts.btree_node_size); 116 116 117 117 *used_mempool = false; 118 - p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT); 118 + p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT); 119 119 if (!p) { 120 120 *used_mempool = true; 121 121 p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
+1 -3
fs/bcachefs/btree_journal_iter.c
··· 447 447 struct genradix_iter iter; 448 448 449 449 genradix_for_each(&c->journal_entries, iter, i) 450 - if (*i) 451 - kvpfree(*i, offsetof(struct journal_replay, j) + 452 - vstruct_bytes(&(*i)->j)); 450 + kvfree(*i); 453 451 genradix_free(&c->journal_entries); 454 452 } 455 453
+11 -18
fs/bcachefs/buckets.c
··· 1335 1335 struct bucket_gens *buckets = 1336 1336 container_of(rcu, struct bucket_gens, rcu); 1337 1337 1338 - kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets); 1338 + kvfree(buckets); 1339 1339 } 1340 1340 1341 1341 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) ··· 1345 1345 bool resize = ca->bucket_gens != NULL; 1346 1346 int ret; 1347 1347 1348 - if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets, 1349 - GFP_KERNEL|__GFP_ZERO))) { 1348 + if (!(bucket_gens = kvmalloc(sizeof(struct bucket_gens) + nbuckets, 1349 + GFP_KERNEL|__GFP_ZERO))) { 1350 1350 ret = -BCH_ERR_ENOMEM_bucket_gens; 1351 1351 goto err; 1352 1352 } 1353 1353 1354 1354 if ((c->opts.buckets_nouse && 1355 - !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) * 1356 - sizeof(unsigned long), 1357 - GFP_KERNEL|__GFP_ZERO)))) { 1355 + !(buckets_nouse = kvmalloc(BITS_TO_LONGS(nbuckets) * 1356 + sizeof(unsigned long), 1357 + GFP_KERNEL|__GFP_ZERO)))) { 1358 1358 ret = -BCH_ERR_ENOMEM_buckets_nouse; 1359 1359 goto err; 1360 1360 } ··· 1397 1397 1398 1398 ret = 0; 1399 1399 err: 1400 - kvpfree(buckets_nouse, 1401 - BITS_TO_LONGS(nbuckets) * sizeof(unsigned long)); 1400 + kvfree(buckets_nouse); 1402 1401 if (bucket_gens) 1403 1402 call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu); 1404 1403 ··· 1406 1407 1407 1408 void bch2_dev_buckets_free(struct bch_dev *ca) 1408 1409 { 1409 - unsigned i; 1410 + kvfree(ca->buckets_nouse); 1411 + kvfree(rcu_dereference_protected(ca->bucket_gens, 1)); 1410 1412 1411 - kvpfree(ca->buckets_nouse, 1412 - BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long)); 1413 - kvpfree(rcu_dereference_protected(ca->bucket_gens, 1), 1414 - sizeof(struct bucket_gens) + ca->mi.nbuckets); 1415 - 1416 - for (i = 0; i < ARRAY_SIZE(ca->usage); i++) 1413 + for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++) 1417 1414 free_percpu(ca->usage[i]); 1418 1415 kfree(ca->usage_base); 1419 1416 } 1420 1417 1421 1418 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca) 1422 1419 { 1423 - unsigned i; 1424 - 1425 1420 ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL); 1426 1421 if (!ca->usage_base) 1427 1422 return -BCH_ERR_ENOMEM_usage_init; 1428 1423 1429 - for (i = 0; i < ARRAY_SIZE(ca->usage); i++) { 1424 + for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++) { 1430 1425 ca->usage[i] = alloc_percpu(struct bch_dev_usage); 1431 1426 if (!ca->usage[i]) 1432 1427 return -BCH_ERR_ENOMEM_usage_init;
+7 -7
fs/bcachefs/compress.c
··· 601 601 return 0; 602 602 603 603 if (!mempool_initialized(&c->compression_bounce[READ]) && 604 - mempool_init_kvpmalloc_pool(&c->compression_bounce[READ], 605 - 1, c->opts.encoded_extent_max)) 604 + mempool_init_kvmalloc_pool(&c->compression_bounce[READ], 605 + 1, c->opts.encoded_extent_max)) 606 606 return -BCH_ERR_ENOMEM_compression_bounce_read_init; 607 607 608 608 if (!mempool_initialized(&c->compression_bounce[WRITE]) && 609 - mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE], 610 - 1, c->opts.encoded_extent_max)) 609 + mempool_init_kvmalloc_pool(&c->compression_bounce[WRITE], 610 + 1, c->opts.encoded_extent_max)) 611 611 return -BCH_ERR_ENOMEM_compression_bounce_write_init; 612 612 613 613 for (i = compression_types; ··· 622 622 if (mempool_initialized(&c->compress_workspace[i->type])) 623 623 continue; 624 624 625 - if (mempool_init_kvpmalloc_pool( 625 + if (mempool_init_kvmalloc_pool( 626 626 &c->compress_workspace[i->type], 627 627 1, i->compress_workspace)) 628 628 return -BCH_ERR_ENOMEM_compression_workspace_init; 629 629 } 630 630 631 631 if (!mempool_initialized(&c->decompress_workspace) && 632 - mempool_init_kvpmalloc_pool(&c->decompress_workspace, 633 - 1, decompress_workspace_size)) 632 + mempool_init_kvmalloc_pool(&c->decompress_workspace, 633 + 1, decompress_workspace_size)) 634 634 return -BCH_ERR_ENOMEM_decompression_workspace_init; 635 635 636 636 return 0;
+3 -3
fs/bcachefs/debug.c
··· 137 137 mutex_lock(&c->verify_lock); 138 138 139 139 if (!c->verify_ondisk) { 140 - c->verify_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL); 140 + c->verify_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL); 141 141 if (!c->verify_ondisk) 142 142 goto out; 143 143 } ··· 199 199 return; 200 200 } 201 201 202 - n_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL); 202 + n_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL); 203 203 if (!n_ondisk) { 204 204 prt_printf(out, "memory allocation failure\n"); 205 205 goto out; ··· 293 293 out: 294 294 if (bio) 295 295 bio_put(bio); 296 - kvpfree(n_ondisk, btree_buf_bytes(b)); 296 + kvfree(n_ondisk); 297 297 percpu_ref_put(&ca->io_ref); 298 298 } 299 299
+2 -2
fs/bcachefs/ec.c
··· 504 504 unsigned i; 505 505 506 506 for (i = 0; i < s->v.nr_blocks; i++) { 507 - kvpfree(buf->data[i], buf->size << 9); 507 + kvfree(buf->data[i]); 508 508 buf->data[i] = NULL; 509 509 } 510 510 } ··· 531 531 memset(buf->valid, 0xFF, sizeof(buf->valid)); 532 532 533 533 for (i = 0; i < v->nr_blocks; i++) { 534 - buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL); 534 + buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL); 535 535 if (!buf->data[i]) 536 536 goto err; 537 537 }
+2 -2
fs/bcachefs/fifo.h
··· 24 24 (fifo)->mask = (fifo)->size \ 25 25 ? roundup_pow_of_two((fifo)->size) - 1 \ 26 26 : 0; \ 27 - (fifo)->data = kvpmalloc(fifo_buf_size(fifo), (_gfp)); \ 27 + (fifo)->data = kvmalloc(fifo_buf_size(fifo), (_gfp)); \ 28 28 }) 29 29 30 30 #define free_fifo(fifo) \ 31 31 do { \ 32 - kvpfree((fifo)->data, fifo_buf_size(fifo)); \ 32 + kvfree((fifo)->data); \ 33 33 (fifo)->data = NULL; \ 34 34 } while (0) 35 35
+2 -2
fs/bcachefs/journal.c
··· 1343 1343 darray_exit(&j->early_journal_entries); 1344 1344 1345 1345 for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) 1346 - kvpfree(j->buf[i].data, j->buf[i].buf_size); 1346 + kvfree(j->buf[i].data); 1347 1347 free_fifo(&j->pin); 1348 1348 } 1349 1349 ··· 1372 1372 1373 1373 for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) { 1374 1374 j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN; 1375 - j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL); 1375 + j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL); 1376 1376 if (!j->buf[i].data) 1377 1377 return -BCH_ERR_ENOMEM_journal_buf; 1378 1378 j->buf[i].idx = i;
+7 -8
fs/bcachefs/journal_io.c
··· 84 84 85 85 BUG_ON(*p != i); 86 86 *p = NULL; 87 - kvpfree(i, offsetof(struct journal_replay, j) + 88 - vstruct_bytes(&i->j)); 87 + kvfree(i); 89 88 } 90 89 91 90 static void journal_replay_free(struct bch_fs *c, struct journal_replay *i) ··· 195 196 goto out; 196 197 } 197 198 replace: 198 - i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL); 199 + i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL); 199 200 if (!i) 200 201 return -BCH_ERR_ENOMEM_journal_entry_add; 201 202 ··· 964 965 return -BCH_ERR_ENOMEM_journal_read_buf_realloc; 965 966 966 967 new_size = roundup_pow_of_two(new_size); 967 - n = kvpmalloc(new_size, GFP_KERNEL); 968 + n = kvmalloc(new_size, GFP_KERNEL); 968 969 if (!n) 969 970 return -BCH_ERR_ENOMEM_journal_read_buf_realloc; 970 971 971 - kvpfree(b->data, b->size); 972 + kvfree(b->data); 972 973 b->data = n; 973 974 b->size = new_size; 974 975 return 0; ··· 1194 1195 ja->dirty_idx = (ja->cur_idx + 1) % ja->nr; 1195 1196 out: 1196 1197 bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret); 1197 - kvpfree(buf.data, buf.size); 1198 + kvfree(buf.data); 1198 1199 percpu_ref_put(&ca->io_ref); 1199 1200 closure_return(cl); 1200 1201 return; ··· 1575 1576 if (bch2_btree_write_buffer_resize(c, btree_write_buffer_size)) 1576 1577 return; 1577 1578 1578 - new_buf = kvpmalloc(new_size, GFP_NOFS|__GFP_NOWARN); 1579 + new_buf = kvmalloc(new_size, GFP_NOFS|__GFP_NOWARN); 1579 1580 if (!new_buf) 1580 1581 return; 1581 1582 ··· 1586 1587 swap(buf->buf_size, new_size); 1587 1588 spin_unlock(&j->lock); 1588 1589 1589 - kvpfree(new_buf, new_size); 1590 + kvfree(new_buf); 1590 1591 } 1591 1592 1592 1593 static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
+4 -4
fs/bcachefs/super.c
··· 576 576 destroy_workqueue(c->btree_update_wq); 577 577 578 578 bch2_free_super(&c->disk_sb); 579 - kvpfree(c, sizeof(*c)); 579 + kvfree(c); 580 580 module_put(THIS_MODULE); 581 581 } 582 582 ··· 715 715 unsigned i, iter_size; 716 716 int ret = 0; 717 717 718 - c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO); 718 + c = kvmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO); 719 719 if (!c) { 720 720 c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc); 721 721 goto out; ··· 882 882 BIOSET_NEED_BVECS) || 883 883 !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) || 884 884 !(c->online_reserved = alloc_percpu(u64)) || 885 - mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1, 886 - c->opts.btree_node_size) || 885 + mempool_init_kvmalloc_pool(&c->btree_bounce_pool, 1, 886 + c->opts.btree_node_size) || 887 887 mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) || 888 888 !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits, 889 889 sizeof(u64), GFP_KERNEL))) {
-22
fs/bcachefs/util.c
··· 1007 1007 } 1008 1008 } 1009 1009 1010 - static void mempool_free_vp(void *element, void *pool_data) 1011 - { 1012 - size_t size = (size_t) pool_data; 1013 - 1014 - vpfree(element, size); 1015 - } 1016 - 1017 - static void *mempool_alloc_vp(gfp_t gfp_mask, void *pool_data) 1018 - { 1019 - size_t size = (size_t) pool_data; 1020 - 1021 - return vpmalloc(size, gfp_mask); 1022 - } 1023 - 1024 - int mempool_init_kvpmalloc_pool(mempool_t *pool, int min_nr, size_t size) 1025 - { 1026 - return size < PAGE_SIZE 1027 - ? mempool_init_kmalloc_pool(pool, min_nr, size) 1028 - : mempool_init(pool, min_nr, mempool_alloc_vp, 1029 - mempool_free_vp, (void *) size); 1030 - } 1031 - 1032 1010 #if 0 1033 1011 void eytzinger1_test(void) 1034 1012 {
+2 -34
fs/bcachefs/util.h
··· 53 53 PAGE_SIZE); 54 54 } 55 55 56 - static inline void vpfree(void *p, size_t size) 57 - { 58 - if (is_vmalloc_addr(p)) 59 - vfree(p); 60 - else 61 - free_pages((unsigned long) p, get_order(size)); 62 - } 63 - 64 - static inline void *vpmalloc(size_t size, gfp_t gfp_mask) 65 - { 66 - return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN, 67 - get_order(size)) ?: 68 - __vmalloc(size, gfp_mask); 69 - } 70 - 71 - static inline void kvpfree(void *p, size_t size) 72 - { 73 - if (size < PAGE_SIZE) 74 - kfree(p); 75 - else 76 - vpfree(p, size); 77 - } 78 - 79 - static inline void *kvpmalloc(size_t size, gfp_t gfp_mask) 80 - { 81 - return size < PAGE_SIZE 82 - ? kmalloc(size, gfp_mask) 83 - : vpmalloc(size, gfp_mask); 84 - } 85 - 86 - int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t); 87 - 88 56 #define HEAP(type) \ 89 57 struct { \ 90 58 size_t size, used; \ ··· 65 97 ({ \ 66 98 (heap)->used = 0; \ 67 99 (heap)->size = (_size); \ 68 - (heap)->data = kvpmalloc((heap)->size * sizeof((heap)->data[0]),\ 100 + (heap)->data = kvmalloc((heap)->size * sizeof((heap)->data[0]),\ 69 101 (gfp)); \ 70 102 }) 71 103 72 104 #define free_heap(heap) \ 73 105 do { \ 74 - kvpfree((heap)->data, (heap)->size * sizeof((heap)->data[0])); \ 106 + kvfree((heap)->data); \ 75 107 (heap)->data = NULL; \ 76 108 } while (0) 77 109