Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: GFP_NOIO -> GFP_NOFS

GFP_NOIO dates from the bcache days, when we operated under the block
layer. Now, GFP_NOFS is more appropriate, so switch all GFP_NOIO uses to
GFP_NOFS.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

+29 -29
+4 -4
fs/bcachefs/btree_io.c
··· 117 117 p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT); 118 118 if (!p) { 119 119 *used_mempool = true; 120 - p = mempool_alloc(&c->btree_bounce_pool, GFP_NOIO); 120 + p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS); 121 121 } 122 122 memalloc_nofs_restore(flags); 123 123 return p; ··· 937 937 /* We might get called multiple times on read retry: */ 938 938 b->written = 0; 939 939 940 - iter = mempool_alloc(&c->fill_iter, GFP_NOIO); 940 + iter = mempool_alloc(&c->fill_iter, GFP_NOFS); 941 941 sort_iter_init(iter, b); 942 942 iter->size = (btree_blocks(c) + 1) * 2; 943 943 ··· 1580 1580 bio = bio_alloc_bioset(NULL, 1581 1581 buf_pages(b->data, btree_bytes(c)), 1582 1582 REQ_OP_READ|REQ_SYNC|REQ_META, 1583 - GFP_NOIO, 1583 + GFP_NOFS, 1584 1584 &c->btree_bio); 1585 1585 rb = container_of(bio, struct btree_read_bio, bio); 1586 1586 rb->c = c; ··· 2077 2077 wbio = container_of(bio_alloc_bioset(NULL, 2078 2078 buf_pages(data, sectors_to_write << 9), 2079 2079 REQ_OP_WRITE|REQ_META, 2080 - GFP_NOIO, 2080 + GFP_NOFS, 2081 2081 &c->btree_bio), 2082 2082 struct btree_write_bio, wbio.bio); 2083 2083 wbio_init(&wbio->wbio.bio);
+1 -1
fs/bcachefs/btree_update_interior.c
··· 1092 1092 } 1093 1093 } 1094 1094 1095 - as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO); 1095 + as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS); 1096 1096 memset(as, 0, sizeof(*as)); 1097 1097 closure_init(&as->cl, NULL); 1098 1098 as->c = c;
+2 -2
fs/bcachefs/buckets.c
··· 433 433 WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX); 434 434 435 435 if (!d || d->used + more > d->size) { 436 - d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO); 436 + d = krealloc(d, alloc_size, GFP_NOFS|__GFP_ZERO); 437 437 438 438 BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX); 439 439 440 440 if (!d) { 441 - d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO); 441 + d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOFS); 442 442 memset(d, 0, REPLICAS_DELTA_LIST_MAX); 443 443 444 444 if (trans->fs_usage_deltas)
+6 -6
fs/bcachefs/compress.c
··· 28 28 29 29 BUG_ON(size > c->opts.encoded_extent_max); 30 30 31 - b = kmalloc(size, GFP_NOIO|__GFP_NOWARN); 31 + b = kmalloc(size, GFP_NOFS|__GFP_NOWARN); 32 32 if (b) 33 33 return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw }; 34 34 35 - b = mempool_alloc(&c->compression_bounce[rw], GFP_NOIO); 35 + b = mempool_alloc(&c->compression_bounce[rw], GFP_NOFS); 36 36 if (b) 37 37 return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw }; 38 38 ··· 94 94 BUG_ON(DIV_ROUND_UP(start.bi_size, PAGE_SIZE) > nr_pages); 95 95 96 96 pages = nr_pages > ARRAY_SIZE(stack_pages) 97 - ? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOIO) 97 + ? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS) 98 98 : stack_pages; 99 99 if (!pages) 100 100 goto bounce; ··· 177 177 .avail_out = dst_len, 178 178 }; 179 179 180 - workspace = mempool_alloc(&c->decompress_workspace, GFP_NOIO); 180 + workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS); 181 181 182 182 zlib_set_workspace(&strm, workspace); 183 183 zlib_inflateInit2(&strm, -MAX_WBITS); ··· 196 196 if (real_src_len > src_len - 4) 197 197 goto err; 198 198 199 - workspace = mempool_alloc(&c->decompress_workspace, GFP_NOIO); 199 + workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS); 200 200 ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound()); 201 201 202 202 ret = zstd_decompress_dctx(ctx, ··· 382 382 dst_data = bio_map_or_bounce(c, dst, WRITE); 383 383 src_data = bio_map_or_bounce(c, src, READ); 384 384 385 - workspace = mempool_alloc(&c->compress_workspace[compression_type], GFP_NOIO); 385 + workspace = mempool_alloc(&c->compress_workspace[compression_type], GFP_NOFS); 386 386 387 387 *src_len = src->bi_iter.bi_size; 388 388 *dst_len = dst->bi_iter.bi_size;
+2 -2
fs/bcachefs/debug.c
··· 47 47 bio = bio_alloc_bioset(ca->disk_sb.bdev, 48 48 buf_pages(n_sorted, btree_bytes(c)), 49 49 REQ_OP_READ|REQ_META, 50 - GFP_NOIO, 50 + GFP_NOFS, 51 51 &c->btree_bio); 52 52 bio->bi_iter.bi_sector = pick.ptr.offset; 53 53 bch2_bio_map(bio, n_sorted, btree_bytes(c)); ··· 211 211 bio = bio_alloc_bioset(ca->disk_sb.bdev, 212 212 buf_pages(n_ondisk, btree_bytes(c)), 213 213 REQ_OP_READ|REQ_META, 214 - GFP_NOIO, 214 + GFP_NOFS, 215 215 &c->btree_bio); 216 216 bio->bi_iter.bi_sector = pick.ptr.offset; 217 217 bch2_bio_map(bio, n_ondisk, btree_bytes(c));
+1 -1
fs/bcachefs/ec.c
··· 485 485 486 486 BUG_ON(!rbio->pick.has_ec); 487 487 488 - buf = kzalloc(sizeof(*buf), GFP_NOIO); 488 + buf = kzalloc(sizeof(*buf), GFP_NOFS); 489 489 if (!buf) 490 490 return -BCH_ERR_ENOMEM_ec_read_extent; 491 491
+10 -10
fs/bcachefs/io.c
··· 163 163 struct page *page; 164 164 165 165 if (likely(!*using_mempool)) { 166 - page = alloc_page(GFP_NOIO); 166 + page = alloc_page(GFP_NOFS); 167 167 if (unlikely(!page)) { 168 168 mutex_lock(&c->bio_bounce_pages_lock); 169 169 *using_mempool = true; ··· 172 172 } 173 173 } else { 174 174 pool_alloc: 175 - page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO); 175 + page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS); 176 176 } 177 177 178 178 return page; ··· 660 660 661 661 if (to_entry(ptr + 1) < ptrs.end) { 662 662 n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, 663 - GFP_NOIO, &ca->replica_set)); 663 + GFP_NOFS, &ca->replica_set)); 664 664 665 665 n->bio.bi_end_io = wbio->bio.bi_end_io; 666 666 n->bio.bi_private = wbio->bio.bi_private; ··· 976 976 pages = min(pages, BIO_MAX_VECS); 977 977 978 978 bio = bio_alloc_bioset(NULL, pages, 0, 979 - GFP_NOIO, &c->bio_write); 979 + GFP_NOFS, &c->bio_write); 980 980 wbio = wbio_init(bio); 981 981 wbio->put_bio = true; 982 982 /* copy WRITE_SYNC flag */ ··· 1314 1314 BUG_ON(total_output != total_input); 1315 1315 1316 1316 dst = bio_split(src, total_input >> 9, 1317 - GFP_NOIO, &c->bio_write); 1317 + GFP_NOFS, &c->bio_write); 1318 1318 wbio_init(dst)->put_bio = true; 1319 1319 /* copy WRITE_SYNC flag */ 1320 1320 dst->bi_opf = src->bi_opf; ··· 2013 2013 if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote)) 2014 2014 return NULL; 2015 2015 2016 - op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOIO); 2016 + op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOFS); 2017 2017 if (!op) 2018 2018 goto err; 2019 2019 ··· 2026 2026 */ 2027 2027 *rbio = kzalloc(sizeof(struct bch_read_bio) + 2028 2028 sizeof(struct bio_vec) * pages, 2029 - GFP_NOIO); 2029 + GFP_NOFS); 2030 2030 if (!*rbio) 2031 2031 goto err; 2032 2032 ··· 2034 2034 bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0); 2035 2035 2036 2036 if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9, 2037 - GFP_NOIO)) 2037 + 
GFP_NOIO)) 2037 + GFP_NOFS)) 2038 2038 goto err; 2039 2039 2040 2040 (*rbio)->bounce = true; ··· 2746 2746 rbio = rbio_init(bio_alloc_bioset(NULL, 2747 2747 DIV_ROUND_UP(sectors, PAGE_SECTORS), 2748 2748 0, 2749 - GFP_NOIO, 2749 + GFP_NOFS, 2750 2750 &c->bio_read_split), 2751 2751 orig->opts); 2752 2752 ··· 2762 2762 * from the whole bio, in which case we don't want to retry and 2763 2763 * lose the error) 2764 2764 */ 2765 - rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOIO, 2765 + rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOFS, 2766 2766 &c->bio_read_split), 2767 2767 orig->opts); 2768 2768 rbio->bio.bi_iter = iter;
+1 -1
fs/bcachefs/journal_io.c
··· 1438 1438 if (buf->buf_size >= new_size) 1439 1439 return; 1440 1440 1441 - new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN); 1441 + new_buf = kvpmalloc(new_size, GFP_NOFS|__GFP_NOWARN); 1442 1442 if (!new_buf) 1443 1443 return; 1444 1444
+1 -1
fs/bcachefs/journal_reclaim.c
··· 271 271 blkdev_issue_discard(ca->disk_sb.bdev, 272 272 bucket_to_sector(ca, 273 273 ja->buckets[ja->discard_idx]), 274 - ca->mi.bucket_size, GFP_NOIO); 274 + ca->mi.bucket_size, GFP_NOFS); 275 275 276 276 spin_lock(&j->lock); 277 277 ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
+1 -1
fs/bcachefs/keylist.c
··· 18 18 (old_buf && roundup_pow_of_two(oldsize) == newsize)) 19 19 return 0; 20 20 21 - new_keys = krealloc(old_buf, sizeof(u64) * newsize, GFP_NOIO); 21 + new_keys = krealloc(old_buf, sizeof(u64) * newsize, GFP_NOFS); 22 22 if (!new_keys) 23 23 return -ENOMEM; 24 24