Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: pass a block_device and opf to bio_alloc_bioset

Pass the block_device and operation that we plan to use this bio for to
bio_alloc_bioset to optimize the assignment. NULL/0 can be passed, both
for the passthrough case on a raw request_queue and to temporarily avoid
refactoring some nasty code.

Also move the gfp_mask argument after the nr_vecs argument for a much
more logical calling convention matching what most of the kernel does.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220124091107.642561-16-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Christoph Hellwig and committed by
Jens Axboe
609be106 0a3140ea

+75 -79
+18 -12
block/bio.c
··· 417 417 418 418 /** 419 419 * bio_alloc_bioset - allocate a bio for I/O 420 + * @bdev: block device to allocate the bio for (can be %NULL) 421 + * @nr_vecs: number of bvecs to pre-allocate 422 + * @opf: operation and flags for bio 420 423 * @gfp_mask: the GFP_* mask given to the slab allocator 421 - * @nr_iovecs: number of iovecs to pre-allocate 422 424 * @bs: the bio_set to allocate from. 423 425 * 424 426 * Allocate a bio from the mempools in @bs. ··· 449 447 * 450 448 * Returns: Pointer to new bio on success, NULL on failure. 451 449 */ 452 - struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs, 450 + struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, 451 + unsigned int opf, gfp_t gfp_mask, 453 452 struct bio_set *bs) 454 453 { 455 454 gfp_t saved_gfp = gfp_mask; 456 455 struct bio *bio; 457 456 void *p; 458 457 459 - /* should not use nobvec bioset for nr_iovecs > 0 */ 460 - if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0)) 458 + /* should not use nobvec bioset for nr_vecs > 0 */ 459 + if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) 461 460 return NULL; 462 461 463 462 /* ··· 495 492 return NULL; 496 493 497 494 bio = p + bs->front_pad; 498 - if (nr_iovecs > BIO_INLINE_VECS) { 495 + if (nr_vecs > BIO_INLINE_VECS) { 499 496 struct bio_vec *bvl = NULL; 500 497 501 - bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask); 498 + bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); 502 499 if (!bvl && gfp_mask != saved_gfp) { 503 500 punt_bios_to_rescuer(bs); 504 501 gfp_mask = saved_gfp; 505 - bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask); 502 + bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); 506 503 } 507 504 if (unlikely(!bvl)) 508 505 goto err_free; 509 506 510 - bio_init(bio, bvl, nr_iovecs); 511 - } else if (nr_iovecs) { 507 + bio_init(bio, bvl, nr_vecs); 508 + } else if (nr_vecs) { 512 509 bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS); 513 510 
} else { 514 511 bio_init(bio, NULL, 0); 515 512 } 516 513 517 514 bio->bi_pool = bs; 515 + if (bdev) 516 + bio_set_dev(bio, bdev); 517 + bio->bi_opf = opf; 518 518 return bio; 519 519 520 520 err_free: ··· 773 767 { 774 768 struct bio *b; 775 769 776 - b = bio_alloc_bioset(gfp_mask, 0, bs); 770 + b = bio_alloc_bioset(NULL, 0, 0, gfp_mask, bs); 777 771 if (!b) 778 772 return NULL; 779 773 ··· 1749 1743 struct bio *bio; 1750 1744 1751 1745 if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS) 1752 - return bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs); 1746 + return bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs); 1753 1747 1754 1748 cache = per_cpu_ptr(bs->cache, get_cpu()); 1755 1749 if (cache->free_list) { ··· 1763 1757 return bio; 1764 1758 } 1765 1759 put_cpu(); 1766 - bio = bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs); 1760 + bio = bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs); 1767 1761 bio_set_flag(bio, BIO_PERCPU_CACHE); 1768 1762 return bio; 1769 1763 }
+2 -4
block/bounce.c
··· 165 165 * asking for trouble and would force extra work on 166 166 * __bio_clone_fast() anyways. 167 167 */ 168 - bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), 169 - &bounce_bio_set); 170 - bio->bi_bdev = bio_src->bi_bdev; 168 + bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src), 169 + bio_src->bi_opf, GFP_NOIO, &bounce_bio_set); 171 170 if (bio_flagged(bio_src, BIO_REMAPPED)) 172 171 bio_set_flag(bio, BIO_REMAPPED); 173 - bio->bi_opf = bio_src->bi_opf; 174 172 bio->bi_ioprio = bio_src->bi_ioprio; 175 173 bio->bi_write_hint = bio_src->bi_write_hint; 176 174 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
+2 -3
drivers/block/drbd/drbd_actlog.c
··· 138 138 op_flags |= REQ_FUA | REQ_PREFLUSH; 139 139 op_flags |= REQ_SYNC; 140 140 141 - bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set); 142 - bio_set_dev(bio, bdev->md_bdev); 141 + bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags, GFP_NOIO, 142 + &drbd_md_io_bio_set); 143 143 bio->bi_iter.bi_sector = sector; 144 144 err = -EIO; 145 145 if (bio_add_page(bio, device->md_io.page, size, 0) != size) 146 146 goto out; 147 147 bio->bi_private = device; 148 148 bio->bi_end_io = drbd_md_endio; 149 - bio_set_op_attrs(bio, op, op_flags); 150 149 151 150 if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL) 152 151 /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
+3 -4
drivers/block/drbd/drbd_bitmap.c
··· 976 976 977 977 static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local) 978 978 { 979 - struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set); 980 979 struct drbd_device *device = ctx->device; 980 + unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE; 981 + struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, 982 + GFP_NOIO, &drbd_md_io_bio_set); 981 983 struct drbd_bitmap *b = device->bitmap; 982 984 struct page *page; 983 985 unsigned int len; 984 - unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE; 985 986 986 987 sector_t on_disk_sector = 987 988 device->ldev->md.md_offset + device->ldev->md.bm_offset; ··· 1007 1006 bm_store_page_idx(page, page_nr); 1008 1007 } else 1009 1008 page = b->bm_pages[page_nr]; 1010 - bio_set_dev(bio, device->ldev->md_bdev); 1011 1009 bio->bi_iter.bi_sector = on_disk_sector; 1012 1010 /* bio_add_page of a single page to an empty bio will always succeed, 1013 1011 * according to api. Do we want to assert that? */ 1014 1012 bio_add_page(bio, page, len, 0); 1015 1013 bio->bi_private = ctx; 1016 1014 bio->bi_end_io = drbd_bm_endio; 1017 - bio_set_op_attrs(bio, op, 0); 1018 1015 1019 1016 if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { 1020 1017 bio_io_error(bio);
+5 -7
drivers/md/bcache/request.c
··· 913 913 /* btree_search_recurse()'s btree iterator is no good anymore */ 914 914 ret = miss == bio ? MAP_DONE : -EINTR; 915 915 916 - cache_bio = bio_alloc_bioset(GFP_NOWAIT, 916 + cache_bio = bio_alloc_bioset(miss->bi_bdev, 917 917 DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS), 918 - &dc->disk.bio_split); 918 + 0, GFP_NOWAIT, &dc->disk.bio_split); 919 919 if (!cache_bio) 920 920 goto out_submit; 921 921 922 922 cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector; 923 - bio_copy_dev(cache_bio, miss); 924 923 cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; 925 924 926 925 cache_bio->bi_end_io = backing_request_endio; ··· 1024 1025 */ 1025 1026 struct bio *flush; 1026 1027 1027 - flush = bio_alloc_bioset(GFP_NOIO, 0, 1028 - &dc->disk.bio_split); 1028 + flush = bio_alloc_bioset(bio->bi_bdev, 0, 1029 + REQ_OP_WRITE | REQ_PREFLUSH, 1030 + GFP_NOIO, &dc->disk.bio_split); 1029 1031 if (!flush) { 1030 1032 s->iop.status = BLK_STS_RESOURCE; 1031 1033 goto insert_data; 1032 1034 } 1033 - bio_copy_dev(flush, bio); 1034 1035 flush->bi_end_io = backing_request_endio; 1035 1036 flush->bi_private = cl; 1036 - flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 1037 1037 /* I/O request sent to backing device */ 1038 1038 closure_bio_submit(s->iop.c, flush, cl); 1039 1039 }
+2 -3
drivers/md/dm-crypt.c
··· 1672 1672 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) 1673 1673 mutex_lock(&cc->bio_alloc_lock); 1674 1674 1675 - clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs); 1675 + clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf, 1676 + GFP_NOIO, &cc->bs); 1676 1677 clone->bi_private = io; 1677 1678 clone->bi_end_io = crypt_endio; 1678 - bio_set_dev(clone, cc->dev->bdev); 1679 - clone->bi_opf = io->base_bio->bi_opf; 1680 1679 1681 1680 remaining_size = size; 1682 1681
+2 -3
drivers/md/dm-io.c
··· 345 345 (PAGE_SIZE >> SECTOR_SHIFT))); 346 346 } 347 347 348 - bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios); 348 + bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags, 349 + GFP_NOIO, &io->client->bios); 349 350 bio->bi_iter.bi_sector = where->sector + (where->count - remaining); 350 - bio_set_dev(bio, where->bdev); 351 351 bio->bi_end_io = endio; 352 - bio_set_op_attrs(bio, op, op_flags); 353 352 store_io_and_region_in_bio(bio, io, region); 354 353 355 354 if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
+4 -3
drivers/md/dm-writecache.c
··· 1821 1821 1822 1822 max_pages = e->wc_list_contiguous; 1823 1823 1824 - bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set); 1824 + bio = bio_alloc_bioset(wc->dev->bdev, max_pages, REQ_OP_WRITE, 1825 + GFP_NOIO, &wc->bio_set); 1825 1826 wb = container_of(bio, struct writeback_struct, bio); 1826 1827 wb->wc = wc; 1827 1828 bio->bi_end_io = writecache_writeback_endio; 1828 - bio_set_dev(bio, wc->dev->bdev); 1829 1829 bio->bi_iter.bi_sector = read_original_sector(wc, e); 1830 1830 if (max_pages <= WB_LIST_INLINE || 1831 1831 unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *), ··· 1852 1852 wb->wc_list[wb->wc_list_n++] = f; 1853 1853 e = f; 1854 1854 } 1855 - bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA); 1855 + if (WC_MODE_FUA(wc)) 1856 + bio->bi_opf |= REQ_FUA; 1856 1857 if (writecache_has_error(wc)) { 1857 1858 bio->bi_status = BLK_STS_IOERR; 1858 1859 bio_endio(bio);
+3 -2
drivers/md/dm.c
··· 519 519 struct dm_target_io *tio; 520 520 struct bio *clone; 521 521 522 - clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); 522 + clone = bio_alloc_bioset(NULL, 0, 0, GFP_NOIO, &md->io_bs); 523 523 524 524 tio = container_of(clone, struct dm_target_io, clone); 525 525 tio->inside_dm_io = true; ··· 552 552 /* the dm_target_io embedded in ci->io is available */ 553 553 tio = &ci->io->tio; 554 554 } else { 555 - struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); 555 + struct bio *clone = bio_alloc_bioset(NULL, 0, 0, gfp_mask, 556 + &ci->io->md->bs); 556 557 if (!clone) 557 558 return NULL; 558 559
+8 -8
drivers/md/md.c
··· 562 562 atomic_inc(&rdev->nr_pending); 563 563 atomic_inc(&rdev->nr_pending); 564 564 rcu_read_unlock(); 565 - bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set); 565 + bi = bio_alloc_bioset(rdev->bdev, 0, 566 + REQ_OP_WRITE | REQ_PREFLUSH, 567 + GFP_NOIO, &mddev->bio_set); 566 568 bi->bi_end_io = md_end_flush; 567 569 bi->bi_private = rdev; 568 - bio_set_dev(bi, rdev->bdev); 569 - bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 570 570 atomic_inc(&mddev->flush_pending); 571 571 submit_bio(bi); 572 572 rcu_read_lock(); ··· 955 955 * If an error occurred, call md_error 956 956 */ 957 957 struct bio *bio; 958 - int ff = 0; 959 958 960 959 if (!page) 961 960 return; ··· 962 963 if (test_bit(Faulty, &rdev->flags)) 963 964 return; 964 965 965 - bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set); 966 + bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev, 967 + 1, 968 + REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA, 969 + GFP_NOIO, &mddev->sync_set); 966 970 967 971 atomic_inc(&rdev->nr_pending); 968 972 969 - bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev); 970 973 bio->bi_iter.bi_sector = sector; 971 974 bio_add_page(bio, page, size, 0); 972 975 bio->bi_private = rdev; ··· 977 976 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && 978 977 test_bit(FailFast, &rdev->flags) && 979 978 !test_bit(LastDev, &rdev->flags)) 980 - ff = MD_FAILFAST; 981 - bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff; 979 + bio->bi_opf |= MD_FAILFAST; 982 980 983 981 atomic_inc(&mddev->pending_writes); 984 982 submit_bio(bio);
+2 -1
drivers/md/raid1.c
··· 1126 1126 int i = 0; 1127 1127 struct bio *behind_bio = NULL; 1128 1128 1129 - behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set); 1129 + behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO, 1130 + &r1_bio->mddev->bio_set); 1130 1131 if (!behind_bio) 1131 1132 return; 1132 1133
+2 -4
drivers/md/raid10.c
··· 4892 4892 return sectors_done; 4893 4893 } 4894 4894 4895 - read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set); 4896 - 4897 - bio_set_dev(read_bio, rdev->bdev); 4895 + read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ, 4896 + GFP_KERNEL, &mddev->bio_set); 4898 4897 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr 4899 4898 + rdev->data_offset); 4900 4899 read_bio->bi_private = r10_bio; 4901 4900 read_bio->bi_end_io = end_reshape_read; 4902 - bio_set_op_attrs(read_bio, REQ_OP_READ, 0); 4903 4901 r10_bio->master_bio = read_bio; 4904 4902 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; 4905 4903
+4 -4
drivers/md/raid5-cache.c
··· 735 735 736 736 static struct bio *r5l_bio_alloc(struct r5l_log *log) 737 737 { 738 - struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, &log->bs); 738 + struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS, 739 + REQ_OP_WRITE, GFP_NOIO, &log->bs); 739 740 740 - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 741 - bio_set_dev(bio, log->rdev->bdev); 742 741 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; 743 742 744 743 return bio; ··· 1633 1634 { 1634 1635 struct page *page; 1635 1636 1636 - ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_VECS, &log->bs); 1637 + ctx->ra_bio = bio_alloc_bioset(NULL, BIO_MAX_VECS, 0, GFP_KERNEL, 1638 + &log->bs); 1637 1639 if (!ctx->ra_bio) 1638 1640 return -ENOMEM; 1639 1641
+5 -6
drivers/md/raid5-ppl.c
··· 496 496 if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) { 497 497 struct bio *prev = bio; 498 498 499 - bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, 499 + bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS, 500 + prev->bi_opf, GFP_NOIO, 500 501 &ppl_conf->bs); 501 - bio->bi_opf = prev->bi_opf; 502 502 bio->bi_write_hint = prev->bi_write_hint; 503 - bio_copy_dev(bio, prev); 504 503 bio->bi_iter.bi_sector = bio_end_sector(prev); 505 504 bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); 506 505 ··· 636 637 struct bio *bio; 637 638 char b[BDEVNAME_SIZE]; 638 639 639 - bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs); 640 - bio_set_dev(bio, bdev); 640 + bio = bio_alloc_bioset(bdev, 0, REQ_OP_WRITE | REQ_PREFLUSH, 641 + GFP_NOIO, 642 + &ppl_conf->flush_bs); 641 643 bio->bi_private = io; 642 - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 643 644 bio->bi_end_io = ppl_flush_endio; 644 645 645 646 pr_debug("%s: dev: %s\n", __func__,
+2 -4
drivers/target/target_core_iblock.c
··· 352 352 * Only allocate as many vector entries as the bio code allows us to, 353 353 * we'll loop later on until we have handled the whole request. 354 354 */ 355 - bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num), 356 - &ib_dev->ibd_bio_set); 355 + bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf, 356 + GFP_NOIO, &ib_dev->ibd_bio_set); 357 357 if (!bio) { 358 358 pr_err("Unable to allocate memory for bio\n"); 359 359 return NULL; 360 360 } 361 361 362 - bio_set_dev(bio, ib_dev->ibd_bd); 363 362 bio->bi_private = cmd; 364 363 bio->bi_end_io = &iblock_bio_done; 365 364 bio->bi_iter.bi_sector = lba; 366 - bio->bi_opf = opf; 367 365 368 366 return bio; 369 367 }
+1 -1
fs/btrfs/extent_io.c
··· 3143 3143 struct bio *bio; 3144 3144 3145 3145 ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS); 3146 - bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset); 3146 + bio = bio_alloc_bioset(NULL, nr_iovecs, 0, GFP_NOFS, &btrfs_bioset); 3147 3147 btrfs_bio_init(btrfs_bio(bio)); 3148 3148 return bio; 3149 3149 }
+3 -4
fs/f2fs/data.c
··· 394 394 struct f2fs_sb_info *sbi = fio->sbi; 395 395 struct bio *bio; 396 396 397 - bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset); 397 + bio = bio_alloc_bioset(NULL, npages, 0, GFP_NOIO, &f2fs_bioset); 398 398 399 399 f2fs_target_device(sbi, fio->new_blkaddr, bio); 400 400 if (is_read_io(fio->op)) { ··· 985 985 struct bio_post_read_ctx *ctx = NULL; 986 986 unsigned int post_read_steps = 0; 987 987 988 - bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL, 989 - bio_max_segs(nr_pages), &f2fs_bioset); 988 + bio = bio_alloc_bioset(NULL, bio_max_segs(nr_pages), REQ_OP_READ | op_flag, 989 + for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset); 990 990 if (!bio) 991 991 return ERR_PTR(-ENOMEM); 992 992 ··· 994 994 995 995 f2fs_target_device(sbi, blkaddr, bio); 996 996 bio->bi_end_io = f2fs_read_end_io; 997 - bio_set_op_attrs(bio, REQ_OP_READ, op_flag); 998 997 999 998 if (fscrypt_inode_uses_fs_layer_crypto(inode)) 1000 999 post_read_steps |= STEP_DECRYPT;
+3 -3
fs/iomap/buffered-io.c
··· 1196 1196 struct iomap_ioend *ioend; 1197 1197 struct bio *bio; 1198 1198 1199 - bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset); 1200 - bio_set_dev(bio, wpc->iomap.bdev); 1199 + bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS, 1200 + REQ_OP_WRITE | wbc_to_write_flags(wbc), 1201 + GFP_NOFS, &iomap_ioend_bioset); 1201 1202 bio->bi_iter.bi_sector = sector; 1202 - bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc); 1203 1203 bio->bi_write_hint = inode->i_write_hint; 1204 1204 wbc_init_bio(wbc, bio); 1205 1205
+4 -3
include/linux/bio.h
··· 405 405 extern int biovec_init_pool(mempool_t *pool, int pool_entries); 406 406 extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); 407 407 408 - struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs, 409 - struct bio_set *bs); 408 + struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, 409 + unsigned int opf, gfp_t gfp_mask, 410 + struct bio_set *bs); 410 411 struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs, 411 412 struct bio_set *bs); 412 413 struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs); ··· 420 419 421 420 static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs) 422 421 { 423 - return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set); 422 + return bio_alloc_bioset(NULL, nr_iovecs, 0, gfp_mask, &fs_bio_set); 424 423 } 425 424 426 425 void submit_bio(struct bio *bio);