Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: pass a block_device and opf to bio_init

Pass the block_device that we plan to use this bio for and the
operation to bio_init to optimize the assignment. A NULL block_device
can be passed, both for the passthrough case on a raw request_queue and
to temporarily avoid refactoring some nasty code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220124091107.642561-19-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Christoph Hellwig and committed by
Jens Axboe
49add496 07888c66

+68 -91
+13 -14
block/bio.c
··· 249 249 * they must remember to pair any call to bio_init() with bio_uninit() 250 250 * when IO has completed, or when the bio is released. 251 251 */ 252 - void bio_init(struct bio *bio, struct bio_vec *table, 253 - unsigned short max_vecs) 252 + void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, 253 + unsigned short max_vecs, unsigned int opf) 254 254 { 255 255 bio->bi_next = NULL; 256 - bio->bi_bdev = NULL; 257 - bio->bi_opf = 0; 256 + bio->bi_bdev = bdev; 257 + bio->bi_opf = opf; 258 258 bio->bi_flags = 0; 259 259 bio->bi_ioprio = 0; 260 260 bio->bi_write_hint = 0; ··· 268 268 #ifdef CONFIG_BLK_CGROUP 269 269 bio->bi_blkg = NULL; 270 270 bio->bi_issue.value = 0; 271 + if (bdev) 272 + bio_associate_blkg(bio); 271 273 #ifdef CONFIG_BLK_CGROUP_IOCOST 272 274 bio->bi_iocost_cost = 0; 273 275 #endif ··· 506 504 if (unlikely(!bvl)) 507 505 goto err_free; 508 506 509 - bio_init(bio, bvl, nr_vecs); 507 + bio_init(bio, bdev, bvl, nr_vecs, opf); 510 508 } else if (nr_vecs) { 511 - bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS); 509 + bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf); 512 510 } else { 513 - bio_init(bio, NULL, 0); 511 + bio_init(bio, bdev, NULL, 0, opf); 514 512 } 515 513 516 514 bio->bi_pool = bs; 517 - if (bdev) 518 - bio_set_dev(bio, bdev); 519 - bio->bi_opf = opf; 520 515 return bio; 521 516 522 517 err_free: ··· 541 542 bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask); 542 543 if (unlikely(!bio)) 543 544 return NULL; 544 - bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs); 545 + bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs, 546 + 0); 545 547 bio->bi_pool = NULL; 546 548 return bio; 547 549 } ··· 1756 1756 cache->free_list = bio->bi_next; 1757 1757 cache->nr--; 1758 1758 put_cpu(); 1759 - bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs); 1760 - bio_set_dev(bio, bdev); 1761 - bio->bi_opf = opf; 1759 + bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, 1760 + nr_vecs, opf); 1762 1761 bio->bi_pool = bs; 1763 1762 bio_set_flag(bio, BIO_PERCPU_CACHE); 1764 1763 return bio;
+1 -3
block/blk-flush.c
··· 460 460 { 461 461 struct bio bio; 462 462 463 - bio_init(&bio, NULL, 0); 464 - bio_set_dev(&bio, bdev); 465 - bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 463 + bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH); 466 464 return submit_bio_wait(&bio); 467 465 } 468 466 EXPORT_SYMBOL(blkdev_issue_flush);
+1 -4
block/blk-zoned.c
··· 238 238 { 239 239 struct bio bio; 240 240 241 - bio_init(&bio, NULL, 0); 242 - bio_set_dev(&bio, bdev); 243 - bio.bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC; 244 - 241 + bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC); 245 242 return submit_bio_wait(&bio); 246 243 } 247 244
+9 -9
block/fops.c
··· 75 75 return -ENOMEM; 76 76 } 77 77 78 - bio_init(&bio, vecs, nr_pages); 79 - bio_set_dev(&bio, bdev); 78 + if (iov_iter_rw(iter) == READ) { 79 + bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ); 80 + if (iter_is_iovec(iter)) 81 + should_dirty = true; 82 + } else { 83 + bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb)); 84 + } 80 85 bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT; 81 86 bio.bi_write_hint = iocb->ki_hint; 82 87 bio.bi_private = current; ··· 93 88 goto out; 94 89 ret = bio.bi_iter.bi_size; 95 90 96 - if (iov_iter_rw(iter) == READ) { 97 - bio.bi_opf = REQ_OP_READ; 98 - if (iter_is_iovec(iter)) 99 - should_dirty = true; 100 - } else { 101 - bio.bi_opf = dio_bio_write_op(iocb); 91 + if (iov_iter_rw(iter) == WRITE) 102 92 task_io_account_write(ret); 103 - } 93 + 104 94 if (iocb->ki_flags & IOCB_NOWAIT) 105 95 bio.bi_opf |= REQ_NOWAIT; 106 96 if (iocb->ki_flags & IOCB_HIPRI)
+1 -3
drivers/block/floppy.c
··· 4129 4129 4130 4130 cbdata.drive = drive; 4131 4131 4132 - bio_init(&bio, &bio_vec, 1); 4133 - bio_set_dev(&bio, bdev); 4132 + bio_init(&bio, bdev, &bio_vec, 1, REQ_OP_READ); 4134 4133 bio_add_page(&bio, page, block_size(bdev), 0); 4135 4134 4136 4135 bio.bi_iter.bi_sector = 0; 4137 4136 bio.bi_flags |= (1 << BIO_QUIET); 4138 4137 bio.bi_private = &cbdata; 4139 4138 bio.bi_end_io = floppy_rb0_cb; 4140 - bio_set_op_attrs(&bio, REQ_OP_READ, 0); 4141 4139 4142 4140 init_completion(&cbdata.complete); 4143 4141
+2 -3
drivers/block/zram/zram_drv.c
··· 743 743 continue; 744 744 } 745 745 746 - bio_init(&bio, &bio_vec, 1); 747 - bio_set_dev(&bio, zram->bdev); 746 + bio_init(&bio, zram->bdev, &bio_vec, 1, 747 + REQ_OP_WRITE | REQ_SYNC); 748 748 bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9); 749 - bio.bi_opf = REQ_OP_WRITE | REQ_SYNC; 750 749 751 750 bio_add_page(&bio, bvec.bv_page, bvec.bv_len, 752 751 bvec.bv_offset);
+2 -1
drivers/md/bcache/io.c
··· 26 26 struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO); 27 27 struct bio *bio = &b->bio; 28 28 29 - bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb)); 29 + bio_init(bio, NULL, bio->bi_inline_vecs, 30 + meta_bucket_pages(&c->cache->sb), 0); 30 31 31 32 return bio; 32 33 }
+1 -3
drivers/md/bcache/journal.c
··· 611 611 612 612 atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); 613 613 614 - bio_init(bio, bio->bi_inline_vecs, 1); 615 - bio_set_op_attrs(bio, REQ_OP_DISCARD, 0); 614 + bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD); 616 615 bio->bi_iter.bi_sector = bucket_to_sector(ca->set, 617 616 ca->sb.d[ja->discard_idx]); 618 - bio_set_dev(bio, ca->bdev); 619 617 bio->bi_iter.bi_size = bucket_bytes(ca); 620 618 bio->bi_end_io = journal_discard_endio; 621 619
+2 -2
drivers/md/bcache/movinggc.c
··· 79 79 { 80 80 struct bio *bio = &io->bio.bio; 81 81 82 - bio_init(bio, bio->bi_inline_vecs, 83 - DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS)); 82 + bio_init(bio, NULL, bio->bi_inline_vecs, 83 + DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0); 84 84 bio_get(bio); 85 85 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); 86 86
+1 -1
drivers/md/bcache/request.c
··· 685 685 { 686 686 struct bio *bio = &s->bio.bio; 687 687 688 - bio_init(bio, NULL, 0); 688 + bio_init(bio, NULL, NULL, 0, 0); 689 689 __bio_clone_fast(bio, orig_bio); 690 690 /* 691 691 * bi_end_io can be set separately somewhere else, e.g. the
+3 -5
drivers/md/bcache/super.c
··· 342 342 down(&dc->sb_write_mutex); 343 343 closure_init(cl, parent); 344 344 345 - bio_init(bio, dc->sb_bv, 1); 346 - bio_set_dev(bio, dc->bdev); 345 + bio_init(bio, dc->bdev, dc->sb_bv, 1, 0); 347 346 bio->bi_end_io = write_bdev_super_endio; 348 347 bio->bi_private = dc; 349 348 ··· 385 386 if (ca->sb.version < version) 386 387 ca->sb.version = version; 387 388 388 - bio_init(bio, ca->sb_bv, 1); 389 - bio_set_dev(bio, ca->bdev); 389 + bio_init(bio, ca->bdev, ca->sb_bv, 1, 0); 390 390 bio->bi_end_io = write_super_endio; 391 391 bio->bi_private = ca; 392 392 ··· 2237 2239 __module_get(THIS_MODULE); 2238 2240 kobject_init(&ca->kobj, &bch_cache_ktype); 2239 2241 2240 - bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8); 2242 + bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0); 2241 2243 2242 2244 /* 2243 2245 * when ca->sb.njournal_buckets is not zero, journal exists,
+2 -2
drivers/md/bcache/writeback.c
··· 292 292 struct dirty_io *io = w->private; 293 293 struct bio *bio = &io->bio; 294 294 295 - bio_init(bio, bio->bi_inline_vecs, 296 - DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)); 295 + bio_init(bio, NULL, bio->bi_inline_vecs, 296 + DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0); 297 297 if (!io->dc->writeback_percent) 298 298 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); 299 299
+2 -3
drivers/md/dm.c
··· 1303 1303 * need to reference it after submit. It's just used as 1304 1304 * the basis for the clone(s). 1305 1305 */ 1306 - bio_init(&flush_bio, NULL, 0); 1307 - flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1308 - bio_set_dev(&flush_bio, ci->io->md->disk->part0); 1306 + bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, 1307 + REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); 1309 1308 1310 1309 ci->bio = &flush_bio; 1311 1310 ci->sector_count = 0;
+1 -1
drivers/md/md-multipath.c
··· 121 121 } 122 122 multipath = conf->multipaths + mp_bh->path; 123 123 124 - bio_init(&mp_bh->bio, NULL, 0); 124 + bio_init(&mp_bh->bio, NULL, NULL, 0, 0); 125 125 __bio_clone_fast(&mp_bh->bio, bio); 126 126 127 127 mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
+3 -5
drivers/md/md.c
··· 998 998 struct bio bio; 999 999 struct bio_vec bvec; 1000 1000 1001 - bio_init(&bio, &bvec, 1); 1002 - 1003 1001 if (metadata_op && rdev->meta_bdev) 1004 - bio_set_dev(&bio, rdev->meta_bdev); 1002 + bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags); 1005 1003 else 1006 - bio_set_dev(&bio, rdev->bdev); 1007 - bio.bi_opf = op | op_flags; 1004 + bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags); 1005 + 1008 1006 if (metadata_op) 1009 1007 bio.bi_iter.bi_sector = sector + rdev->sb_start; 1010 1008 else if (rdev->mddev->reshape_position != MaxSector &&
+1 -1
drivers/md/raid5-cache.c
··· 3108 3108 INIT_LIST_HEAD(&log->io_end_ios); 3109 3109 INIT_LIST_HEAD(&log->flushing_ios); 3110 3110 INIT_LIST_HEAD(&log->finished_ios); 3111 - bio_init(&log->flush_bio, NULL, 0); 3111 + bio_init(&log->flush_bio, NULL, NULL, 0, 0); 3112 3112 3113 3113 log->io_kc = KMEM_CACHE(r5l_io_unit, 0); 3114 3114 if (!log->io_kc)
+1 -1
drivers/md/raid5-ppl.c
··· 250 250 INIT_LIST_HEAD(&io->stripe_list); 251 251 atomic_set(&io->pending_stripes, 0); 252 252 atomic_set(&io->pending_flushes, 0); 253 - bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS); 253 + bio_init(&io->bio, NULL, io->biovec, PPL_IO_INLINE_BVECS, 0); 254 254 255 255 pplhdr = page_address(io->header_page); 256 256 clear_page(pplhdr);
+2 -2
drivers/md/raid5.c
··· 2310 2310 for (i = 0; i < disks; i++) { 2311 2311 struct r5dev *dev = &sh->dev[i]; 2312 2312 2313 - bio_init(&dev->req, &dev->vec, 1); 2314 - bio_init(&dev->rreq, &dev->rvec, 1); 2313 + bio_init(&dev->req, NULL, &dev->vec, 1, 0); 2314 + bio_init(&dev->rreq, NULL, &dev->rvec, 1, 0); 2315 2315 } 2316 2316 2317 2317 if (raid5_has_ppl(conf)) {
+4 -6
drivers/nvme/target/io-cmd-bdev.c
··· 267 267 268 268 if (nvmet_use_inline_bvec(req)) { 269 269 bio = &req->b.inline_bio; 270 - bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); 271 - bio_set_dev(bio, req->ns->bdev); 272 - bio->bi_opf = op; 270 + bio_init(bio, req->ns->bdev, req->inline_bvec, 271 + ARRAY_SIZE(req->inline_bvec), op); 273 272 } else { 274 273 bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op, 275 274 GFP_KERNEL); ··· 327 328 if (!nvmet_check_transfer_len(req, 0)) 328 329 return; 329 330 330 - bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); 331 - bio_set_dev(bio, req->ns->bdev); 331 + bio_init(bio, req->ns->bdev, req->inline_bvec, 332 + ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH); 332 333 bio->bi_private = req; 333 334 bio->bi_end_io = nvmet_bio_done; 334 - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 335 335 336 336 submit_bio(bio); 337 337 }
+2 -2
drivers/nvme/target/passthru.c
··· 206 206 207 207 if (nvmet_use_inline_bvec(req)) { 208 208 bio = &req->p.inline_bio; 209 - bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); 210 - bio->bi_opf = req_op(rq); 209 + bio_init(bio, NULL, req->inline_bvec, 210 + ARRAY_SIZE(req->inline_bvec), req_op(rq)); 211 211 } else { 212 212 bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq), 213 213 GFP_KERNEL);
+2 -2
drivers/nvme/target/zns.c
··· 552 552 553 553 if (nvmet_use_inline_bvec(req)) { 554 554 bio = &req->z.inline_bio; 555 - bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); 556 - bio->bi_opf = op; 555 + bio_init(bio, req->ns->bdev, req->inline_bvec, 556 + ARRAY_SIZE(req->inline_bvec), op); 557 557 } else { 558 558 bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL); 559 559 }
+1 -3
fs/iomap/buffered-io.c
··· 549 549 struct bio_vec bvec; 550 550 struct bio bio; 551 551 552 - bio_init(&bio, &bvec, 1); 553 - bio.bi_opf = REQ_OP_READ; 552 + bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ); 554 553 bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); 555 - bio_set_dev(&bio, iomap->bdev); 556 554 bio_add_folio(&bio, folio, plen, poff); 557 555 return submit_bio_wait(&bio); 558 556 }
+1 -3
fs/xfs/xfs_bio_io.c
··· 36 36 return; 37 37 } 38 38 39 - bio_init(bio, NULL, 0); 40 - bio_set_dev(bio, bdev); 41 - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 39 + bio_init(bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); 42 40 bio->bi_private = done; 43 41 bio->bi_end_io = xfs_flush_bdev_async_endio; 44 42
+7 -7
fs/xfs/xfs_log.c
··· 1883 1883 return; 1884 1884 } 1885 1885 1886 - bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE)); 1887 - bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev); 1888 - iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; 1889 - iclog->ic_bio.bi_end_io = xlog_bio_end_io; 1890 - iclog->ic_bio.bi_private = iclog; 1891 - 1892 1886 /* 1893 1887 * We use REQ_SYNC | REQ_IDLE here to tell the block layer the are more 1894 1888 * IOs coming immediately after this one. This prevents the block layer 1895 1889 * writeback throttle from throttling log writes behind background 1896 1890 * metadata writeback and causing priority inversions. 1897 1891 */ 1898 - iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE; 1892 + bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec, 1893 + howmany(count, PAGE_SIZE), 1894 + REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE); 1895 + iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; 1896 + iclog->ic_bio.bi_end_io = xlog_bio_end_io; 1897 + iclog->ic_bio.bi_private = iclog; 1898 + 1899 1899 if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) { 1900 1900 iclog->ic_bio.bi_opf |= REQ_PREFLUSH; 1901 1901 /*
+1 -3
fs/zonefs/super.c
··· 1540 1540 if (!page) 1541 1541 return -ENOMEM; 1542 1542 1543 - bio_init(&bio, &bio_vec, 1); 1543 + bio_init(&bio, sb->s_bdev, &bio_vec, 1, REQ_OP_READ); 1544 1544 bio.bi_iter.bi_sector = 0; 1545 - bio.bi_opf = REQ_OP_READ; 1546 - bio_set_dev(&bio, sb->s_bdev); 1547 1545 bio_add_page(&bio, page, PAGE_SIZE, 0); 1548 1546 1549 1547 ret = submit_bio_wait(&bio);
+2 -2
include/linux/bio.h
··· 456 456 struct request_queue; 457 457 458 458 extern int submit_bio_wait(struct bio *bio); 459 - extern void bio_init(struct bio *bio, struct bio_vec *table, 460 - unsigned short max_vecs); 459 + void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, 460 + unsigned short max_vecs, unsigned int opf); 461 461 extern void bio_uninit(struct bio *); 462 462 extern void bio_reset(struct bio *); 463 463 void bio_chain(struct bio *, struct bio *);