Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: submit_bio_wait() conversions

It was being open-coded in a few places.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Joern Engel <joern@logfs.org>
Cc: Prasad Joshi <prasadjoshi.linux@gmail.com>
Cc: Neil Brown <neilb@suse.de>
Cc: Chris Mason <chris.mason@fusionio.com>
Acked-by: NeilBrown <neilb@suse.de>

+24 -118 (total across 8 files)
+1 -18
block/blk-flush.c
··· 502 502 } 503 503 } 504 504 505 - static void bio_end_flush(struct bio *bio, int err) 506 - { 507 - if (err) 508 - clear_bit(BIO_UPTODATE, &bio->bi_flags); 509 - if (bio->bi_private) 510 - complete(bio->bi_private); 511 - bio_put(bio); 512 - } 513 - 514 505 /** 515 506 * blkdev_issue_flush - queue a flush 516 507 * @bdev: blockdev to issue flush for ··· 517 526 int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, 518 527 sector_t *error_sector) 519 528 { 520 - DECLARE_COMPLETION_ONSTACK(wait); 521 529 struct request_queue *q; 522 530 struct bio *bio; 523 531 int ret = 0; ··· 538 548 return -ENXIO; 539 549 540 550 bio = bio_alloc(gfp_mask, 0); 541 - bio->bi_end_io = bio_end_flush; 542 551 bio->bi_bdev = bdev; 543 - bio->bi_private = &wait; 544 552 545 - bio_get(bio); 546 - submit_bio(WRITE_FLUSH, bio); 547 - wait_for_completion_io(&wait); 553 + ret = submit_bio_wait(WRITE_FLUSH, bio); 548 554 549 555 /* 550 556 * The driver must store the error location in ->bi_sector, if ··· 549 563 */ 550 564 if (error_sector) 551 565 *error_sector = bio->bi_sector; 552 - 553 - if (!bio_flagged(bio, BIO_UPTODATE)) 554 - ret = -EIO; 555 566 556 567 bio_put(bio); 557 568 return ret;
+1 -13
drivers/md/md.c
··· 776 776 finish_wait(&mddev->sb_wait, &wq); 777 777 } 778 778 779 - static void bi_complete(struct bio *bio, int error) 780 - { 781 - complete((struct completion*)bio->bi_private); 782 - } 783 - 784 779 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, 785 780 struct page *page, int rw, bool metadata_op) 786 781 { 787 782 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); 788 - struct completion event; 789 783 int ret; 790 - 791 - rw |= REQ_SYNC; 792 784 793 785 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? 794 786 rdev->meta_bdev : rdev->bdev; ··· 793 801 else 794 802 bio->bi_sector = sector + rdev->data_offset; 795 803 bio_add_page(bio, page, size, 0); 796 - init_completion(&event); 797 - bio->bi_private = &event; 798 - bio->bi_end_io = bi_complete; 799 - submit_bio(rw, bio); 800 - wait_for_completion(&event); 804 + submit_bio_wait(rw, bio); 801 805 802 806 ret = test_bit(BIO_UPTODATE, &bio->bi_flags); 803 807 bio_put(bio);
+13 -19
fs/btrfs/check-integrity.c
··· 333 333 static int btrfsic_read_block(struct btrfsic_state *state, 334 334 struct btrfsic_block_data_ctx *block_ctx); 335 335 static void btrfsic_dump_database(struct btrfsic_state *state); 336 - static void btrfsic_complete_bio_end_io(struct bio *bio, int err); 337 336 static int btrfsic_test_for_metadata(struct btrfsic_state *state, 338 337 char **datav, unsigned int num_pages); 339 338 static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state, ··· 1686 1687 for (i = 0; i < num_pages;) { 1687 1688 struct bio *bio; 1688 1689 unsigned int j; 1689 - DECLARE_COMPLETION_ONSTACK(complete); 1690 1690 1691 1691 bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i); 1692 1692 if (!bio) { ··· 1696 1698 } 1697 1699 bio->bi_bdev = block_ctx->dev->bdev; 1698 1700 bio->bi_sector = dev_bytenr >> 9; 1699 - bio->bi_end_io = btrfsic_complete_bio_end_io; 1700 - bio->bi_private = &complete; 1701 1701 1702 1702 for (j = i; j < num_pages; j++) { 1703 1703 ret = bio_add_page(bio, block_ctx->pagev[j], ··· 1708 1712 "btrfsic: error, failed to add a single page!\n"); 1709 1713 return -1; 1710 1714 } 1711 - submit_bio(READ, bio); 1712 - 1713 - /* this will also unplug the queue */ 1714 - wait_for_completion(&complete); 1715 - 1716 - if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { 1715 + if (submit_bio_wait(READ, bio)) { 1717 1716 printk(KERN_INFO 1718 1717 "btrfsic: read error at logical %llu dev %s!\n", 1719 1718 block_ctx->start, block_ctx->dev->name); ··· 1729 1738 } 1730 1739 1731 1740 return block_ctx->len; 1732 - } 1733 - 1734 - static void btrfsic_complete_bio_end_io(struct bio *bio, int err) 1735 - { 1736 - complete((struct completion *)bio->bi_private); 1737 1741 } 1738 1742 1739 1743 static void btrfsic_dump_database(struct btrfsic_state *state) ··· 2994 3008 return submit_bh(rw, bh); 2995 3009 } 2996 3010 2997 - void btrfsic_submit_bio(int rw, struct bio *bio) 3011 + static void __btrfsic_submit_bio(int rw, struct bio *bio) 2998 3012 { 2999 3013 struct 
btrfsic_dev_state *dev_state; 3000 3014 3001 - if (!btrfsic_is_initialized) { 3002 - submit_bio(rw, bio); 3015 + if (!btrfsic_is_initialized) 3003 3016 return; 3004 - } 3005 3017 3006 3018 mutex_lock(&btrfsic_mutex); 3007 3019 /* since btrfsic_submit_bio() is also called before ··· 3090 3106 } 3091 3107 leave: 3092 3108 mutex_unlock(&btrfsic_mutex); 3109 + } 3093 3110 3111 + void btrfsic_submit_bio(int rw, struct bio *bio) 3112 + { 3113 + __btrfsic_submit_bio(rw, bio); 3094 3114 submit_bio(rw, bio); 3115 + } 3116 + 3117 + int btrfsic_submit_bio_wait(int rw, struct bio *bio) 3118 + { 3119 + __btrfsic_submit_bio(rw, bio); 3120 + return submit_bio_wait(rw, bio); 3095 3121 } 3096 3122 3097 3123 int btrfsic_mount(struct btrfs_root *root,
+2
fs/btrfs/check-integrity.h
··· 22 22 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 23 23 int btrfsic_submit_bh(int rw, struct buffer_head *bh); 24 24 void btrfsic_submit_bio(int rw, struct bio *bio); 25 + int btrfsic_submit_bio_wait(int rw, struct bio *bio); 25 26 #else 26 27 #define btrfsic_submit_bh submit_bh 27 28 #define btrfsic_submit_bio submit_bio 29 + #define btrfsic_submit_bio_wait submit_bio_wait 28 30 #endif 29 31 30 32 int btrfsic_mount(struct btrfs_root *root,
+1 -11
fs/btrfs/extent_io.c
··· 1952 1952 return err; 1953 1953 } 1954 1954 1955 - static void repair_io_failure_callback(struct bio *bio, int err) 1956 - { 1957 - complete(bio->bi_private); 1958 - } 1959 - 1960 1955 /* 1961 1956 * this bypasses the standard btrfs submit functions deliberately, as 1962 1957 * the standard behavior is to write all copies in a raid setup. here we only ··· 1968 1973 { 1969 1974 struct bio *bio; 1970 1975 struct btrfs_device *dev; 1971 - DECLARE_COMPLETION_ONSTACK(compl); 1972 1976 u64 map_length = 0; 1973 1977 u64 sector; 1974 1978 struct btrfs_bio *bbio = NULL; ··· 1984 1990 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); 1985 1991 if (!bio) 1986 1992 return -EIO; 1987 - bio->bi_private = &compl; 1988 - bio->bi_end_io = repair_io_failure_callback; 1989 1993 bio->bi_size = 0; 1990 1994 map_length = length; 1991 1995 ··· 2004 2012 } 2005 2013 bio->bi_bdev = dev->bdev; 2006 2014 bio_add_page(bio, page, length, start - page_offset(page)); 2007 - btrfsic_submit_bio(WRITE_SYNC, bio); 2008 - wait_for_completion(&compl); 2009 2015 2010 - if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { 2016 + if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) { 2011 2017 /* try to remap that extent elsewhere? */ 2012 2018 bio_put(bio); 2013 2019 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
+4 -29
fs/btrfs/scrub.c
··· 208 208 int is_metadata, int have_csum, 209 209 const u8 *csum, u64 generation, 210 210 u16 csum_size); 211 - static void scrub_complete_bio_end_io(struct bio *bio, int err); 212 211 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, 213 212 struct scrub_block *sblock_good, 214 213 int force_write); ··· 1293 1294 for (page_num = 0; page_num < sblock->page_count; page_num++) { 1294 1295 struct bio *bio; 1295 1296 struct scrub_page *page = sblock->pagev[page_num]; 1296 - DECLARE_COMPLETION_ONSTACK(complete); 1297 1297 1298 1298 if (page->dev->bdev == NULL) { 1299 1299 page->io_error = 1; ··· 1309 1311 } 1310 1312 bio->bi_bdev = page->dev->bdev; 1311 1313 bio->bi_sector = page->physical >> 9; 1312 - bio->bi_end_io = scrub_complete_bio_end_io; 1313 - bio->bi_private = &complete; 1314 1314 1315 1315 bio_add_page(bio, page->page, PAGE_SIZE, 0); 1316 - btrfsic_submit_bio(READ, bio); 1317 - 1318 - /* this will also unplug the queue */ 1319 - wait_for_completion(&complete); 1320 - 1321 - page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags); 1322 - if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 1316 + if (btrfsic_submit_bio_wait(READ, bio)) 1323 1317 sblock->no_io_error_seen = 0; 1318 + 1324 1319 bio_put(bio); 1325 1320 } 1326 1321 ··· 1382 1391 sblock->checksum_error = 1; 1383 1392 } 1384 1393 1385 - static void scrub_complete_bio_end_io(struct bio *bio, int err) 1386 - { 1387 - complete((struct completion *)bio->bi_private); 1388 - } 1389 - 1390 1394 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, 1391 1395 struct scrub_block *sblock_good, 1392 1396 int force_write) ··· 1416 1430 sblock_bad->checksum_error || page_bad->io_error) { 1417 1431 struct bio *bio; 1418 1432 int ret; 1419 - DECLARE_COMPLETION_ONSTACK(complete); 1420 1433 1421 1434 if (!page_bad->dev->bdev) { 1422 1435 printk_ratelimited(KERN_WARNING ··· 1428 1443 return -EIO; 1429 1444 bio->bi_bdev = page_bad->dev->bdev; 1430 1445 bio->bi_sector = 
page_bad->physical >> 9; 1431 - bio->bi_end_io = scrub_complete_bio_end_io; 1432 - bio->bi_private = &complete; 1433 1446 1434 1447 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); 1435 1448 if (PAGE_SIZE != ret) { 1436 1449 bio_put(bio); 1437 1450 return -EIO; 1438 1451 } 1439 - btrfsic_submit_bio(WRITE, bio); 1440 1452 1441 - /* this will also unplug the queue */ 1442 - wait_for_completion(&complete); 1443 - if (!bio_flagged(bio, BIO_UPTODATE)) { 1453 + if (btrfsic_submit_bio_wait(WRITE, bio)) { 1444 1454 btrfs_dev_stat_inc_and_print(page_bad->dev, 1445 1455 BTRFS_DEV_STAT_WRITE_ERRS); 1446 1456 btrfs_dev_replace_stats_inc( ··· 3355 3375 struct bio *bio; 3356 3376 struct btrfs_device *dev; 3357 3377 int ret; 3358 - DECLARE_COMPLETION_ONSTACK(compl); 3359 3378 3360 3379 dev = sctx->wr_ctx.tgtdev; 3361 3380 if (!dev) ··· 3371 3392 spin_unlock(&sctx->stat_lock); 3372 3393 return -ENOMEM; 3373 3394 } 3374 - bio->bi_private = &compl; 3375 - bio->bi_end_io = scrub_complete_bio_end_io; 3376 3395 bio->bi_size = 0; 3377 3396 bio->bi_sector = physical_for_dev_replace >> 9; 3378 3397 bio->bi_bdev = dev->bdev; ··· 3381 3404 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); 3382 3405 return -EIO; 3383 3406 } 3384 - btrfsic_submit_bio(WRITE_SYNC, bio); 3385 - wait_for_completion(&compl); 3386 3407 3387 - if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 3408 + if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) 3388 3409 goto leave_with_eio; 3389 3410 3390 3411 bio_put(bio);
+1 -16
fs/hfsplus/wrapper.c
··· 24 24 u16 embed_count; 25 25 }; 26 26 27 - static void hfsplus_end_io_sync(struct bio *bio, int err) 28 - { 29 - if (err) 30 - clear_bit(BIO_UPTODATE, &bio->bi_flags); 31 - complete(bio->bi_private); 32 - } 33 - 34 27 /* 35 28 * hfsplus_submit_bio - Perfrom block I/O 36 29 * @sb: super block of volume for I/O ··· 46 53 int hfsplus_submit_bio(struct super_block *sb, sector_t sector, 47 54 void *buf, void **data, int rw) 48 55 { 49 - DECLARE_COMPLETION_ONSTACK(wait); 50 56 struct bio *bio; 51 57 int ret = 0; 52 58 u64 io_size; ··· 65 73 bio = bio_alloc(GFP_NOIO, 1); 66 74 bio->bi_sector = sector; 67 75 bio->bi_bdev = sb->s_bdev; 68 - bio->bi_end_io = hfsplus_end_io_sync; 69 - bio->bi_private = &wait; 70 76 71 77 if (!(rw & WRITE) && data) 72 78 *data = (u8 *)buf + offset; ··· 83 93 buf = (u8 *)buf + len; 84 94 } 85 95 86 - submit_bio(rw, bio); 87 - wait_for_completion(&wait); 88 - 89 - if (!bio_flagged(bio, BIO_UPTODATE)) 90 - ret = -EIO; 91 - 96 + ret = submit_bio_wait(rw, bio); 92 97 out: 93 98 bio_put(bio); 94 99 return ret < 0 ? ret : 0;
+1 -12
fs/logfs/dev_bdev.c
··· 14 14 15 15 #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1)) 16 16 17 - static void request_complete(struct bio *bio, int err) 18 - { 19 - complete((struct completion *)bio->bi_private); 20 - } 21 - 22 17 static int sync_request(struct page *page, struct block_device *bdev, int rw) 23 18 { 24 19 struct bio bio; 25 20 struct bio_vec bio_vec; 26 - struct completion complete; 27 21 28 22 bio_init(&bio); 29 23 bio.bi_max_vecs = 1; ··· 29 35 bio.bi_size = PAGE_SIZE; 30 36 bio.bi_bdev = bdev; 31 37 bio.bi_sector = page->index * (PAGE_SIZE >> 9); 32 - init_completion(&complete); 33 - bio.bi_private = &complete; 34 - bio.bi_end_io = request_complete; 35 38 36 - submit_bio(rw, &bio); 37 - wait_for_completion(&complete); 38 - return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO; 39 + return submit_bio_wait(rw, &bio); 39 40 } 40 41 41 42 static int bdev_readpage(void *_sb, struct page *page)