Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: remove bio_get_nr_vecs()

We can always fill up the bio now, no need to estimate the possible
size based on queue parameters.

Acked-by: Steven Whitehouse <swhiteho@redhat.com>
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
[hch: rebased and wrote a changelog]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Signed-off-by: Jens Axboe <axboe@fb.com>

Authored by Kent Overstreet, committed by Jens Axboe
b54ffb73 6cf66b4c

+18 -74
+0 -23
block/bio.c
··· 694 694 EXPORT_SYMBOL(bio_clone_bioset); 695 695 696 696 /** 697 - * bio_get_nr_vecs - return approx number of vecs 698 - * @bdev: I/O target 699 - * 700 - * Return the approximate number of pages we can send to this target. 701 - * There's no guarantee that you will be able to fit this number of pages 702 - * into a bio, it does not account for dynamic restrictions that vary 703 - * on offset. 704 - */ 705 - int bio_get_nr_vecs(struct block_device *bdev) 706 - { 707 - struct request_queue *q = bdev_get_queue(bdev); 708 - int nr_pages; 709 - 710 - nr_pages = min_t(unsigned, 711 - queue_max_segments(q), 712 - queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1); 713 - 714 - return min_t(unsigned, nr_pages, BIO_MAX_PAGES); 715 - 716 - } 717 - EXPORT_SYMBOL(bio_get_nr_vecs); 718 - 719 - /** 720 697 * bio_add_pc_page - attempt to add page to bio 721 698 * @q: the target queue 722 699 * @bio: destination bio
+1 -1
drivers/md/dm-io.c
··· 316 316 if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME)) 317 317 num_bvecs = 1; 318 318 else 319 - num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), 319 + num_bvecs = min_t(int, BIO_MAX_PAGES, 320 320 dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); 321 321 322 322 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
+1 -4
fs/btrfs/compression.c
··· 97 97 static struct bio *compressed_bio_alloc(struct block_device *bdev, 98 98 u64 first_byte, gfp_t gfp_flags) 99 99 { 100 - int nr_vecs; 101 - 102 - nr_vecs = bio_get_nr_vecs(bdev); 103 - return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags); 100 + return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags); 104 101 } 105 102 106 103 static int check_compressed_csum(struct inode *inode,
+2 -7
fs/btrfs/extent_io.c
··· 2795 2795 { 2796 2796 int ret = 0; 2797 2797 struct bio *bio; 2798 - int nr; 2799 2798 int contig = 0; 2800 - int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED; 2801 2799 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED; 2802 2800 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE); 2803 2801 ··· 2820 2822 return 0; 2821 2823 } 2822 2824 } 2823 - if (this_compressed) 2824 - nr = BIO_MAX_PAGES; 2825 - else 2826 - nr = bio_get_nr_vecs(bdev); 2827 2825 2828 - bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); 2826 + bio = btrfs_bio_alloc(bdev, sector, BIO_MAX_PAGES, 2827 + GFP_NOFS | __GFP_HIGH); 2829 2828 if (!bio) 2830 2829 return -ENOMEM; 2831 2830
+1 -2
fs/btrfs/inode.c
··· 7959 7959 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, 7960 7960 u64 first_sector, gfp_t gfp_flags) 7961 7961 { 7962 - int nr_vecs = bio_get_nr_vecs(bdev); 7963 - return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags); 7962 + return btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags); 7964 7963 } 7965 7964 7966 7965 static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
+2 -16
fs/btrfs/scrub.c
··· 454 454 struct scrub_ctx *sctx; 455 455 int i; 456 456 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; 457 - int pages_per_rd_bio; 458 457 int ret; 459 458 460 - /* 461 - * the setting of pages_per_rd_bio is correct for scrub but might 462 - * be wrong for the dev_replace code where we might read from 463 - * different devices in the initial huge bios. However, that 464 - * code is able to correctly handle the case when adding a page 465 - * to a bio fails. 466 - */ 467 - if (dev->bdev) 468 - pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO, 469 - bio_get_nr_vecs(dev->bdev)); 470 - else 471 - pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO; 472 459 sctx = kzalloc(sizeof(*sctx), GFP_NOFS); 473 460 if (!sctx) 474 461 goto nomem; 475 462 atomic_set(&sctx->refs, 1); 476 463 sctx->is_dev_replace = is_dev_replace; 477 - sctx->pages_per_rd_bio = pages_per_rd_bio; 464 + sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO; 478 465 sctx->curr = -1; 479 466 sctx->dev_root = dev->dev_root; 480 467 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { ··· 3883 3896 return 0; 3884 3897 3885 3898 WARN_ON(!dev->bdev); 3886 - wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO, 3887 - bio_get_nr_vecs(dev->bdev)); 3899 + wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO; 3888 3900 wr_ctx->tgtdev = dev; 3889 3901 atomic_set(&wr_ctx->flush_all_writes, 0); 3890 3902 return 0;
+1 -1
fs/direct-io.c
··· 655 655 if (ret) 656 656 goto out; 657 657 sector = start_sector << (sdio->blkbits - 9); 658 - nr_pages = min(sdio->pages_in_io, bio_get_nr_vecs(map_bh->b_bdev)); 658 + nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES); 659 659 BUG_ON(nr_pages <= 0); 660 660 dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages); 661 661 sdio->boundary = 0;
+1 -2
fs/ext4/page-io.c
··· 372 372 static int io_submit_init_bio(struct ext4_io_submit *io, 373 373 struct buffer_head *bh) 374 374 { 375 - int nvecs = bio_get_nr_vecs(bh->b_bdev); 376 375 struct bio *bio; 377 376 378 - bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); 377 + bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); 379 378 if (!bio) 380 379 return -ENOMEM; 381 380 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+1 -1
fs/ext4/readpage.c
··· 284 284 goto set_error_page; 285 285 } 286 286 bio = bio_alloc(GFP_KERNEL, 287 - min_t(int, nr_pages, bio_get_nr_vecs(bdev))); 287 + min_t(int, nr_pages, BIO_MAX_PAGES)); 288 288 if (!bio) { 289 289 if (ctx) 290 290 ext4_release_crypto_ctx(ctx);
+1 -1
fs/f2fs/data.c
··· 1552 1552 } 1553 1553 1554 1554 bio = bio_alloc(GFP_KERNEL, 1555 - min_t(int, nr_pages, bio_get_nr_vecs(bdev))); 1555 + min_t(int, nr_pages, BIO_MAX_PAGES)); 1556 1556 if (!bio) { 1557 1557 if (ctx) 1558 1558 f2fs_release_crypto_ctx(ctx);
+1 -8
fs/gfs2/lops.c
··· 261 261 static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno) 262 262 { 263 263 struct super_block *sb = sdp->sd_vfs; 264 - unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev); 265 264 struct bio *bio; 266 265 267 266 BUG_ON(sdp->sd_log_bio); 268 267 269 - while (1) { 270 - bio = bio_alloc(GFP_NOIO, nrvecs); 271 - if (likely(bio)) 272 - break; 273 - nrvecs = max(nrvecs/2, 1U); 274 - } 275 - 268 + bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); 276 269 bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9); 277 270 bio->bi_bdev = sb->s_bdev; 278 271 bio->bi_end_io = gfs2_end_log_write;
+2 -2
fs/logfs/dev_bdev.c
··· 81 81 unsigned int max_pages; 82 82 int i; 83 83 84 - max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev)); 84 + max_pages = min(nr_pages, BIO_MAX_PAGES); 85 85 86 86 bio = bio_alloc(GFP_NOFS, max_pages); 87 87 BUG_ON(!bio); ··· 171 171 unsigned int max_pages; 172 172 int i; 173 173 174 - max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev)); 174 + max_pages = min(nr_pages, BIO_MAX_PAGES); 175 175 176 176 bio = bio_alloc(GFP_NOFS, max_pages); 177 177 BUG_ON(!bio);
+2 -2
fs/mpage.c
··· 277 277 goto out; 278 278 } 279 279 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), 280 - min_t(int, nr_pages, bio_get_nr_vecs(bdev)), 280 + min_t(int, nr_pages, BIO_MAX_PAGES), 281 281 GFP_KERNEL); 282 282 if (bio == NULL) 283 283 goto confused; ··· 602 602 } 603 603 } 604 604 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), 605 - bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH); 605 + BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH); 606 606 if (bio == NULL) 607 607 goto confused; 608 608
+1 -1
fs/nilfs2/segbuf.c
··· 414 414 { 415 415 wi->bio = NULL; 416 416 wi->rest_blocks = segbuf->sb_sum.nblocks; 417 - wi->max_pages = bio_get_nr_vecs(wi->nilfs->ns_bdev); 417 + wi->max_pages = BIO_MAX_PAGES; 418 418 wi->nr_vecs = min(wi->max_pages, wi->rest_blocks); 419 419 wi->start = wi->end = 0; 420 420 wi->blocknr = segbuf->sb_pseg_start;
+1 -2
fs/xfs/xfs_aops.c
··· 381 381 xfs_alloc_ioend_bio( 382 382 struct buffer_head *bh) 383 383 { 384 - int nvecs = bio_get_nr_vecs(bh->b_bdev); 385 - struct bio *bio = bio_alloc(GFP_NOIO, nvecs); 384 + struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); 386 385 387 386 ASSERT(bio->bi_private == NULL); 388 387 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+0 -1
include/linux/bio.h
··· 460 460 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); 461 461 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, 462 462 unsigned int, unsigned int); 463 - extern int bio_get_nr_vecs(struct block_device *); 464 463 struct rq_map_data; 465 464 extern struct bio *bio_map_user_iov(struct request_queue *, 466 465 const struct iov_iter *, gfp_t);