block: Add bio_max_segs

It's often inconvenient to use BIO_MAX_PAGES because min() requires both
of its arguments to have the same signedness. Introduce bio_max_segs()
and change BIO_MAX_PAGES to be unsigned to make it easier for callers.

Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Matthew Wilcox (Oracle), committed by Jens Axboe (5f7136db 94d4bffd)

20 files changed: +44 -52
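The "same signedness" complaint is easiest to see in miniature. The following is a userspace-only sketch, not kernel code: the MIN() macro below merely mimics the compile-time type check the kernel's min() performs. With BIO_MAX_PAGES as a plain (signed) 256, clamping an unsigned count needed a min_t() cast at every call site; an unsigned 256U plus the bio_max_segs() helper type-checks cleanly.

#include <stdio.h>

/*
 * MIN() mimics the kernel min()'s refusal to compare operands of
 * differing types; it is not the real kernel macro.
 */
#define MIN(a, b) ({                                                    \
        __typeof__(a) _a = (a);                                         \
        __typeof__(b) _b = (b);                                         \
        _Static_assert(__builtin_types_compatible_p(__typeof__(a),     \
                                                    __typeof__(b)),    \
                       "min() needs matching types");                   \
        _a < _b ? _a : _b;                                              \
})

#define BIO_MAX_PAGES   256U    /* unsigned, as of this commit */

/* The new helper: one unsigned type throughout, so MIN() is happy. */
static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
        return MIN(nr_segs, BIO_MAX_PAGES);
}

int main(void)
{
        unsigned int sg_cnt = 1000;

        /*
         * With a plain signed 256, MIN(sg_cnt, BIO_MAX_PAGES) would mix
         * unsigned int and int and trip the _Static_assert above; that
         * mismatch is why call sites previously needed min_t() casts.
         */
        printf("%u\n", bio_max_segs(sg_cnt));   /* prints 256 */
        return 0;
}

Compile with e.g. gcc -std=gnu11; changing BIO_MAX_PAGES back to a signed 256 makes the assertion fire, reproducing the original annoyance.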
+1 -3
block/blk-map.c
···
 	bmd->is_our_pages = !map_data;
 	bmd->is_null_mapped = (map_data && map_data->null_mapped);

-	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
-	if (nr_pages > BIO_MAX_PAGES)
-		nr_pages = BIO_MAX_PAGES;
+	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

 	ret = -ENOMEM;
 	bio = bio_kmalloc(gfp_mask, nr_pages);
+1 -3
drivers/block/xen-blkback/blkback.c
···
 				pages[i]->page,
 				seg[i].nsec << 9,
 				seg[i].offset) == 0)) {
-
-			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
-			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
+			bio = bio_alloc(GFP_KERNEL, bio_max_segs(nseg - i));
 			if (unlikely(bio == NULL))
 				goto fail_put_bio;

+2 -2
drivers/md/dm-io.c
···
 		num_bvecs = 1;
 		break;
 	default:
-		num_bvecs = min_t(int, BIO_MAX_PAGES,
-				  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
+		num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
+				(PAGE_SIZE >> SECTOR_SHIFT)));
 	}

 	bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios);
+5 -5
drivers/md/dm-log-writes.c
···
 		size_t entrylen, void *data, size_t datalen,
 		sector_t sector)
 {
-	int num_pages, bio_pages, pg_datalen, pg_sectorlen, i;
+	int bio_pages, pg_datalen, pg_sectorlen, i;
 	struct page *page;
 	struct bio *bio;
 	size_t ret;
 	void *ptr;

 	while (datalen) {
-		num_pages = ALIGN(datalen, PAGE_SIZE) >> PAGE_SHIFT;
-		bio_pages = min(num_pages, BIO_MAX_PAGES);
+		bio_pages = bio_max_segs(DIV_ROUND_UP(datalen, PAGE_SIZE));

 		atomic_inc(&lc->io_blocks);
···
 		goto out;

 	atomic_inc(&lc->io_blocks);
-	bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
+	bio = bio_alloc(GFP_KERNEL, bio_max_segs(block->vec_cnt));
 	if (!bio) {
 		DMERR("Couldn't alloc log bio");
 		goto error;
···
 		if (ret != block->vecs[i].bv_len) {
 			atomic_inc(&lc->io_blocks);
 			submit_bio(bio);
-			bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
+			bio = bio_alloc(GFP_KERNEL,
+					bio_max_segs(block->vec_cnt - i));
 			if (!bio) {
 				DMERR("Couldn't alloc log bio");
 				goto error;
+4 -4
drivers/nvme/target/io-cmd-bdev.c
···
 	}

 	bip = bio_integrity_alloc(bio, GFP_NOIO,
-		min_t(unsigned int, req->metadata_sg_cnt, BIO_MAX_PAGES));
+		bio_max_segs(req->metadata_sg_cnt));
 	if (IS_ERR(bip)) {
 		pr_err("Unable to allocate bio_integrity_payload\n");
 		return PTR_ERR(bip);
···

 static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 {
-	int sg_cnt = req->sg_cnt;
+	unsigned int sg_cnt = req->sg_cnt;
 	struct bio *bio;
 	struct scatterlist *sg;
 	struct blk_plug plug;
···
 		bio = &req->b.inline_bio;
 		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
 	} else {
-		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+		bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
 	}
 	bio_set_dev(bio, req->ns->bdev);
 	bio->bi_iter.bi_sector = sector;
···
 		}
 	}

-	bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+	bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
 	bio_set_dev(bio, req->ns->bdev);
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_opf = op;
+2 -2
drivers/nvme/target/passthru.c
···
 	struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
 	u16 status = NVME_SC_SUCCESS;
 	struct nvme_id_ctrl *id;
-	int max_hw_sectors;
+	unsigned int max_hw_sectors;
 	int page_shift;

 	id = kzalloc(sizeof(*id), GFP_KERNEL);
···
 		bio = &req->p.inline_bio;
 		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
 	} else {
-		bio = bio_alloc(GFP_KERNEL, min(req->sg_cnt, BIO_MAX_PAGES));
+		bio = bio_alloc(GFP_KERNEL, bio_max_segs(req->sg_cnt));
 		bio->bi_end_io = bio_put;
 	}
 	bio->bi_opf = req_op(rq);
+3 -6
drivers/target/target_core_iblock.c
···
 	 * Only allocate as many vector entries as the bio code allows us to,
 	 * we'll loop later on until we have handled the whole request.
 	 */
-	if (sg_num > BIO_MAX_PAGES)
-		sg_num = BIO_MAX_PAGES;
-
-	bio = bio_alloc_bioset(GFP_NOIO, sg_num, &ib_dev->ibd_bio_set);
+	bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num),
+				&ib_dev->ibd_bio_set);
 	if (!bio) {
 		pr_err("Unable to allocate memory for bio\n");
 		return NULL;
···
 		return -ENODEV;
 	}

-	bip = bio_integrity_alloc(bio, GFP_NOIO,
-			min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
+	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
 	if (IS_ERR(bip)) {
 		pr_err("Unable to allocate bio_integrity_payload\n");
 		return PTR_ERR(bip);
+1 -1
drivers/target/target_core_pscsi.c
···

 	if (!bio) {
 new_bio:
-		nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+		nr_vecs = bio_max_segs(nr_pages);
 		nr_pages -= nr_vecs;
 		/*
 		 * Calls bio_kmalloc() and sets bio->bi_end_io()
+5 -5
fs/block_dev.c
···

 static ssize_t
 __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
-		int nr_pages)
+		unsigned int nr_pages)
 {
 	struct file *file = iocb->ki_filp;
 	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
···
 	}
 }

-static ssize_t
-__blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
+static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+		unsigned int nr_pages)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = bdev_file_inode(file);
···
 static ssize_t
 blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
-	int nr_pages;
+	unsigned int nr_pages;

 	if (!iov_iter_count(iter))
 		return 0;
···
 	if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_PAGES)
 		return __blkdev_direct_IO_simple(iocb, iter, nr_pages);

-	return __blkdev_direct_IO(iocb, iter, min(nr_pages, BIO_MAX_PAGES));
+	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
 }

 static __init int blkdev_init(void)
+1 -1
fs/direct-io.c
···
 	if (ret)
 		goto out;
 	sector = start_sector << (sdio->blkbits - 9);
-	nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
+	nr_pages = bio_max_segs(sdio->pages_in_io);
 	BUG_ON(nr_pages <= 0);
 	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
 	sdio->boundary = 0;
+1 -3
fs/erofs/data.c
···
 	/* max # of continuous pages */
 	if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
 		nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
-	if (nblocks > BIO_MAX_PAGES)
-		nblocks = BIO_MAX_PAGES;

-	bio = bio_alloc(GFP_NOIO, nblocks);
+	bio = bio_alloc(GFP_NOIO, bio_max_segs(nblocks));

 	bio->bi_end_io = erofs_readendio;
 	bio_set_dev(bio, sb->s_bdev);
+1 -2
fs/ext4/readpage.c
···
 		 * bio_alloc will _always_ be able to allocate a bio if
 		 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
 		 */
-		bio = bio_alloc(GFP_KERNEL,
-				min_t(int, nr_pages, BIO_MAX_PAGES));
+		bio = bio_alloc(GFP_KERNEL, bio_max_segs(nr_pages));
 		fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
 					  GFP_KERNEL);
 		ext4_set_bio_post_read_ctx(bio, inode, page->index);
+1 -2
fs/f2fs/data.c
···
 	unsigned int post_read_steps = 0;

 	bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
-			       min_t(int, nr_pages, BIO_MAX_PAGES),
-			       &f2fs_bioset);
+			       bio_max_segs(nr_pages), &f2fs_bioset);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);

+1 -1
fs/f2fs/node.c
···
 	sum_entry = &sum->entries[0];

 	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
-		nrpages = min(last_offset - i, BIO_MAX_PAGES);
+		nrpages = bio_max_segs(last_offset - i);

 		/* readahead node pages */
 		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
+2 -2
fs/iomap/buffered-io.c
···
 	if (!is_contig || bio_full(ctx->bio, plen)) {
 		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
 		gfp_t orig_gfp = gfp;
-		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

 		if (ctx->bio)
 			submit_bio(ctx->bio);

 		if (ctx->rac) /* same as readahead_gfp_mask */
 			gfp |= __GFP_NORETRY | __GFP_NOWARN;
-		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
+		ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
 		/*
 		 * If the bio_alloc fails, try it again for a single page to
 		 * avoid having to deal with partial page reads. This emulates
+1 -3
fs/mpage.c
···
 			goto out;
 	}
 	args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-				min_t(int, args->nr_pages,
-				      BIO_MAX_PAGES),
-				gfp);
+				bio_max_segs(args->nr_pages), gfp);
 	if (args->bio == NULL)
 		goto confused;
 }
+3 -3
fs/nfs/blocklayout/blocklayout.c
···
 	return NULL;
 }

-static struct bio *
-bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
+static struct bio *bl_alloc_init_bio(unsigned int npg,
+		struct block_device *bdev, sector_t disk_sector,
 		bio_end_io_t end_io, struct parallel_io *par)
 {
 	struct bio *bio;

-	npg = min(npg, BIO_MAX_PAGES);
+	npg = bio_max_segs(npg);
 	bio = bio_alloc(GFP_NOIO, npg);
 	if (bio) {
 		bio->bi_iter.bi_sector = disk_sector;
+1 -1
fs/xfs/xfs_bio_io.c
···

 static inline unsigned int bio_max_vecs(unsigned int count)
 {
-	return min_t(unsigned, howmany(count, PAGE_SIZE), BIO_MAX_PAGES);
+	return bio_max_segs(howmany(count, PAGE_SIZE));
 }

 int
+2 -2
fs/xfs/xfs_buf.c
···
 	int op)
 {
 	int page_index;
-	int total_nr_pages = bp->b_page_count;
+	unsigned int total_nr_pages = bp->b_page_count;
 	int nr_pages;
 	struct bio *bio;
 	sector_t sector = bp->b_maps[map].bm_bn;
···

 next_chunk:
 	atomic_inc(&bp->b_io_remaining);
-	nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
+	nr_pages = bio_max_segs(total_nr_pages);

 	bio = bio_alloc(GFP_NOIO, nr_pages);
 	bio_set_dev(bio, bp->b_target->bt_bdev);
+6 -1
include/linux/bio.h
···
 #define BIO_BUG_ON
 #endif

-#define BIO_MAX_PAGES		256
+#define BIO_MAX_PAGES		256U
+
+static inline unsigned int bio_max_segs(unsigned int nr_segs)
+{
+	return min(nr_segs, BIO_MAX_PAGES);
+}

 #define bio_prio(bio)			(bio)->bi_ioprio
 #define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)
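Seen end to end, the call-site conversions above are mechanical. A representative before/after, condensed from the hunks in this commit (sg_cnt stands for whatever segment count a caller tracks, now unsigned):

        /* before: open-coded clamp, with min_t() wherever signedness differed */
        bio = bio_alloc(GFP_KERNEL, min_t(int, sg_cnt, BIO_MAX_PAGES));

        /* after: unsigned throughout, one helper */
        bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));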