Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: allow bio_for_each_segment_all() to iterate over multi-page bvec

This patch introduces one extra iterator variable to bio_for_each_segment_all(),
then we can allow bio_for_each_segment_all() to iterate over multi-page bvec.

Given it is just one mechanical & simple change on all bio_for_each_segment_all()
users, this patch does tree-wide change in one single patch, so that we can
avoid using a temporary helper for this conversion.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Ming Lei and committed by
Jens Axboe
6dc4f100 2e1f4f4d

+127 -46
+18 -9
block/bio.c
··· 1072 1072 { 1073 1073 int i; 1074 1074 struct bio_vec *bvec; 1075 + struct bvec_iter_all iter_all; 1075 1076 1076 - bio_for_each_segment_all(bvec, bio, i) { 1077 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 1077 1078 ssize_t ret; 1078 1079 1079 1080 ret = copy_page_from_iter(bvec->bv_page, ··· 1104 1103 { 1105 1104 int i; 1106 1105 struct bio_vec *bvec; 1106 + struct bvec_iter_all iter_all; 1107 1107 1108 - bio_for_each_segment_all(bvec, bio, i) { 1108 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 1109 1109 ssize_t ret; 1110 1110 1111 1111 ret = copy_page_to_iter(bvec->bv_page, ··· 1128 1126 { 1129 1127 struct bio_vec *bvec; 1130 1128 int i; 1129 + struct bvec_iter_all iter_all; 1131 1130 1132 - bio_for_each_segment_all(bvec, bio, i) 1131 + bio_for_each_segment_all(bvec, bio, i, iter_all) 1133 1132 __free_page(bvec->bv_page); 1134 1133 } 1135 1134 EXPORT_SYMBOL(bio_free_pages); ··· 1298 1295 struct bio *bio; 1299 1296 int ret; 1300 1297 struct bio_vec *bvec; 1298 + struct bvec_iter_all iter_all; 1301 1299 1302 1300 if (!iov_iter_count(iter)) 1303 1301 return ERR_PTR(-EINVAL); ··· 1372 1368 return bio; 1373 1369 1374 1370 out_unmap: 1375 - bio_for_each_segment_all(bvec, bio, j) { 1371 + bio_for_each_segment_all(bvec, bio, j, iter_all) { 1376 1372 put_page(bvec->bv_page); 1377 1373 } 1378 1374 bio_put(bio); ··· 1383 1379 { 1384 1380 struct bio_vec *bvec; 1385 1381 int i; 1382 + struct bvec_iter_all iter_all; 1386 1383 1387 1384 /* 1388 1385 * make sure we dirty pages we wrote to 1389 1386 */ 1390 - bio_for_each_segment_all(bvec, bio, i) { 1387 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 1391 1388 if (bio_data_dir(bio) == READ) 1392 1389 set_page_dirty_lock(bvec->bv_page); 1393 1390 ··· 1480 1475 char *p = bio->bi_private; 1481 1476 struct bio_vec *bvec; 1482 1477 int i; 1478 + struct bvec_iter_all iter_all; 1483 1479 1484 - bio_for_each_segment_all(bvec, bio, i) { 1480 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 1485 1481 
memcpy(p, page_address(bvec->bv_page), bvec->bv_len); 1486 1482 p += bvec->bv_len; 1487 1483 } ··· 1591 1585 { 1592 1586 struct bio_vec *bvec; 1593 1587 int i; 1588 + struct bvec_iter_all iter_all; 1594 1589 1595 - bio_for_each_segment_all(bvec, bio, i) { 1590 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 1596 1591 if (!PageCompound(bvec->bv_page)) 1597 1592 set_page_dirty_lock(bvec->bv_page); 1598 1593 } ··· 1603 1596 { 1604 1597 struct bio_vec *bvec; 1605 1598 int i; 1599 + struct bvec_iter_all iter_all; 1606 1600 1607 - bio_for_each_segment_all(bvec, bio, i) 1601 + bio_for_each_segment_all(bvec, bio, i, iter_all) 1608 1602 put_page(bvec->bv_page); 1609 1603 } 1610 1604 ··· 1652 1644 struct bio_vec *bvec; 1653 1645 unsigned long flags; 1654 1646 int i; 1647 + struct bvec_iter_all iter_all; 1655 1648 1656 - bio_for_each_segment_all(bvec, bio, i) { 1649 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 1657 1650 if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page)) 1658 1651 goto defer; 1659 1652 }
+4 -2
block/bounce.c
··· 165 165 struct bio_vec *bvec, orig_vec; 166 166 int i; 167 167 struct bvec_iter orig_iter = bio_orig->bi_iter; 168 + struct bvec_iter_all iter_all; 168 169 169 170 /* 170 171 * free up bounce indirect pages used 171 172 */ 172 - bio_for_each_segment_all(bvec, bio, i) { 173 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 173 174 orig_vec = bio_iter_iovec(bio_orig, orig_iter); 174 175 if (bvec->bv_page != orig_vec.bv_page) { 175 176 dec_zone_page_state(bvec->bv_page, NR_BOUNCE); ··· 295 294 bool bounce = false; 296 295 int sectors = 0; 297 296 bool passthrough = bio_is_passthrough(*bio_orig); 297 + struct bvec_iter_all iter_all; 298 298 299 299 bio_for_each_segment(from, *bio_orig, iter) { 300 300 if (i++ < BIO_MAX_PAGES) ··· 315 313 bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL : 316 314 &bounce_bio_set); 317 315 318 - bio_for_each_segment_all(to, bio, i) { 316 + bio_for_each_segment_all(to, bio, i, iter_all) { 319 317 struct page *page = to->bv_page; 320 318 321 319 if (page_to_pfn(page) <= q->limits.bounce_pfn)
+2 -1
drivers/md/bcache/btree.c
··· 432 432 int j; 433 433 struct bio_vec *bv; 434 434 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); 435 + struct bvec_iter_all iter_all; 435 436 436 - bio_for_each_segment_all(bv, b->bio, j) 437 + bio_for_each_segment_all(bv, b->bio, j, iter_all) 437 438 memcpy(page_address(bv->bv_page), 438 439 base + j * PAGE_SIZE, PAGE_SIZE); 439 440
+2 -1
drivers/md/dm-crypt.c
··· 1447 1447 { 1448 1448 unsigned int i; 1449 1449 struct bio_vec *bv; 1450 + struct bvec_iter_all iter_all; 1450 1451 1451 - bio_for_each_segment_all(bv, clone, i) { 1452 + bio_for_each_segment_all(bv, clone, i, iter_all) { 1452 1453 BUG_ON(!bv->bv_page); 1453 1454 mempool_free(bv->bv_page, &cc->page_pool); 1454 1455 }
+2 -1
drivers/md/raid1.c
··· 2112 2112 struct page **spages = get_resync_pages(sbio)->pages; 2113 2113 struct bio_vec *bi; 2114 2114 int page_len[RESYNC_PAGES] = { 0 }; 2115 + struct bvec_iter_all iter_all; 2115 2116 2116 2117 if (sbio->bi_end_io != end_sync_read) 2117 2118 continue; 2118 2119 /* Now we can 'fixup' the error value */ 2119 2120 sbio->bi_status = 0; 2120 2121 2121 - bio_for_each_segment_all(bi, sbio, j) 2122 + bio_for_each_segment_all(bi, sbio, j, iter_all) 2122 2123 page_len[j] = bi->bv_len; 2123 2124 2124 2125 if (!status) {
+2 -1
drivers/staging/erofs/data.c
··· 20 20 int i; 21 21 struct bio_vec *bvec; 22 22 const blk_status_t err = bio->bi_status; 23 + struct bvec_iter_all iter_all; 23 24 24 - bio_for_each_segment_all(bvec, bio, i) { 25 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 25 26 struct page *page = bvec->bv_page; 26 27 27 28 /* page is already locked */
+2 -1
drivers/staging/erofs/unzip_vle.c
··· 830 830 #ifdef EROFS_FS_HAS_MANAGED_CACHE 831 831 struct address_space *mc = NULL; 832 832 #endif 833 + struct bvec_iter_all iter_all; 833 834 834 - bio_for_each_segment_all(bvec, bio, i) { 835 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 835 836 struct page *page = bvec->bv_page; 836 837 bool cachemngd = false; 837 838
+4 -2
fs/block_dev.c
··· 211 211 ssize_t ret; 212 212 blk_qc_t qc; 213 213 int i; 214 + struct bvec_iter_all iter_all; 214 215 215 216 if ((pos | iov_iter_alignment(iter)) & 216 217 (bdev_logical_block_size(bdev) - 1)) ··· 261 260 } 262 261 __set_current_state(TASK_RUNNING); 263 262 264 - bio_for_each_segment_all(bvec, &bio, i) { 263 + bio_for_each_segment_all(bvec, &bio, i, iter_all) { 265 264 if (should_dirty && !PageCompound(bvec->bv_page)) 266 265 set_page_dirty_lock(bvec->bv_page); 267 266 put_page(bvec->bv_page); ··· 330 329 } else { 331 330 struct bio_vec *bvec; 332 331 int i; 332 + struct bvec_iter_all iter_all; 333 333 334 - bio_for_each_segment_all(bvec, bio, i) 334 + bio_for_each_segment_all(bvec, bio, i, iter_all) 335 335 put_page(bvec->bv_page); 336 336 bio_put(bio); 337 337 }
+2 -1
fs/btrfs/compression.c
··· 162 162 } else { 163 163 int i; 164 164 struct bio_vec *bvec; 165 + struct bvec_iter_all iter_all; 165 166 166 167 /* 167 168 * we have verified the checksum already, set page 168 169 * checked so the end_io handlers know about it 169 170 */ 170 171 ASSERT(!bio_flagged(bio, BIO_CLONED)); 171 - bio_for_each_segment_all(bvec, cb->orig_bio, i) 172 + bio_for_each_segment_all(bvec, cb->orig_bio, i, iter_all) 172 173 SetPageChecked(bvec->bv_page); 173 174 174 175 bio_endio(cb->orig_bio);
+2 -1
fs/btrfs/disk-io.c
··· 832 832 struct bio_vec *bvec; 833 833 struct btrfs_root *root; 834 834 int i, ret = 0; 835 + struct bvec_iter_all iter_all; 835 836 836 837 ASSERT(!bio_flagged(bio, BIO_CLONED)); 837 - bio_for_each_segment_all(bvec, bio, i) { 838 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 838 839 root = BTRFS_I(bvec->bv_page->mapping->host)->root; 839 840 ret = csum_dirty_buffer(root->fs_info, bvec->bv_page); 840 841 if (ret)
+6 -3
fs/btrfs/extent_io.c
··· 2422 2422 u64 start; 2423 2423 u64 end; 2424 2424 int i; 2425 + struct bvec_iter_all iter_all; 2425 2426 2426 2427 ASSERT(!bio_flagged(bio, BIO_CLONED)); 2427 - bio_for_each_segment_all(bvec, bio, i) { 2428 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 2428 2429 struct page *page = bvec->bv_page; 2429 2430 struct inode *inode = page->mapping->host; 2430 2431 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ··· 2494 2493 int mirror; 2495 2494 int ret; 2496 2495 int i; 2496 + struct bvec_iter_all iter_all; 2497 2497 2498 2498 ASSERT(!bio_flagged(bio, BIO_CLONED)); 2499 - bio_for_each_segment_all(bvec, bio, i) { 2499 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 2500 2500 struct page *page = bvec->bv_page; 2501 2501 struct inode *inode = page->mapping->host; 2502 2502 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ··· 3637 3635 struct bio_vec *bvec; 3638 3636 struct extent_buffer *eb; 3639 3637 int i, done; 3638 + struct bvec_iter_all iter_all; 3640 3639 3641 3640 ASSERT(!bio_flagged(bio, BIO_CLONED)); 3642 - bio_for_each_segment_all(bvec, bio, i) { 3641 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 3643 3642 struct page *page = bvec->bv_page; 3644 3643 3645 3644 eb = (struct extent_buffer *)page->private;
+4 -2
fs/btrfs/inode.c
··· 7777 7777 struct bio_vec *bvec; 7778 7778 struct extent_io_tree *io_tree, *failure_tree; 7779 7779 int i; 7780 + struct bvec_iter_all iter_all; 7780 7781 7781 7782 if (bio->bi_status) 7782 7783 goto end; ··· 7789 7788 7790 7789 done->uptodate = 1; 7791 7790 ASSERT(!bio_flagged(bio, BIO_CLONED)); 7792 - bio_for_each_segment_all(bvec, bio, i) 7791 + bio_for_each_segment_all(bvec, bio, i, iter_all) 7793 7792 clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree, 7794 7793 io_tree, done->start, bvec->bv_page, 7795 7794 btrfs_ino(BTRFS_I(inode)), 0); ··· 7868 7867 int uptodate; 7869 7868 int ret; 7870 7869 int i; 7870 + struct bvec_iter_all iter_all; 7871 7871 7872 7872 if (bio->bi_status) 7873 7873 goto end; ··· 7882 7880 failure_tree = &BTRFS_I(inode)->io_failure_tree; 7883 7881 7884 7882 ASSERT(!bio_flagged(bio, BIO_CLONED)); 7885 - bio_for_each_segment_all(bvec, bio, i) { 7883 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 7886 7884 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page, 7887 7885 bvec->bv_offset, done->start, 7888 7886 bvec->bv_len);
+2 -1
fs/btrfs/raid56.c
··· 1443 1443 { 1444 1444 struct bio_vec *bvec; 1445 1445 int i; 1446 + struct bvec_iter_all iter_all; 1446 1447 1447 1448 ASSERT(!bio_flagged(bio, BIO_CLONED)); 1448 1449 1449 - bio_for_each_segment_all(bvec, bio, i) 1450 + bio_for_each_segment_all(bvec, bio, i, iter_all) 1450 1451 SetPageUptodate(bvec->bv_page); 1451 1452 } 1452 1453
+2 -1
fs/crypto/bio.c
··· 30 30 { 31 31 struct bio_vec *bv; 32 32 int i; 33 + struct bvec_iter_all iter_all; 33 34 34 - bio_for_each_segment_all(bv, bio, i) { 35 + bio_for_each_segment_all(bv, bio, i, iter_all) { 35 36 struct page *page = bv->bv_page; 36 37 int ret = fscrypt_decrypt_page(page->mapping->host, page, 37 38 PAGE_SIZE, 0, page->index);
+3 -1
fs/direct-io.c
··· 551 551 if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) { 552 552 bio_check_pages_dirty(bio); /* transfers ownership */ 553 553 } else { 554 - bio_for_each_segment_all(bvec, bio, i) { 554 + struct bvec_iter_all iter_all; 555 + 556 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 555 557 struct page *page = bvec->bv_page; 556 558 557 559 if (dio->op == REQ_OP_READ && !PageCompound(page) &&
+2 -1
fs/exofs/ore.c
··· 420 420 { 421 421 struct bio_vec *bv; 422 422 unsigned i; 423 + struct bvec_iter_all iter_all; 423 424 424 - bio_for_each_segment_all(bv, bio, i) { 425 + bio_for_each_segment_all(bv, bio, i, iter_all) { 425 426 unsigned this_count = bv->bv_len; 426 427 427 428 if (likely(PAGE_SIZE == this_count))
+2 -1
fs/exofs/ore_raid.c
··· 468 468 /* loop on all devices all pages */ 469 469 for (d = 0; d < ios->numdevs; d++) { 470 470 struct bio *bio = ios->per_dev[d].bio; 471 + struct bvec_iter_all iter_all; 471 472 472 473 if (!bio) 473 474 continue; 474 475 475 - bio_for_each_segment_all(bv, bio, i) { 476 + bio_for_each_segment_all(bv, bio, i, iter_all) { 476 477 struct page *page = bv->bv_page; 477 478 478 479 SetPageUptodate(page);
+2 -1
fs/ext4/page-io.c
··· 63 63 { 64 64 int i; 65 65 struct bio_vec *bvec; 66 + struct bvec_iter_all iter_all; 66 67 67 - bio_for_each_segment_all(bvec, bio, i) { 68 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 68 69 struct page *page = bvec->bv_page; 69 70 #ifdef CONFIG_EXT4_FS_ENCRYPTION 70 71 struct page *data_page = NULL;
+2 -1
fs/ext4/readpage.c
··· 72 72 { 73 73 struct bio_vec *bv; 74 74 int i; 75 + struct bvec_iter_all iter_all; 75 76 76 77 if (ext4_bio_encrypted(bio)) { 77 78 if (bio->bi_status) { ··· 82 81 return; 83 82 } 84 83 } 85 - bio_for_each_segment_all(bv, bio, i) { 84 + bio_for_each_segment_all(bv, bio, i, iter_all) { 86 85 struct page *page = bv->bv_page; 87 86 88 87 if (!bio->bi_status) {
+6 -3
fs/f2fs/data.c
··· 87 87 struct page *page; 88 88 struct bio_vec *bv; 89 89 int i; 90 + struct bvec_iter_all iter_all; 90 91 91 - bio_for_each_segment_all(bv, bio, i) { 92 + bio_for_each_segment_all(bv, bio, i, iter_all) { 92 93 page = bv->bv_page; 93 94 94 95 /* PG_error was set if any post_read step failed */ ··· 165 164 struct f2fs_sb_info *sbi = bio->bi_private; 166 165 struct bio_vec *bvec; 167 166 int i; 167 + struct bvec_iter_all iter_all; 168 168 169 169 if (time_to_inject(sbi, FAULT_WRITE_IO)) { 170 170 f2fs_show_injection_info(FAULT_WRITE_IO); 171 171 bio->bi_status = BLK_STS_IOERR; 172 172 } 173 173 174 - bio_for_each_segment_all(bvec, bio, i) { 174 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 175 175 struct page *page = bvec->bv_page; 176 176 enum count_type type = WB_DATA_TYPE(page); 177 177 ··· 349 347 struct bio_vec *bvec; 350 348 struct page *target; 351 349 int i; 350 + struct bvec_iter_all iter_all; 352 351 353 352 if (!io->bio) 354 353 return false; ··· 357 354 if (!inode && !page && !ino) 358 355 return true; 359 356 360 - bio_for_each_segment_all(bvec, io->bio, i) { 357 + bio_for_each_segment_all(bvec, io->bio, i, iter_all) { 361 358 362 359 if (bvec->bv_page->mapping) 363 360 target = bvec->bv_page;
+6 -3
fs/gfs2/lops.c
··· 170 170 * that is pinned in the pagecache. 171 171 */ 172 172 173 - static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec, 173 + static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, 174 + struct bio_vec *bvec, 174 175 blk_status_t error) 175 176 { 176 177 struct buffer_head *bh, *next; ··· 209 208 struct bio_vec *bvec; 210 209 struct page *page; 211 210 int i; 211 + struct bvec_iter_all iter_all; 212 212 213 213 if (bio->bi_status) { 214 214 fs_err(sdp, "Error %d writing to journal, jid=%u\n", ··· 217 215 wake_up(&sdp->sd_logd_waitq); 218 216 } 219 217 220 - bio_for_each_segment_all(bvec, bio, i) { 218 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 221 219 page = bvec->bv_page; 222 220 if (page_has_buffers(page)) 223 221 gfs2_end_log_write_bh(sdp, bvec, bio->bi_status); ··· 390 388 struct page *page; 391 389 struct bio_vec *bvec; 392 390 int i; 391 + struct bvec_iter_all iter_all; 393 392 394 - bio_for_each_segment_all(bvec, bio, i) { 393 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 395 394 page = bvec->bv_page; 396 395 if (bio->bi_status) { 397 396 int err = blk_status_to_errno(bio->bi_status);
+2 -1
fs/gfs2/meta_io.c
··· 190 190 { 191 191 struct bio_vec *bvec; 192 192 int i; 193 + struct bvec_iter_all iter_all; 193 194 194 - bio_for_each_segment_all(bvec, bio, i) { 195 + bio_for_each_segment_all(bvec, bio, i, iter_all) { 195 196 struct page *page = bvec->bv_page; 196 197 struct buffer_head *bh = page_buffers(page); 197 198 unsigned int len = bvec->bv_len;
+4 -2
fs/iomap.c
··· 267 267 int error = blk_status_to_errno(bio->bi_status); 268 268 struct bio_vec *bvec; 269 269 int i; 270 + struct bvec_iter_all iter_all; 270 271 271 - bio_for_each_segment_all(bvec, bio, i) 272 + bio_for_each_segment_all(bvec, bio, i, iter_all) 272 273 iomap_read_page_end_io(bvec, error); 273 274 bio_put(bio); 274 275 } ··· 1560 1559 } else { 1561 1560 struct bio_vec *bvec; 1562 1561 int i; 1562 + struct bvec_iter_all iter_all; 1563 1563 1564 - bio_for_each_segment_all(bvec, bio, i) 1564 + bio_for_each_segment_all(bvec, bio, i, iter_all) 1565 1565 put_page(bvec->bv_page); 1566 1566 bio_put(bio); 1567 1567 }
+2 -1
fs/mpage.c
··· 48 48 { 49 49 struct bio_vec *bv; 50 50 int i; 51 + struct bvec_iter_all iter_all; 51 52 52 - bio_for_each_segment_all(bv, bio, i) { 53 + bio_for_each_segment_all(bv, bio, i, iter_all) { 53 54 struct page *page = bv->bv_page; 54 55 page_endio(page, bio_op(bio), 55 56 blk_status_to_errno(bio->bi_status));
+3 -2
fs/xfs/xfs_aops.c
··· 62 62 static void 63 63 xfs_finish_page_writeback( 64 64 struct inode *inode, 65 - struct bio_vec *bvec, 65 + struct bio_vec *bvec, 66 66 int error) 67 67 { 68 68 struct iomap_page *iop = to_iomap_page(bvec->bv_page); ··· 98 98 for (bio = &ioend->io_inline_bio; bio; bio = next) { 99 99 struct bio_vec *bvec; 100 100 int i; 101 + struct bvec_iter_all iter_all; 101 102 102 103 /* 103 104 * For the last bio, bi_private points to the ioend, so we ··· 110 109 next = bio->bi_private; 111 110 112 111 /* walk each page on bio, ending page IO on them */ 113 - bio_for_each_segment_all(bvec, bio, i) 112 + bio_for_each_segment_all(bvec, bio, i, iter_all) 114 113 xfs_finish_page_writeback(inode, bvec, error); 115 114 bio_put(bio); 116 115 }
+9 -2
include/linux/bio.h
··· 128 128 return bio->bi_vcnt >= bio->bi_max_vecs; 129 129 } 130 130 131 + #define mp_bvec_for_each_segment(bv, bvl, i, iter_all) \ 132 + for (bv = bvec_init_iter_all(&iter_all); \ 133 + (iter_all.done < (bvl)->bv_len) && \ 134 + (mp_bvec_next_segment((bvl), &iter_all), 1); \ 135 + iter_all.done += bv->bv_len, i += 1) 136 + 131 137 /* 132 138 * drivers should _never_ use the all version - the bio may have been split 133 139 * before it got to the driver and the driver won't own all of it 134 140 */ 135 - #define bio_for_each_segment_all(bvl, bio, i) \ 136 - for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++) 141 + #define bio_for_each_segment_all(bvl, bio, i, iter_all) \ 142 + for (i = 0, iter_all.idx = 0; iter_all.idx < (bio)->bi_vcnt; iter_all.idx++) \ 143 + mp_bvec_for_each_segment(bvl, &((bio)->bi_io_vec[iter_all.idx]), i, iter_all) 137 144 138 145 static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, 139 146 unsigned bytes)
+30
include/linux/bvec.h
··· 45 45 current bvec */ 46 46 }; 47 47 48 + struct bvec_iter_all { 49 + struct bio_vec bv; 50 + int idx; 51 + unsigned done; 52 + }; 53 + 48 54 /* 49 55 * various member access, note that bio_data should of course not be used 50 56 * on highmem page vectors ··· 135 129 .bi_size = UINT_MAX, \ 136 130 .bi_idx = 0, \ 137 131 .bi_bvec_done = 0, \ 132 + } 133 + 134 + static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all) 135 + { 136 + iter_all->bv.bv_page = NULL; 137 + iter_all->done = 0; 138 + 139 + return &iter_all->bv; 140 + } 141 + 142 + static inline void mp_bvec_next_segment(const struct bio_vec *bvec, 143 + struct bvec_iter_all *iter_all) 144 + { 145 + struct bio_vec *bv = &iter_all->bv; 146 + 147 + if (bv->bv_page) { 148 + bv->bv_page = nth_page(bv->bv_page, 1); 149 + bv->bv_offset = 0; 150 + } else { 151 + bv->bv_page = bvec->bv_page; 152 + bv->bv_offset = bvec->bv_offset; 153 + } 154 + bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset, 155 + bvec->bv_len - iter_all->done); 138 156 } 139 157 140 158 /*