Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: remove the i argument to bio_for_each_segment_all

We only have two callers that need the integer loop iterator, and they
can easily maintain it themselves.

Suggested-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Acked-by: David Sterba <dsterba@suse.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Acked-by: Coly Li <colyli@suse.de>
Reviewed-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig; committed by Jens Axboe.
2b070cfe f936b06a

+47 -81
+10 -19
block/bio.c
··· 874 874 { 875 875 struct bvec_iter_all iter_all; 876 876 struct bio_vec *bvec; 877 - int i; 878 877 879 - bio_for_each_segment_all(bvec, bio, i, iter_all) 878 + bio_for_each_segment_all(bvec, bio, iter_all) 880 879 get_page(bvec->bv_page); 881 880 } 882 881 ··· 883 884 { 884 885 struct bvec_iter_all iter_all; 885 886 struct bio_vec *bvec; 886 - int i; 887 887 888 - bio_for_each_segment_all(bvec, bio, i, iter_all) 888 + bio_for_each_segment_all(bvec, bio, iter_all) 889 889 put_page(bvec->bv_page); 890 890 } 891 891 ··· 1164 1166 */ 1165 1167 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter) 1166 1168 { 1167 - int i; 1168 1169 struct bio_vec *bvec; 1169 1170 struct bvec_iter_all iter_all; 1170 1171 1171 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 1172 + bio_for_each_segment_all(bvec, bio, iter_all) { 1172 1173 ssize_t ret; 1173 1174 1174 1175 ret = copy_page_from_iter(bvec->bv_page, ··· 1195 1198 */ 1196 1199 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) 1197 1200 { 1198 - int i; 1199 1201 struct bio_vec *bvec; 1200 1202 struct bvec_iter_all iter_all; 1201 1203 1202 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 1204 + bio_for_each_segment_all(bvec, bio, iter_all) { 1203 1205 ssize_t ret; 1204 1206 1205 1207 ret = copy_page_to_iter(bvec->bv_page, ··· 1219 1223 void bio_free_pages(struct bio *bio) 1220 1224 { 1221 1225 struct bio_vec *bvec; 1222 - int i; 1223 1226 struct bvec_iter_all iter_all; 1224 1227 1225 - bio_for_each_segment_all(bvec, bio, i, iter_all) 1228 + bio_for_each_segment_all(bvec, bio, iter_all) 1226 1229 __free_page(bvec->bv_page); 1227 1230 } 1228 1231 EXPORT_SYMBOL(bio_free_pages); ··· 1459 1464 return bio; 1460 1465 1461 1466 out_unmap: 1462 - bio_for_each_segment_all(bvec, bio, j, iter_all) { 1467 + bio_for_each_segment_all(bvec, bio, iter_all) { 1463 1468 put_page(bvec->bv_page); 1464 1469 } 1465 1470 bio_put(bio); ··· 1469 1474 static void __bio_unmap_user(struct bio *bio) 1470 1475 { 
1471 1476 struct bio_vec *bvec; 1472 - int i; 1473 1477 struct bvec_iter_all iter_all; 1474 1478 1475 1479 /* 1476 1480 * make sure we dirty pages we wrote to 1477 1481 */ 1478 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 1482 + bio_for_each_segment_all(bvec, bio, iter_all) { 1479 1483 if (bio_data_dir(bio) == READ) 1480 1484 set_page_dirty_lock(bvec->bv_page); 1481 1485 ··· 1565 1571 { 1566 1572 char *p = bio->bi_private; 1567 1573 struct bio_vec *bvec; 1568 - int i; 1569 1574 struct bvec_iter_all iter_all; 1570 1575 1571 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 1576 + bio_for_each_segment_all(bvec, bio, iter_all) { 1572 1577 memcpy(p, page_address(bvec->bv_page), bvec->bv_len); 1573 1578 p += bvec->bv_len; 1574 1579 } ··· 1675 1682 void bio_set_pages_dirty(struct bio *bio) 1676 1683 { 1677 1684 struct bio_vec *bvec; 1678 - int i; 1679 1685 struct bvec_iter_all iter_all; 1680 1686 1681 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 1687 + bio_for_each_segment_all(bvec, bio, iter_all) { 1682 1688 if (!PageCompound(bvec->bv_page)) 1683 1689 set_page_dirty_lock(bvec->bv_page); 1684 1690 } ··· 1726 1734 { 1727 1735 struct bio_vec *bvec; 1728 1736 unsigned long flags; 1729 - int i; 1730 1737 struct bvec_iter_all iter_all; 1731 1738 1732 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 1739 + bio_for_each_segment_all(bvec, bio, iter_all) { 1733 1740 if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page)) 1734 1741 goto defer; 1735 1742 }
+1 -2
block/bounce.c
··· 163 163 { 164 164 struct bio *bio_orig = bio->bi_private; 165 165 struct bio_vec *bvec, orig_vec; 166 - int i; 167 166 struct bvec_iter orig_iter = bio_orig->bi_iter; 168 167 struct bvec_iter_all iter_all; 169 168 170 169 /* 171 170 * free up bounce indirect pages used 172 171 */ 173 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 172 + bio_for_each_segment_all(bvec, bio, iter_all) { 174 173 orig_vec = bio_iter_iovec(bio_orig, orig_iter); 175 174 if (bvec->bv_page != orig_vec.bv_page) { 176 175 dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
+1 -2
drivers/md/bcache/btree.c
··· 429 429 bset_sector_offset(&b->keys, i)); 430 430 431 431 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { 432 - int j; 433 432 struct bio_vec *bv; 434 433 void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); 435 434 struct bvec_iter_all iter_all; 436 435 437 - bio_for_each_segment_all(bv, b->bio, j, iter_all) { 436 + bio_for_each_segment_all(bv, b->bio, iter_all) { 438 437 memcpy(page_address(bv->bv_page), addr, PAGE_SIZE); 439 438 addr += PAGE_SIZE; 440 439 }
+1 -2
drivers/md/dm-crypt.c
··· 1445 1445 1446 1446 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) 1447 1447 { 1448 - unsigned int i; 1449 1448 struct bio_vec *bv; 1450 1449 struct bvec_iter_all iter_all; 1451 1450 1452 - bio_for_each_segment_all(bv, clone, i, iter_all) { 1451 + bio_for_each_segment_all(bv, clone, iter_all) { 1453 1452 BUG_ON(!bv->bv_page); 1454 1453 mempool_free(bv->bv_page, &cc->page_pool); 1455 1454 }
+3 -3
drivers/md/raid1.c
··· 2110 2110 } 2111 2111 r1_bio->read_disk = primary; 2112 2112 for (i = 0; i < conf->raid_disks * 2; i++) { 2113 - int j; 2113 + int j = 0; 2114 2114 struct bio *pbio = r1_bio->bios[primary]; 2115 2115 struct bio *sbio = r1_bio->bios[i]; 2116 2116 blk_status_t status = sbio->bi_status; ··· 2125 2125 /* Now we can 'fixup' the error value */ 2126 2126 sbio->bi_status = 0; 2127 2127 2128 - bio_for_each_segment_all(bi, sbio, j, iter_all) 2129 - page_len[j] = bi->bv_len; 2128 + bio_for_each_segment_all(bi, sbio, iter_all) 2129 + page_len[j++] = bi->bv_len; 2130 2130 2131 2131 if (!status) { 2132 2132 for (j = vcnt; j-- ; ) {
+1 -2
drivers/staging/erofs/data.c
··· 17 17 18 18 static inline void read_endio(struct bio *bio) 19 19 { 20 - int i; 21 20 struct bio_vec *bvec; 22 21 const blk_status_t err = bio->bi_status; 23 22 struct bvec_iter_all iter_all; 24 23 25 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 24 + bio_for_each_segment_all(bvec, bio, iter_all) { 26 25 struct page *page = bvec->bv_page; 27 26 28 27 /* page is already locked */
+1 -2
drivers/staging/erofs/unzip_vle.c
··· 844 844 static inline void z_erofs_vle_read_endio(struct bio *bio) 845 845 { 846 846 const blk_status_t err = bio->bi_status; 847 - unsigned int i; 848 847 struct bio_vec *bvec; 849 848 #ifdef EROFS_FS_HAS_MANAGED_CACHE 850 849 struct address_space *mc = NULL; 851 850 #endif 852 851 struct bvec_iter_all iter_all; 853 852 854 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 853 + bio_for_each_segment_all(bvec, bio, iter_all) { 855 854 struct page *page = bvec->bv_page; 856 855 bool cachemngd = false; 857 856
+2 -4
fs/block_dev.c
··· 210 210 struct bio bio; 211 211 ssize_t ret; 212 212 blk_qc_t qc; 213 - int i; 214 213 struct bvec_iter_all iter_all; 215 214 216 215 if ((pos | iov_iter_alignment(iter)) & ··· 260 261 } 261 262 __set_current_state(TASK_RUNNING); 262 263 263 - bio_for_each_segment_all(bvec, &bio, i, iter_all) { 264 + bio_for_each_segment_all(bvec, &bio, iter_all) { 264 265 if (should_dirty && !PageCompound(bvec->bv_page)) 265 266 set_page_dirty_lock(bvec->bv_page); 266 267 put_page(bvec->bv_page); ··· 338 339 if (!bio_flagged(bio, BIO_NO_PAGE_REF)) { 339 340 struct bvec_iter_all iter_all; 340 341 struct bio_vec *bvec; 341 - int i; 342 342 343 - bio_for_each_segment_all(bvec, bio, i, iter_all) 343 + bio_for_each_segment_all(bvec, bio, iter_all) 344 344 put_page(bvec->bv_page); 345 345 } 346 346 bio_put(bio);
+1 -2
fs/btrfs/compression.c
··· 160 160 if (cb->errors) { 161 161 bio_io_error(cb->orig_bio); 162 162 } else { 163 - int i; 164 163 struct bio_vec *bvec; 165 164 struct bvec_iter_all iter_all; 166 165 ··· 168 169 * checked so the end_io handlers know about it 169 170 */ 170 171 ASSERT(!bio_flagged(bio, BIO_CLONED)); 171 - bio_for_each_segment_all(bvec, cb->orig_bio, i, iter_all) 172 + bio_for_each_segment_all(bvec, cb->orig_bio, iter_all) 172 173 SetPageChecked(bvec->bv_page); 173 174 174 175 bio_endio(cb->orig_bio);
+2 -2
fs/btrfs/disk-io.c
··· 832 832 { 833 833 struct bio_vec *bvec; 834 834 struct btrfs_root *root; 835 - int i, ret = 0; 835 + int ret = 0; 836 836 struct bvec_iter_all iter_all; 837 837 838 838 ASSERT(!bio_flagged(bio, BIO_CLONED)); 839 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 839 + bio_for_each_segment_all(bvec, bio, iter_all) { 840 840 root = BTRFS_I(bvec->bv_page->mapping->host)->root; 841 841 ret = csum_dirty_buffer(root->fs_info, bvec->bv_page); 842 842 if (ret)
+4 -6
fs/btrfs/extent_io.c
··· 2451 2451 struct bio_vec *bvec; 2452 2452 u64 start; 2453 2453 u64 end; 2454 - int i; 2455 2454 struct bvec_iter_all iter_all; 2456 2455 2457 2456 ASSERT(!bio_flagged(bio, BIO_CLONED)); 2458 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 2457 + bio_for_each_segment_all(bvec, bio, iter_all) { 2459 2458 struct page *page = bvec->bv_page; 2460 2459 struct inode *inode = page->mapping->host; 2461 2460 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ··· 2522 2523 u64 extent_len = 0; 2523 2524 int mirror; 2524 2525 int ret; 2525 - int i; 2526 2526 struct bvec_iter_all iter_all; 2527 2527 2528 2528 ASSERT(!bio_flagged(bio, BIO_CLONED)); 2529 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 2529 + bio_for_each_segment_all(bvec, bio, iter_all) { 2530 2530 struct page *page = bvec->bv_page; 2531 2531 struct inode *inode = page->mapping->host; 2532 2532 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ··· 3641 3643 { 3642 3644 struct bio_vec *bvec; 3643 3645 struct extent_buffer *eb; 3644 - int i, done; 3646 + int done; 3645 3647 struct bvec_iter_all iter_all; 3646 3648 3647 3649 ASSERT(!bio_flagged(bio, BIO_CLONED)); 3648 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 3650 + bio_for_each_segment_all(bvec, bio, iter_all) { 3649 3651 struct page *page = bvec->bv_page; 3650 3652 3651 3653 eb = (struct extent_buffer *)page->private;
+4 -4
fs/btrfs/inode.c
··· 7828 7828 struct inode *inode = done->inode; 7829 7829 struct bio_vec *bvec; 7830 7830 struct extent_io_tree *io_tree, *failure_tree; 7831 - int i; 7832 7831 struct bvec_iter_all iter_all; 7833 7832 7834 7833 if (bio->bi_status) ··· 7840 7841 7841 7842 done->uptodate = 1; 7842 7843 ASSERT(!bio_flagged(bio, BIO_CLONED)); 7843 - bio_for_each_segment_all(bvec, bio, i, iter_all) 7844 + bio_for_each_segment_all(bvec, bio, iter_all) 7844 7845 clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree, 7845 7846 io_tree, done->start, bvec->bv_page, 7846 7847 btrfs_ino(BTRFS_I(inode)), 0); ··· 7918 7919 struct bio_vec *bvec; 7919 7920 int uptodate; 7920 7921 int ret; 7921 - int i; 7922 + int i = 0; 7922 7923 struct bvec_iter_all iter_all; 7923 7924 7924 7925 if (bio->bi_status) ··· 7933 7934 failure_tree = &BTRFS_I(inode)->io_failure_tree; 7934 7935 7935 7936 ASSERT(!bio_flagged(bio, BIO_CLONED)); 7936 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 7937 + bio_for_each_segment_all(bvec, bio, iter_all) { 7937 7938 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page, 7938 7939 bvec->bv_offset, done->start, 7939 7940 bvec->bv_len); ··· 7945 7946 bvec->bv_offset); 7946 7947 else 7947 7948 uptodate = 0; 7949 + i++; 7948 7950 } 7949 7951 7950 7952 done->uptodate = uptodate;
+1 -2
fs/btrfs/raid56.c
··· 1442 1442 static void set_bio_pages_uptodate(struct bio *bio) 1443 1443 { 1444 1444 struct bio_vec *bvec; 1445 - int i; 1446 1445 struct bvec_iter_all iter_all; 1447 1446 1448 1447 ASSERT(!bio_flagged(bio, BIO_CLONED)); 1449 1448 1450 - bio_for_each_segment_all(bvec, bio, i, iter_all) 1449 + bio_for_each_segment_all(bvec, bio, iter_all) 1451 1450 SetPageUptodate(bvec->bv_page); 1452 1451 } 1453 1452
+1 -2
fs/crypto/bio.c
··· 29 29 static void __fscrypt_decrypt_bio(struct bio *bio, bool done) 30 30 { 31 31 struct bio_vec *bv; 32 - int i; 33 32 struct bvec_iter_all iter_all; 34 33 35 - bio_for_each_segment_all(bv, bio, i, iter_all) { 34 + bio_for_each_segment_all(bv, bio, iter_all) { 36 35 struct page *page = bv->bv_page; 37 36 int ret = fscrypt_decrypt_page(page->mapping->host, page, 38 37 PAGE_SIZE, 0, page->index);
+1 -2
fs/direct-io.c
··· 538 538 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio) 539 539 { 540 540 struct bio_vec *bvec; 541 - unsigned i; 542 541 blk_status_t err = bio->bi_status; 543 542 544 543 if (err) { ··· 552 553 } else { 553 554 struct bvec_iter_all iter_all; 554 555 555 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 556 + bio_for_each_segment_all(bvec, bio, iter_all) { 556 557 struct page *page = bvec->bv_page; 557 558 558 559 if (dio->op == REQ_OP_READ && !PageCompound(page) &&
+1 -2
fs/ext4/page-io.c
··· 61 61 62 62 static void ext4_finish_bio(struct bio *bio) 63 63 { 64 - int i; 65 64 struct bio_vec *bvec; 66 65 struct bvec_iter_all iter_all; 67 66 68 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 67 + bio_for_each_segment_all(bvec, bio, iter_all) { 69 68 struct page *page = bvec->bv_page; 70 69 #ifdef CONFIG_FS_ENCRYPTION 71 70 struct page *data_page = NULL;
+1 -2
fs/ext4/readpage.c
··· 71 71 static void mpage_end_io(struct bio *bio) 72 72 { 73 73 struct bio_vec *bv; 74 - int i; 75 74 struct bvec_iter_all iter_all; 76 75 77 76 if (ext4_bio_encrypted(bio)) { ··· 81 82 return; 82 83 } 83 84 } 84 - bio_for_each_segment_all(bv, bio, i, iter_all) { 85 + bio_for_each_segment_all(bv, bio, iter_all) { 85 86 struct page *page = bv->bv_page; 86 87 87 88 if (!bio->bi_status) {
+3 -6
fs/f2fs/data.c
··· 86 86 { 87 87 struct page *page; 88 88 struct bio_vec *bv; 89 - int i; 90 89 struct bvec_iter_all iter_all; 91 90 92 - bio_for_each_segment_all(bv, bio, i, iter_all) { 91 + bio_for_each_segment_all(bv, bio, iter_all) { 93 92 page = bv->bv_page; 94 93 95 94 /* PG_error was set if any post_read step failed */ ··· 163 164 { 164 165 struct f2fs_sb_info *sbi = bio->bi_private; 165 166 struct bio_vec *bvec; 166 - int i; 167 167 struct bvec_iter_all iter_all; 168 168 169 169 if (time_to_inject(sbi, FAULT_WRITE_IO)) { ··· 170 172 bio->bi_status = BLK_STS_IOERR; 171 173 } 172 174 173 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 175 + bio_for_each_segment_all(bvec, bio, iter_all) { 174 176 struct page *page = bvec->bv_page; 175 177 enum count_type type = WB_DATA_TYPE(page); 176 178 ··· 347 349 { 348 350 struct bio_vec *bvec; 349 351 struct page *target; 350 - int i; 351 352 struct bvec_iter_all iter_all; 352 353 353 354 if (!io->bio) ··· 355 358 if (!inode && !page && !ino) 356 359 return true; 357 360 358 - bio_for_each_segment_all(bvec, io->bio, i, iter_all) { 361 + bio_for_each_segment_all(bvec, io->bio, iter_all) { 359 362 360 363 if (bvec->bv_page->mapping) 361 364 target = bvec->bv_page;
+1 -2
fs/gfs2/lops.c
··· 207 207 struct gfs2_sbd *sdp = bio->bi_private; 208 208 struct bio_vec *bvec; 209 209 struct page *page; 210 - int i; 211 210 struct bvec_iter_all iter_all; 212 211 213 212 if (bio->bi_status) { ··· 215 216 wake_up(&sdp->sd_logd_waitq); 216 217 } 217 218 218 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 219 + bio_for_each_segment_all(bvec, bio, iter_all) { 219 220 page = bvec->bv_page; 220 221 if (page_has_buffers(page)) 221 222 gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
+1 -2
fs/gfs2/meta_io.c
··· 189 189 static void gfs2_meta_read_endio(struct bio *bio) 190 190 { 191 191 struct bio_vec *bvec; 192 - int i; 193 192 struct bvec_iter_all iter_all; 194 193 195 - bio_for_each_segment_all(bvec, bio, i, iter_all) { 194 + bio_for_each_segment_all(bvec, bio, iter_all) { 196 195 struct page *page = bvec->bv_page; 197 196 struct buffer_head *bh = page_buffers(page); 198 197 unsigned int len = bvec->bv_len;
+2 -4
fs/iomap.c
··· 273 273 { 274 274 int error = blk_status_to_errno(bio->bi_status); 275 275 struct bio_vec *bvec; 276 - int i; 277 276 struct bvec_iter_all iter_all; 278 277 279 - bio_for_each_segment_all(bvec, bio, i, iter_all) 278 + bio_for_each_segment_all(bvec, bio, iter_all) 280 279 iomap_read_page_end_io(bvec, error); 281 280 bio_put(bio); 282 281 } ··· 1591 1592 if (!bio_flagged(bio, BIO_NO_PAGE_REF)) { 1592 1593 struct bvec_iter_all iter_all; 1593 1594 struct bio_vec *bvec; 1594 - int i; 1595 1595 1596 - bio_for_each_segment_all(bvec, bio, i, iter_all) 1596 + bio_for_each_segment_all(bvec, bio, iter_all) 1597 1597 put_page(bvec->bv_page); 1598 1598 } 1599 1599 bio_put(bio);
+1 -2
fs/mpage.c
··· 47 47 static void mpage_end_io(struct bio *bio) 48 48 { 49 49 struct bio_vec *bv; 50 - int i; 51 50 struct bvec_iter_all iter_all; 52 51 53 - bio_for_each_segment_all(bv, bio, i, iter_all) { 52 + bio_for_each_segment_all(bv, bio, iter_all) { 54 53 struct page *page = bv->bv_page; 55 54 page_endio(page, bio_op(bio), 56 55 blk_status_to_errno(bio->bi_status));
+1 -2
fs/xfs/xfs_aops.c
··· 98 98 99 99 for (bio = &ioend->io_inline_bio; bio; bio = next) { 100 100 struct bio_vec *bvec; 101 - int i; 102 101 struct bvec_iter_all iter_all; 103 102 104 103 /* ··· 110 111 next = bio->bi_private; 111 112 112 113 /* walk each page on bio, ending page IO on them */ 113 - bio_for_each_segment_all(bvec, bio, i, iter_all) 114 + bio_for_each_segment_all(bvec, bio, iter_all) 114 115 xfs_finish_page_writeback(inode, bvec, error); 115 116 bio_put(bio); 116 117 }
+2 -3
include/linux/bio.h
··· 134 134 * drivers should _never_ use the all version - the bio may have been split 135 135 * before it got to the driver and the driver won't own all of it 136 136 */ 137 - #define bio_for_each_segment_all(bvl, bio, i, iter) \ 138 - for (i = 0, bvl = bvec_init_iter_all(&iter); \ 139 - bio_next_segment((bio), &iter); i++) 137 + #define bio_for_each_segment_all(bvl, bio, iter) \ 138 + for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); ) 140 139 141 140 static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, 142 141 unsigned bytes)