
block: Convert bio_for_each_segment() to bvec_iter

More prep work for immutable biovecs: with immutable bvecs, drivers
won't be able to use the biovec directly; they'll need to use helpers
that take bio->bi_iter.bi_bvec_done into account.

This updates callers to the new usage without changing the
implementation yet.
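
To make the conversion below concrete: every driver hunk makes the same mechanical change, swapping an integer index plus a struct bio_vec pointer for a struct bvec_iter plus a struct bio_vec held by value. A minimal userspace sketch of the before/after loop shape, using simplified stand-in types rather than the real definitions in include/linux/bio.h:

#include <stdio.h>

/* Simplified stand-ins for the kernel types; illustration only. */
struct bio_vec {
    void *bv_page;          /* struct page * in the kernel */
    unsigned int bv_len;
    unsigned int bv_offset;
};

struct bvec_iter {
    unsigned int bi_idx;    /* the kernel iterator carries more state */
};

struct bio {
    struct bio_vec *bi_io_vec;
    unsigned short bi_vcnt;
    struct bvec_iter bi_iter;
};

/* Old style: the loop variable is a pointer into bi_io_vec. */
static unsigned int total_len_old(struct bio *bio)
{
    struct bio_vec *bvec;
    unsigned int len = 0;
    int i;

    for (i = bio->bi_iter.bi_idx; i < bio->bi_vcnt; i++) {
        bvec = &bio->bi_io_vec[i];
        len += bvec->bv_len;
    }
    return len;
}

/* New style: the iterator hands out a copy, so callers never hold a
 * pointer into bi_io_vec - the property immutable biovecs rely on. */
static unsigned int total_len_new(struct bio *bio)
{
    struct bio_vec bvec;
    struct bvec_iter iter;
    unsigned int len = 0;

    for (iter = bio->bi_iter; iter.bi_idx < bio->bi_vcnt; iter.bi_idx++) {
        bvec = bio->bi_io_vec[iter.bi_idx];
        len += bvec.bv_len;
    }
    return len;
}

int main(void)
{
    struct bio_vec vecs[] = { { NULL, 4096, 0 }, { NULL, 2048, 0 } };
    struct bio bio = { vecs, 2, { 0 } };

    printf("%u %u\n", total_len_old(&bio), total_len_new(&bio));
    return 0;
}

Because callers only ever see a copy, the iterator can later grow fields such as bi_bvec_done without another tree-wide sweep.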

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Paul Clements <Paul.Clements@steeleye.com>
Cc: Jim Paris <jim@jtan.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: support@lsi.com
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Quoc-Son Anh <quoc-sonx.anh@intel.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Jan Kara <jack@suse.cz>
Cc: linux-m68k@lists.linux-m68k.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: drbd-user@lists.linbit.com
Cc: nbd-general@lists.sourceforge.net
Cc: cbe-oss-dev@lists.ozlabs.org
Cc: xen-devel@lists.xensource.com
Cc: virtualization@lists.linux-foundation.org
Cc: linux-raid@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: DL-MPTFusionLinux@lsi.com
Cc: linux-scsi@vger.kernel.org
Cc: devel@driverdev.osuosl.org
Cc: linux-fsdevel@vger.kernel.org
Cc: cluster-devel@redhat.com
Cc: linux-mm@kvack.org
Acked-by: Geoff Levand <geoff@infradead.org>

+402 -398
+6 -5
arch/m68k/emu/nfblock.c
··· 62 62 static void nfhd_make_request(struct request_queue *queue, struct bio *bio) 63 63 { 64 64 struct nfhd_device *dev = queue->queuedata; 65 - struct bio_vec *bvec; 66 - int i, dir, len, shift; 65 + struct bio_vec bvec; 66 + struct bvec_iter iter; 67 + int dir, len, shift; 67 68 sector_t sec = bio->bi_iter.bi_sector; 68 69 69 70 dir = bio_data_dir(bio); 70 71 shift = dev->bshift; 71 - bio_for_each_segment(bvec, bio, i) { 72 - len = bvec->bv_len; 72 + bio_for_each_segment(bvec, bio, iter) { 73 + len = bvec.bv_len; 73 74 len >>= 9; 74 75 nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift, 75 - bvec_to_phys(bvec)); 76 + bvec_to_phys(&bvec)); 76 77 sec += len; 77 78 } 78 79 bio_endio(bio, 0);
+9 -9
arch/powerpc/sysdev/axonram.c
··· 109 109 struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; 110 110 unsigned long phys_mem, phys_end; 111 111 void *user_mem; 112 - struct bio_vec *vec; 112 + struct bio_vec vec; 113 113 unsigned int transfered; 114 - unsigned short idx; 114 + struct bvec_iter iter; 115 115 116 116 phys_mem = bank->io_addr + (bio->bi_iter.bi_sector << 117 117 AXON_RAM_SECTOR_SHIFT); 118 118 phys_end = bank->io_addr + bank->size; 119 119 transfered = 0; 120 - bio_for_each_segment(vec, bio, idx) { 121 - if (unlikely(phys_mem + vec->bv_len > phys_end)) { 120 + bio_for_each_segment(vec, bio, iter) { 121 + if (unlikely(phys_mem + vec.bv_len > phys_end)) { 122 122 bio_io_error(bio); 123 123 return; 124 124 } 125 125 126 - user_mem = page_address(vec->bv_page) + vec->bv_offset; 126 + user_mem = page_address(vec.bv_page) + vec.bv_offset; 127 127 if (bio_data_dir(bio) == READ) 128 - memcpy(user_mem, (void *) phys_mem, vec->bv_len); 128 + memcpy(user_mem, (void *) phys_mem, vec.bv_len); 129 129 else 130 - memcpy((void *) phys_mem, user_mem, vec->bv_len); 130 + memcpy((void *) phys_mem, user_mem, vec.bv_len); 131 131 132 - phys_mem += vec->bv_len; 133 - transfered += vec->bv_len; 132 + phys_mem += vec.bv_len; 133 + transfered += vec.bv_len; 134 134 } 135 135 bio_endio(bio, 0); 136 136 }
+2 -2
block/blk-core.c
··· 2746 2746 void rq_flush_dcache_pages(struct request *rq) 2747 2747 { 2748 2748 struct req_iterator iter; 2749 - struct bio_vec *bvec; 2749 + struct bio_vec bvec; 2750 2750 2751 2751 rq_for_each_segment(bvec, rq, iter) 2752 - flush_dcache_page(bvec->bv_page); 2752 + flush_dcache_page(bvec.bv_page); 2753 2753 } 2754 2754 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 2755 2755 #endif
+23 -26
block/blk-merge.c
··· 12 12 static unsigned int __blk_recalc_rq_segments(struct request_queue *q, 13 13 struct bio *bio) 14 14 { 15 - struct bio_vec *bv, *bvprv = NULL; 16 - int cluster, i, high, highprv = 1; 15 + struct bio_vec bv, bvprv = { NULL }; 16 + int cluster, high, highprv = 1; 17 17 unsigned int seg_size, nr_phys_segs; 18 18 struct bio *fbio, *bbio; 19 + struct bvec_iter iter; 19 20 20 21 if (!bio) 21 22 return 0; ··· 26 25 seg_size = 0; 27 26 nr_phys_segs = 0; 28 27 for_each_bio(bio) { 29 - bio_for_each_segment(bv, bio, i) { 28 + bio_for_each_segment(bv, bio, iter) { 30 29 /* 31 30 * the trick here is making sure that a high page is 32 31 * never considered part of another segment, since that 33 32 * might change with the bounce page. 34 33 */ 35 - high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q); 36 - if (high || highprv) 37 - goto new_segment; 38 - if (cluster) { 39 - if (seg_size + bv->bv_len 34 + high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q); 35 + if (!high && !highprv && cluster) { 36 + if (seg_size + bv.bv_len 40 37 > queue_max_segment_size(q)) 41 38 goto new_segment; 42 - if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv)) 39 + if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv)) 43 40 goto new_segment; 44 - if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv)) 41 + if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv)) 45 42 goto new_segment; 46 43 47 - seg_size += bv->bv_len; 44 + seg_size += bv.bv_len; 48 45 bvprv = bv; 49 46 continue; 50 47 } ··· 53 54 54 55 nr_phys_segs++; 55 56 bvprv = bv; 56 - seg_size = bv->bv_len; 57 + seg_size = bv.bv_len; 57 58 highprv = high; 58 59 } 59 60 bbio = bio; ··· 109 110 return 0; 110 111 } 111 112 112 - static void 113 + static inline void 113 114 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, 114 - struct scatterlist *sglist, struct bio_vec **bvprv, 115 + struct scatterlist *sglist, struct bio_vec *bvprv, 115 116 struct scatterlist **sg, int *nsegs, int *cluster) 116 117 { 117 118 118 119 int nbytes = bvec->bv_len; 119 120 120 - if (*bvprv && *cluster) { 121 + if (*sg && *cluster) { 121 122 if ((*sg)->length + nbytes > queue_max_segment_size(q)) 122 123 goto new_segment; 123 124 124 - if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec)) 125 + if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) 125 126 goto new_segment; 126 - if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec)) 127 + if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) 127 128 goto new_segment; 128 129 129 130 (*sg)->length += nbytes; ··· 149 150 sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); 150 151 (*nsegs)++; 151 152 } 152 - *bvprv = bvec; 153 + *bvprv = *bvec; 153 154 } 154 155 155 156 /* ··· 159 160 int blk_rq_map_sg(struct request_queue *q, struct request *rq, 160 161 struct scatterlist *sglist) 161 162 { 162 - struct bio_vec *bvec, *bvprv; 163 + struct bio_vec bvec, bvprv; 163 164 struct req_iterator iter; 164 165 struct scatterlist *sg; 165 166 int nsegs, cluster; ··· 170 171 /* 171 172 * for each bio in rq 172 173 */ 173 - bvprv = NULL; 174 174 sg = NULL; 175 175 rq_for_each_segment(bvec, rq, iter) { 176 - __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, 176 + __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, 177 177 &nsegs, &cluster); 178 178 } /* segments in rq */ 179 179 ··· 221 223 int blk_bio_map_sg(struct request_queue *q, struct bio *bio, 222 224 struct scatterlist *sglist) 223 225 { 224 - struct bio_vec *bvec, *bvprv; 226 + struct bio_vec bvec, bvprv; 225 227 struct scatterlist *sg; 226 228 int nsegs, cluster; 227 - unsigned long i; 229 + struct bvec_iter iter; 228 230 229 231 nsegs = 0; 230 232 cluster = blk_queue_cluster(q); 231 233 232 - bvprv = NULL; 233 234 sg = NULL; 234 - bio_for_each_segment(bvec, bio, i) { 235 - __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, 235 + bio_for_each_segment(bvec, bio, iter) { 236 + __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, 236 237 &nsegs, &cluster); 237 238 } /* segments in bio */ 238 239
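
The subtle part of this hunk is bvprv: as a pointer its NULL-ness meant "no previous segment yet", but as a by-value struct it needs a different sentinel, so __blk_recalc_rq_segments() restructures its conditions around the new { NULL } initializer and __blk_segment_map_sg() keys off *sg instead of *bvprv. A hedged userspace sketch of the track-previous-by-value idiom (names here are illustrative, not the kernel's):

#include <stdio.h>

struct seg { unsigned int start, len; };

/* Count physical segments, merging ranges that are adjacent. The
 * previous segment is tracked by value; nsegs == 0 stands in for the
 * old "bvprv == NULL" test. */
static unsigned int count_segments(const struct seg *v, unsigned int n)
{
    struct seg prev = { 0, 0 };
    unsigned int i, nsegs = 0;

    for (i = 0; i < n; i++) {
        struct seg cur = v[i];  /* a copy, like struct bio_vec bv */

        if (nsegs && prev.start + prev.len == cur.start) {
            prev.len += cur.len;    /* merge into previous */
            continue;
        }
        nsegs++;
        prev = cur;
    }
    return nsegs;
}

int main(void)
{
    struct seg v[] = { { 0, 512 }, { 512, 512 }, { 4096, 512 } };

    printf("%u\n", count_segments(v, 3));   /* prints 2 */
    return 0;
}
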
+8 -8
drivers/block/aoe/aoecmd.c
··· 897 897 static void 898 898 bio_pageinc(struct bio *bio) 899 899 { 900 - struct bio_vec *bv; 900 + struct bio_vec bv; 901 901 struct page *page; 902 - int i; 902 + struct bvec_iter iter; 903 903 904 - bio_for_each_segment(bv, bio, i) { 904 + bio_for_each_segment(bv, bio, iter) { 905 905 /* Non-zero page count for non-head members of 906 906 * compound pages is no longer allowed by the kernel. 907 907 */ 908 - page = compound_trans_head(bv->bv_page); 908 + page = compound_trans_head(bv.bv_page); 909 909 atomic_inc(&page->_count); 910 910 } 911 911 } ··· 913 913 static void 914 914 bio_pagedec(struct bio *bio) 915 915 { 916 - struct bio_vec *bv; 917 916 struct page *page; 918 - int i; 917 + struct bio_vec bv; 918 + struct bvec_iter iter; 919 919 920 - bio_for_each_segment(bv, bio, i) { 921 - page = compound_trans_head(bv->bv_page); 920 + bio_for_each_segment(bv, bio, iter) { 921 + page = compound_trans_head(bv.bv_page); 922 922 atomic_dec(&page->_count); 923 923 } 924 924 }
+6 -6
drivers/block/brd.c
··· 328 328 struct block_device *bdev = bio->bi_bdev; 329 329 struct brd_device *brd = bdev->bd_disk->private_data; 330 330 int rw; 331 - struct bio_vec *bvec; 331 + struct bio_vec bvec; 332 332 sector_t sector; 333 - int i; 333 + struct bvec_iter iter; 334 334 int err = -EIO; 335 335 336 336 sector = bio->bi_iter.bi_sector; ··· 347 347 if (rw == READA) 348 348 rw = READ; 349 349 350 - bio_for_each_segment(bvec, bio, i) { 351 - unsigned int len = bvec->bv_len; 352 - err = brd_do_bvec(brd, bvec->bv_page, len, 353 - bvec->bv_offset, rw, sector); 350 + bio_for_each_segment(bvec, bio, iter) { 351 + unsigned int len = bvec.bv_len; 352 + err = brd_do_bvec(brd, bvec.bv_page, len, 353 + bvec.bv_offset, rw, sector); 354 354 if (err) 355 355 break; 356 356 sector += len >> SECTOR_SHIFT;
+15 -12
drivers/block/drbd/drbd_main.c
··· 1537 1537 1538 1538 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio) 1539 1539 { 1540 - struct bio_vec *bvec; 1541 - int i; 1540 + struct bio_vec bvec; 1541 + struct bvec_iter iter; 1542 + 1542 1543 /* hint all but last page with MSG_MORE */ 1543 - bio_for_each_segment(bvec, bio, i) { 1544 + bio_for_each_segment(bvec, bio, iter) { 1544 1545 int err; 1545 1546 1546 - err = _drbd_no_send_page(mdev, bvec->bv_page, 1547 - bvec->bv_offset, bvec->bv_len, 1548 - i == bio->bi_vcnt - 1 ? 0 : MSG_MORE); 1547 + err = _drbd_no_send_page(mdev, bvec.bv_page, 1548 + bvec.bv_offset, bvec.bv_len, 1549 + bio_iter_last(bio, iter) 1550 + ? 0 : MSG_MORE); 1549 1551 if (err) 1550 1552 return err; 1551 1553 } ··· 1556 1554 1557 1555 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio) 1558 1556 { 1559 - struct bio_vec *bvec; 1560 - int i; 1557 + struct bio_vec bvec; 1558 + struct bvec_iter iter; 1559 + 1561 1560 /* hint all but last page with MSG_MORE */ 1562 - bio_for_each_segment(bvec, bio, i) { 1561 + bio_for_each_segment(bvec, bio, iter) { 1563 1562 int err; 1564 1563 1565 - err = _drbd_send_page(mdev, bvec->bv_page, 1566 - bvec->bv_offset, bvec->bv_len, 1567 - i == bio->bi_vcnt - 1 ? 0 : MSG_MORE); 1564 + err = _drbd_send_page(mdev, bvec.bv_page, 1565 + bvec.bv_offset, bvec.bv_len, 1566 + bio_iter_last(bio, iter) ? 0 : MSG_MORE); 1568 1567 if (err) 1569 1568 return err; 1570 1569 }
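
With the index gone, drbd can no longer write "i == bio->bi_vcnt - 1" to spot the final segment; the commit introduces bio_iter_last() for that. A small model of the last-segment test driving a MSG_MORE-style hint (simplified types; send_buf() is a hypothetical stand-in for _drbd_no_send_page()):

#include <stdio.h>

#define MSG_MORE 0x8000 /* same value as the Linux socket flag */

struct bvec_iter { unsigned int bi_idx; };
struct buf { const char *data; unsigned int len; };

/* Models bio_iter_last(): true when iter sits on the final segment. */
static int iter_last(unsigned int vcnt, struct bvec_iter iter)
{
    return iter.bi_idx == vcnt - 1;
}

static void send_buf(struct buf b, int flags)
{
    printf("send %u bytes%s\n", b.len,
           (flags & MSG_MORE) ? " (MSG_MORE)" : "");
}

int main(void)
{
    struct buf bufs[] = { { "a", 1 }, { "b", 2 }, { "c", 3 } };
    struct bvec_iter iter;

    /* Hint all but the last buffer, as _drbd_send_bio() does. */
    for (iter.bi_idx = 0; iter.bi_idx < 3; iter.bi_idx++)
        send_buf(bufs[iter.bi_idx], iter_last(3, iter) ? 0 : MSG_MORE);
    return 0;
}
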
+7 -6
drivers/block/drbd/drbd_receiver.c
··· 1595 1595 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, 1596 1596 sector_t sector, int data_size) 1597 1597 { 1598 - struct bio_vec *bvec; 1598 + struct bio_vec bvec; 1599 + struct bvec_iter iter; 1599 1600 struct bio *bio; 1600 - int dgs, err, i, expect; 1601 + int dgs, err, expect; 1601 1602 void *dig_in = mdev->tconn->int_dig_in; 1602 1603 void *dig_vv = mdev->tconn->int_dig_vv; 1603 1604 ··· 1618 1617 bio = req->master_bio; 1619 1618 D_ASSERT(sector == bio->bi_iter.bi_sector); 1620 1619 1621 - bio_for_each_segment(bvec, bio, i) { 1622 - void *mapped = kmap(bvec->bv_page) + bvec->bv_offset; 1623 - expect = min_t(int, data_size, bvec->bv_len); 1620 + bio_for_each_segment(bvec, bio, iter) { 1621 + void *mapped = kmap(bvec.bv_page) + bvec.bv_offset; 1622 + expect = min_t(int, data_size, bvec.bv_len); 1624 1623 err = drbd_recv_all_warn(mdev->tconn, mapped, expect); 1625 - kunmap(bvec->bv_page); 1624 + kunmap(bvec.bv_page); 1626 1625 if (err) 1627 1626 return err; 1628 1627 data_size -= expect;
+4 -4
drivers/block/drbd/drbd_worker.c
··· 313 313 { 314 314 struct hash_desc desc; 315 315 struct scatterlist sg; 316 - struct bio_vec *bvec; 317 - int i; 316 + struct bio_vec bvec; 317 + struct bvec_iter iter; 318 318 319 319 desc.tfm = tfm; 320 320 desc.flags = 0; ··· 322 322 sg_init_table(&sg, 1); 323 323 crypto_hash_init(&desc); 324 324 325 - bio_for_each_segment(bvec, bio, i) { 326 - sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset); 325 + bio_for_each_segment(bvec, bio, iter) { 326 + sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); 327 327 crypto_hash_update(&desc, &sg, sg.length); 328 328 } 329 329 crypto_hash_final(&desc, digest);
+6 -6
drivers/block/floppy.c
··· 2351 2351 /* Compute maximal contiguous buffer size. */ 2352 2352 static int buffer_chain_size(void) 2353 2353 { 2354 - struct bio_vec *bv; 2354 + struct bio_vec bv; 2355 2355 int size; 2356 2356 struct req_iterator iter; 2357 2357 char *base; ··· 2360 2360 size = 0; 2361 2361 2362 2362 rq_for_each_segment(bv, current_req, iter) { 2363 - if (page_address(bv->bv_page) + bv->bv_offset != base + size) 2363 + if (page_address(bv.bv_page) + bv.bv_offset != base + size) 2364 2364 break; 2365 2365 2366 - size += bv->bv_len; 2366 + size += bv.bv_len; 2367 2367 } 2368 2368 2369 2369 return size >> 9; ··· 2389 2389 static void copy_buffer(int ssize, int max_sector, int max_sector_2) 2390 2390 { 2391 2391 int remaining; /* number of transferred 512-byte sectors */ 2392 - struct bio_vec *bv; 2392 + struct bio_vec bv; 2393 2393 char *buffer; 2394 2394 char *dma_buffer; 2395 2395 int size; ··· 2427 2427 if (!remaining) 2428 2428 break; 2429 2429 2430 - size = bv->bv_len; 2430 + size = bv.bv_len; 2431 2431 SUPBOUND(size, remaining); 2432 2432 2433 - buffer = page_address(bv->bv_page) + bv->bv_offset; 2433 + buffer = page_address(bv.bv_page) + bv.bv_offset; 2434 2434 if (dma_buffer + size > 2435 2435 floppy_track_buffer + (max_buffer_sectors << 10) || 2436 2436 dma_buffer < floppy_track_buffer) {
+12 -11
drivers/block/loop.c
··· 288 288 { 289 289 int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t, 290 290 struct page *page); 291 - struct bio_vec *bvec; 291 + struct bio_vec bvec; 292 + struct bvec_iter iter; 292 293 struct page *page = NULL; 293 - int i, ret = 0; 294 + int ret = 0; 294 295 295 296 if (lo->transfer != transfer_none) { 296 297 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); ··· 303 302 do_lo_send = do_lo_send_direct_write; 304 303 } 305 304 306 - bio_for_each_segment(bvec, bio, i) { 307 - ret = do_lo_send(lo, bvec, pos, page); 305 + bio_for_each_segment(bvec, bio, iter) { 306 + ret = do_lo_send(lo, &bvec, pos, page); 308 307 if (ret < 0) 309 308 break; 310 - pos += bvec->bv_len; 309 + pos += bvec.bv_len; 311 310 } 312 311 if (page) { 313 312 kunmap(page); ··· 393 392 static int 394 393 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) 395 394 { 396 - struct bio_vec *bvec; 395 + struct bio_vec bvec; 396 + struct bvec_iter iter; 397 397 ssize_t s; 398 - int i; 399 398 400 - bio_for_each_segment(bvec, bio, i) { 401 - s = do_lo_receive(lo, bvec, bsize, pos); 399 + bio_for_each_segment(bvec, bio, iter) { 400 + s = do_lo_receive(lo, &bvec, bsize, pos); 402 401 if (s < 0) 403 402 return s; 404 403 405 - if (s != bvec->bv_len) { 404 + if (s != bvec.bv_len) { 406 405 zero_fill_bio(bio); 407 406 break; 408 407 } 409 - pos += bvec->bv_len; 408 + pos += bvec.bv_len; 410 409 } 411 410 return 0; 412 411 }
+7 -6
drivers/block/mtip32xx/mtip32xx.c
··· 3962 3962 { 3963 3963 struct driver_data *dd = queue->queuedata; 3964 3964 struct scatterlist *sg; 3965 - struct bio_vec *bvec; 3966 - int i, nents = 0; 3965 + struct bio_vec bvec; 3966 + struct bvec_iter iter; 3967 + int nents = 0; 3967 3968 int tag = 0, unaligned = 0; 3968 3969 3969 3970 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { ··· 4027 4026 } 4028 4027 4029 4028 /* Create the scatter list for this bio. */ 4030 - bio_for_each_segment(bvec, bio, i) { 4029 + bio_for_each_segment(bvec, bio, iter) { 4031 4030 sg_set_page(&sg[nents], 4032 - bvec->bv_page, 4033 - bvec->bv_len, 4034 - bvec->bv_offset); 4031 + bvec.bv_page, 4032 + bvec.bv_len, 4033 + bvec.bv_offset); 4035 4034 nents++; 4036 4035 } 4037 4036
+6 -6
drivers/block/nbd.c
··· 271 271 272 272 if (nbd_cmd(req) == NBD_CMD_WRITE) { 273 273 struct req_iterator iter; 274 - struct bio_vec *bvec; 274 + struct bio_vec bvec; 275 275 /* 276 276 * we are really probing at internals to determine 277 277 * whether to set MSG_MORE or not... ··· 281 281 if (!rq_iter_last(req, iter)) 282 282 flags = MSG_MORE; 283 283 dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", 284 - nbd->disk->disk_name, req, bvec->bv_len); 285 - result = sock_send_bvec(nbd, bvec, flags); 284 + nbd->disk->disk_name, req, bvec.bv_len); 285 + result = sock_send_bvec(nbd, &bvec, flags); 286 286 if (result <= 0) { 287 287 dev_err(disk_to_dev(nbd->disk), 288 288 "Send data failed (result %d)\n", ··· 378 378 nbd->disk->disk_name, req); 379 379 if (nbd_cmd(req) == NBD_CMD_READ) { 380 380 struct req_iterator iter; 381 - struct bio_vec *bvec; 381 + struct bio_vec bvec; 382 382 383 383 rq_for_each_segment(bvec, req, iter) { 384 - result = sock_recv_bvec(nbd, bvec); 384 + result = sock_recv_bvec(nbd, &bvec); 385 385 if (result <= 0) { 386 386 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", 387 387 result); ··· 389 389 return req; 390 390 } 391 391 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n", 392 - nbd->disk->disk_name, req, bvec->bv_len); 392 + nbd->disk->disk_name, req, bvec.bv_len); 393 393 } 394 394 } 395 395 return req;
+19 -14
drivers/block/nvme-core.c
··· 550 550 static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, 551 551 struct bio *bio, enum dma_data_direction dma_dir, int psegs) 552 552 { 553 - struct bio_vec *bvec, *bvprv = NULL; 553 + struct bio_vec bvec, bvprv; 554 + struct bvec_iter iter; 554 555 struct scatterlist *sg = NULL; 555 - int i, length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size; 556 + int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size; 557 + int first = 1; 556 558 557 559 if (nvmeq->dev->stripe_size) 558 560 split_len = nvmeq->dev->stripe_size - ··· 562 560 (nvmeq->dev->stripe_size - 1)); 563 561 564 562 sg_init_table(iod->sg, psegs); 565 - bio_for_each_segment(bvec, bio, i) { 566 - if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) { 567 - sg->length += bvec->bv_len; 563 + bio_for_each_segment(bvec, bio, iter) { 564 + if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) { 565 + sg->length += bvec.bv_len; 568 566 } else { 569 - if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec)) 570 - return nvme_split_and_submit(bio, nvmeq, i, 571 - length, 0); 567 + if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec)) 568 + return nvme_split_and_submit(bio, nvmeq, 569 + iter.bi_idx, 570 + length, 0); 572 571 573 572 sg = sg ? sg + 1 : iod->sg; 574 - sg_set_page(sg, bvec->bv_page, bvec->bv_len, 575 - bvec->bv_offset); 573 + sg_set_page(sg, bvec.bv_page, 574 + bvec.bv_len, bvec.bv_offset); 576 575 nsegs++; 577 576 } 578 577 579 - if (split_len - length < bvec->bv_len) 580 - return nvme_split_and_submit(bio, nvmeq, i, split_len, 581 - split_len - length); 582 - length += bvec->bv_len; 578 + if (split_len - length < bvec.bv_len) 579 + return nvme_split_and_submit(bio, nvmeq, iter.bi_idx, 580 + split_len, 581 + split_len - length); 582 + length += bvec.bv_len; 583 583 bvprv = bvec; 584 + first = 0; 584 585 } 585 586 iod->nents = nsegs; 586 587 sg_mark_end(sg);
+5 -5
drivers/block/ps3disk.c
··· 94 94 { 95 95 unsigned int offset = 0; 96 96 struct req_iterator iter; 97 - struct bio_vec *bvec; 97 + struct bio_vec bvec; 98 98 unsigned int i = 0; 99 99 size_t size; 100 100 void *buf; ··· 106 106 __func__, __LINE__, i, bio_segments(iter.bio), 107 107 bio_sectors(iter.bio), iter.bio->bi_iter.bi_sector); 108 108 109 - size = bvec->bv_len; 110 - buf = bvec_kmap_irq(bvec, &flags); 109 + size = bvec.bv_len; 110 + buf = bvec_kmap_irq(&bvec, &flags); 111 111 if (gather) 112 112 memcpy(dev->bounce_buf+offset, buf, size); 113 113 else 114 114 memcpy(buf, dev->bounce_buf+offset, size); 115 115 offset += size; 116 - flush_kernel_dcache_page(bvec->bv_page); 116 + flush_kernel_dcache_page(bvec.bv_page); 117 117 bvec_kunmap_irq(buf, &flags); 118 118 i++; 119 119 } ··· 130 130 131 131 #ifdef DEBUG 132 132 unsigned int n = 0; 133 - struct bio_vec *bv; 133 + struct bio_vec bv; 134 134 struct req_iterator iter; 135 135 136 136 rq_for_each_segment(bv, req, iter)
+5 -5
drivers/block/ps3vram.c
··· 555 555 const char *op = write ? "write" : "read"; 556 556 loff_t offset = bio->bi_iter.bi_sector << 9; 557 557 int error = 0; 558 - struct bio_vec *bvec; 559 - unsigned int i; 558 + struct bio_vec bvec; 559 + struct bvec_iter iter; 560 560 struct bio *next; 561 561 562 - bio_for_each_segment(bvec, bio, i) { 562 + bio_for_each_segment(bvec, bio, iter) { 563 563 /* PS3 is ppc64, so we don't handle highmem */ 564 - char *ptr = page_address(bvec->bv_page) + bvec->bv_offset; 565 - size_t len = bvec->bv_len, retlen; 564 + char *ptr = page_address(bvec.bv_page) + bvec.bv_offset; 565 + size_t len = bvec.bv_len, retlen; 566 566 567 567 dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op, 568 568 len, offset);
+19 -19
drivers/block/rbd.c
··· 1109 1109 */ 1110 1110 static void zero_bio_chain(struct bio *chain, int start_ofs) 1111 1111 { 1112 - struct bio_vec *bv; 1112 + struct bio_vec bv; 1113 + struct bvec_iter iter; 1113 1114 unsigned long flags; 1114 1115 void *buf; 1115 - int i; 1116 1116 int pos = 0; 1117 1117 1118 1118 while (chain) { 1119 - bio_for_each_segment(bv, chain, i) { 1120 - if (pos + bv->bv_len > start_ofs) { 1119 + bio_for_each_segment(bv, chain, iter) { 1120 + if (pos + bv.bv_len > start_ofs) { 1121 1121 int remainder = max(start_ofs - pos, 0); 1122 - buf = bvec_kmap_irq(bv, &flags); 1122 + buf = bvec_kmap_irq(&bv, &flags); 1123 1123 memset(buf + remainder, 0, 1124 - bv->bv_len - remainder); 1125 - flush_dcache_page(bv->bv_page); 1124 + bv.bv_len - remainder); 1125 + flush_dcache_page(bv.bv_page); 1126 1126 bvec_kunmap_irq(buf, &flags); 1127 1127 } 1128 - pos += bv->bv_len; 1128 + pos += bv.bv_len; 1129 1129 } 1130 1130 1131 1131 chain = chain->bi_next; ··· 1173 1173 unsigned int len, 1174 1174 gfp_t gfpmask) 1175 1175 { 1176 - struct bio_vec *bv; 1176 + struct bio_vec bv; 1177 + struct bvec_iter iter; 1178 + struct bvec_iter end_iter; 1177 1179 unsigned int resid; 1178 - unsigned short idx; 1179 1180 unsigned int voff; 1180 - unsigned short end_idx; 1181 1181 unsigned short vcnt; 1182 1182 struct bio *bio; 1183 1183 ··· 1196 1196 /* Find first affected segment... */ 1197 1197 1198 1198 resid = offset; 1199 - bio_for_each_segment(bv, bio_src, idx) { 1200 - if (resid < bv->bv_len) 1199 + bio_for_each_segment(bv, bio_src, iter) { 1200 + if (resid < bv.bv_len) 1201 1201 break; 1202 - resid -= bv->bv_len; 1202 + resid -= bv.bv_len; 1203 1203 } 1204 1204 voff = resid; 1205 1205 1206 1206 /* ...and the last affected segment */ 1207 1207 1208 1208 resid += len; 1209 - __bio_for_each_segment(bv, bio_src, end_idx, idx) { 1210 - if (resid <= bv->bv_len) 1209 + __bio_for_each_segment(bv, bio_src, end_iter, iter) { 1210 + if (resid <= bv.bv_len) 1211 1211 break; 1212 - resid -= bv->bv_len; 1212 + resid -= bv.bv_len; 1213 1213 } 1214 - vcnt = end_idx - idx + 1; 1214 + vcnt = end_iter.bi_idx - iter.bi_idx + 1; 1215 1215 1216 1216 /* Build the clone */ 1217 1217 ··· 1229 1229 * Copy over our part of the bio_vec, then update the first 1230 1230 * and last (or only) entries. 1231 1231 */ 1232 - memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx], 1232 + memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[iter.bi_idx], 1233 1233 vcnt * sizeof (struct bio_vec)); 1234 1234 bio->bi_io_vec[0].bv_offset += voff; 1235 1235 if (vcnt > 1) {
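
rbd's clone-range logic is why __bio_for_each_segment() keeps a start argument: the first loop walks until it reaches the segment containing offset, then the second loop resumes from that saved iterator to find the segment containing offset + len, and the distance between the two indices gives vcnt. A small model of the two-phase scan (plain arrays and hypothetical names, not the kernel code):

#include <stdio.h>

struct vec { unsigned int len; };

int main(void)
{
    struct vec v[] = { { 512 }, { 512 }, { 512 }, { 512 } };
    unsigned int n = 4, offset = 700, len = 600;
    unsigned int resid, first, last;

    /* Find first affected segment... */
    resid = offset;
    for (first = 0; first < n; first++) {
        if (resid < v[first].len)
            break;
        resid -= v[first].len;
    }

    /* ...and resume from there for the last affected segment. */
    resid += len;
    for (last = first; last < n; last++) {
        if (resid <= v[last].len)
            break;
        resid -= v[last].len;
    }

    /* Mirrors vcnt = end_iter.bi_idx - iter.bi_idx + 1 above. */
    printf("segments %u..%u, vcnt %u\n", first, last, last - first + 1);
    return 0;
}
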
+6 -5
drivers/block/rsxx/dma.c
··· 684 684 void *cb_data) 685 685 { 686 686 struct list_head dma_list[RSXX_MAX_TARGETS]; 687 - struct bio_vec *bvec; 687 + struct bio_vec bvec; 688 + struct bvec_iter iter; 688 689 unsigned long long addr8; 689 690 unsigned int laddr; 690 691 unsigned int bv_len; ··· 723 722 bv_len -= RSXX_HW_BLK_SIZE; 724 723 } 725 724 } else { 726 - bio_for_each_segment(bvec, bio, i) { 727 - bv_len = bvec->bv_len; 728 - bv_off = bvec->bv_offset; 725 + bio_for_each_segment(bvec, bio, iter) { 726 + bv_len = bvec.bv_len; 727 + bv_off = bvec.bv_offset; 729 728 730 729 while (bv_len > 0) { 731 730 tgt = rsxx_get_dma_tgt(card, addr8); ··· 737 736 st = rsxx_queue_dma(card, &dma_list[tgt], 738 737 bio_data_dir(bio), 739 738 dma_off, dma_len, 740 - laddr, bvec->bv_page, 739 + laddr, bvec.bv_page, 741 740 bv_off, cb, cb_data); 742 741 if (st) 743 742 goto bvec_err;
+2 -2
drivers/md/bcache/btree.c
··· 362 362 struct bio_vec *bv; 363 363 int n; 364 364 365 - __bio_for_each_segment(bv, b->bio, n, 0) 365 + bio_for_each_segment_all(bv, b->bio, n) 366 366 __free_page(bv->bv_page); 367 367 368 368 __btree_node_write_done(cl); ··· 421 421 struct bio_vec *bv; 422 422 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); 423 423 424 - bio_for_each_segment(bv, b->bio, j) 424 + bio_for_each_segment_all(bv, b->bio, j) 425 425 memcpy(page_address(bv->bv_page), 426 426 base + j * PAGE_SIZE, PAGE_SIZE); 427 427
+10 -9
drivers/md/bcache/debug.c
··· 173 173 { 174 174 char name[BDEVNAME_SIZE]; 175 175 struct bio *check; 176 - struct bio_vec *bv; 176 + struct bio_vec bv, *bv2; 177 + struct bvec_iter iter; 177 178 int i; 178 179 179 180 check = bio_clone(bio, GFP_NOIO); ··· 186 185 187 186 submit_bio_wait(READ_SYNC, check); 188 187 189 - bio_for_each_segment(bv, bio, i) { 190 - void *p1 = kmap_atomic(bv->bv_page); 191 - void *p2 = page_address(check->bi_io_vec[i].bv_page); 188 + bio_for_each_segment(bv, bio, iter) { 189 + void *p1 = kmap_atomic(bv.bv_page); 190 + void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page); 192 191 193 - cache_set_err_on(memcmp(p1 + bv->bv_offset, 194 - p2 + bv->bv_offset, 195 - bv->bv_len), 192 + cache_set_err_on(memcmp(p1 + bv.bv_offset, 193 + p2 + bv.bv_offset, 194 + bv.bv_len), 196 195 dc->disk.c, 197 196 "verify failed at dev %s sector %llu", 198 197 bdevname(dc->bdev, name), ··· 201 200 kunmap_atomic(p1); 202 201 } 203 202 204 - bio_for_each_segment_all(bv, check, i) 205 - __free_page(bv->bv_page); 203 + bio_for_each_segment_all(bv2, check, i) 204 + __free_page(bv2->bv_page); 206 205 out_put: 207 206 bio_put(check); 208 207 }
+32 -43
drivers/md/bcache/io.c
··· 22 22 static void bch_generic_make_request_hack(struct bio *bio) 23 23 { 24 24 if (bio->bi_iter.bi_idx) { 25 - int i; 26 - struct bio_vec *bv; 25 + struct bio_vec bv; 26 + struct bvec_iter iter; 27 27 struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio)); 28 28 29 - bio_for_each_segment(bv, bio, i) 30 - clone->bi_io_vec[clone->bi_vcnt++] = *bv; 29 + bio_for_each_segment(bv, bio, iter) 30 + clone->bi_io_vec[clone->bi_vcnt++] = bv; 31 31 32 32 clone->bi_iter.bi_sector = bio->bi_iter.bi_sector; 33 33 clone->bi_bdev = bio->bi_bdev; ··· 73 73 struct bio *bch_bio_split(struct bio *bio, int sectors, 74 74 gfp_t gfp, struct bio_set *bs) 75 75 { 76 - unsigned idx = bio->bi_iter.bi_idx, vcnt = 0, nbytes = sectors << 9; 77 - struct bio_vec *bv; 76 + unsigned vcnt = 0, nbytes = sectors << 9; 77 + struct bio_vec bv; 78 + struct bvec_iter iter; 78 79 struct bio *ret = NULL; 79 80 80 81 BUG_ON(sectors <= 0); ··· 87 86 ret = bio_alloc_bioset(gfp, 1, bs); 88 87 if (!ret) 89 88 return NULL; 90 - idx = 0; 91 89 goto out; 92 90 } 93 91 94 - bio_for_each_segment(bv, bio, idx) { 95 - vcnt = idx - bio->bi_iter.bi_idx; 92 + bio_for_each_segment(bv, bio, iter) { 93 + vcnt++; 96 94 97 - if (!nbytes) { 98 - ret = bio_alloc_bioset(gfp, vcnt, bs); 99 - if (!ret) 100 - return NULL; 101 - 102 - memcpy(ret->bi_io_vec, __bio_iovec(bio), 103 - sizeof(struct bio_vec) * vcnt); 104 - 95 + if (nbytes <= bv.bv_len) 105 96 break; 106 - } else if (nbytes < bv->bv_len) { 107 - ret = bio_alloc_bioset(gfp, ++vcnt, bs); 108 - if (!ret) 109 - return NULL; 110 97 111 - memcpy(ret->bi_io_vec, __bio_iovec(bio), 112 - sizeof(struct bio_vec) * vcnt); 113 - 114 - ret->bi_io_vec[vcnt - 1].bv_len = nbytes; 115 - bv->bv_offset += nbytes; 116 - bv->bv_len -= nbytes; 117 - break; 118 - } 119 - 120 - nbytes -= bv->bv_len; 98 + nbytes -= bv.bv_len; 121 99 } 100 + 101 + ret = bio_alloc_bioset(gfp, vcnt, bs); 102 + if (!ret) 103 + return NULL; 104 + 105 + bio_for_each_segment(bv, bio, iter) { 106 + ret->bi_io_vec[ret->bi_vcnt++] = bv; 107 + 108 + if (ret->bi_vcnt == vcnt) 109 + break; 110 + } 111 + 112 + ret->bi_io_vec[ret->bi_vcnt - 1].bv_len = nbytes; 122 113 out: 123 114 ret->bi_bdev = bio->bi_bdev; 124 115 ret->bi_iter.bi_sector = bio->bi_iter.bi_sector; 125 116 ret->bi_iter.bi_size = sectors << 9; 126 117 ret->bi_rw = bio->bi_rw; 127 - ret->bi_vcnt = vcnt; 128 - ret->bi_max_vecs = vcnt; 129 - 130 - bio->bi_iter.bi_sector += sectors; 131 - bio->bi_iter.bi_size -= sectors << 9; 132 - bio->bi_iter.bi_idx = idx; 133 118 134 119 if (bio_integrity(bio)) { 135 120 if (bio_integrity_clone(ret, bio, gfp)) { ··· 124 137 } 125 138 126 139 bio_integrity_trim(ret, 0, bio_sectors(ret)); 127 - bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio)); 128 140 } 141 + 142 + bio_advance(bio, ret->bi_iter.bi_size); 129 143 130 144 return ret; 131 145 } ··· 143 155 144 156 if (bio_segments(bio) > max_segments || 145 157 q->merge_bvec_fn) { 146 - struct bio_vec *bv; 147 - int i, seg = 0; 158 + struct bio_vec bv; 159 + struct bvec_iter iter; 160 + unsigned seg = 0; 148 161 149 162 ret = 0; 150 163 151 - bio_for_each_segment(bv, bio, i) { 164 + bio_for_each_segment(bv, bio, iter) { 152 165 struct bvec_merge_data bvm = { 153 166 .bi_bdev = bio->bi_bdev, 154 167 .bi_sector = bio->bi_iter.bi_sector, ··· 161 172 break; 162 173 163 174 if (q->merge_bvec_fn && 164 - q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len) 175 + q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len) 165 176 break; 166 177 167 178 seg++; 168 - ret += bv->bv_len >> 9; 179 + ret += bv.bv_len >> 9; 169 180 } 170 181 } 171 182
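
bch_bio_split() drops its open-coded memcpy()-and-fixup in favor of two iterator passes: one to count how many vecs cover the requested byte count, one to copy them out by value, followed by a single bv_len trim on the last vec and a bio_advance() on the parent. A rough userspace model of that count-then-copy shape (simplified types; not the kernel function):

#include <stdio.h>

struct vec { unsigned int len; };

/* Split the first nbytes off src into dst: count covering vecs, copy
 * them, trim the last. The caller guarantees nbytes fits in src. */
static unsigned int split(const struct vec *src, unsigned int n,
                          struct vec *dst, unsigned int nbytes)
{
    unsigned int i, vcnt = 0, remaining = nbytes;

    /* Pass 1: how many vecs do we need? */
    for (i = 0; i < n; i++) {
        vcnt++;
        if (remaining <= src[i].len)
            break;
        remaining -= src[i].len;
    }

    /* Pass 2: copy by value, then trim the final vec. */
    for (i = 0; i < vcnt; i++)
        dst[i] = src[i];
    dst[vcnt - 1].len = remaining;

    return vcnt;
}

int main(void)
{
    struct vec src[] = { { 512 }, { 512 }, { 512 } };
    struct vec dst[3];
    unsigned int i, vcnt = split(src, 3, dst, 768);

    for (i = 0; i < vcnt; i++)
        printf("vec %u: %u bytes\n", i, dst[i].len); /* 512, 256 */
    return 0;
}
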
+13 -13
drivers/md/bcache/request.c
··· 198 198 199 199 static void bio_csum(struct bio *bio, struct bkey *k) 200 200 { 201 - struct bio_vec *bv; 201 + struct bio_vec bv; 202 + struct bvec_iter iter; 202 203 uint64_t csum = 0; 203 - int i; 204 204 205 - bio_for_each_segment(bv, bio, i) { 206 - void *d = kmap(bv->bv_page) + bv->bv_offset; 207 - csum = bch_crc64_update(csum, d, bv->bv_len); 208 - kunmap(bv->bv_page); 205 + bio_for_each_segment(bv, bio, iter) { 206 + void *d = kmap(bv.bv_page) + bv.bv_offset; 207 + csum = bch_crc64_update(csum, d, bv.bv_len); 208 + kunmap(bv.bv_page); 209 209 } 210 210 211 211 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1); ··· 1182 1182 static int flash_dev_cache_miss(struct btree *b, struct search *s, 1183 1183 struct bio *bio, unsigned sectors) 1184 1184 { 1185 - struct bio_vec *bv; 1186 - int i; 1185 + struct bio_vec bv; 1186 + struct bvec_iter iter; 1187 1187 1188 1188 /* Zero fill bio */ 1189 1189 1190 - bio_for_each_segment(bv, bio, i) { 1191 - unsigned j = min(bv->bv_len >> 9, sectors); 1190 + bio_for_each_segment(bv, bio, iter) { 1191 + unsigned j = min(bv.bv_len >> 9, sectors); 1192 1192 1193 - void *p = kmap(bv->bv_page); 1194 - memset(p + bv->bv_offset, 0, j << 9); 1195 - kunmap(bv->bv_page); 1193 + void *p = kmap(bv.bv_page); 1194 + memset(p + bv.bv_offset, 0, j << 9); 1195 + kunmap(bv.bv_page); 1196 1196 1197 1197 sectors -= j; 1198 1198 }
+6 -6
drivers/md/raid5.c
··· 937 937 async_copy_data(int frombio, struct bio *bio, struct page *page, 938 938 sector_t sector, struct dma_async_tx_descriptor *tx) 939 939 { 940 - struct bio_vec *bvl; 940 + struct bio_vec bvl; 941 + struct bvec_iter iter; 941 942 struct page *bio_page; 942 - int i; 943 943 int page_offset; 944 944 struct async_submit_ctl submit; 945 945 enum async_tx_flags flags = 0; ··· 953 953 flags |= ASYNC_TX_FENCE; 954 954 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 955 955 956 - bio_for_each_segment(bvl, bio, i) { 957 - int len = bvl->bv_len; 956 + bio_for_each_segment(bvl, bio, iter) { 957 + int len = bvl.bv_len; 958 958 int clen; 959 959 int b_offset = 0; 960 960 ··· 970 970 clen = len; 971 971 972 972 if (clen > 0) { 973 - b_offset += bvl->bv_offset; 974 - bio_page = bvl->bv_page; 973 + b_offset += bvl.bv_offset; 974 + bio_page = bvl.bv_page; 975 975 if (frombio) 976 976 tx = async_memcpy(page, bio_page, page_offset, 977 977 b_offset, clen, &submit);
+5 -5
drivers/s390/block/dasd_diag.c
··· 504 504 struct dasd_diag_req *dreq; 505 505 struct dasd_diag_bio *dbio; 506 506 struct req_iterator iter; 507 - struct bio_vec *bv; 507 + struct bio_vec bv; 508 508 char *dst; 509 509 unsigned int count, datasize; 510 510 sector_t recid, first_rec, last_rec; ··· 525 525 /* Check struct bio and count the number of blocks for the request. */ 526 526 count = 0; 527 527 rq_for_each_segment(bv, req, iter) { 528 - if (bv->bv_len & (blksize - 1)) 528 + if (bv.bv_len & (blksize - 1)) 529 529 /* Fba can only do full blocks. */ 530 530 return ERR_PTR(-EINVAL); 531 - count += bv->bv_len >> (block->s2b_shift + 9); 531 + count += bv.bv_len >> (block->s2b_shift + 9); 532 532 } 533 533 /* Paranoia. */ 534 534 if (count != last_rec - first_rec + 1) ··· 545 545 dbio = dreq->bio; 546 546 recid = first_rec; 547 547 rq_for_each_segment(bv, req, iter) { 548 - dst = page_address(bv->bv_page) + bv->bv_offset; 549 - for (off = 0; off < bv->bv_len; off += blksize) { 548 + dst = page_address(bv.bv_page) + bv.bv_offset; 549 + for (off = 0; off < bv.bv_len; off += blksize) { 550 550 memset(dbio, 0, sizeof (struct dasd_diag_bio)); 551 551 dbio->type = rw_cmd; 552 552 dbio->block_number = recid + 1;
+24 -24
drivers/s390/block/dasd_eckd.c
··· 2551 2551 struct dasd_ccw_req *cqr; 2552 2552 struct ccw1 *ccw; 2553 2553 struct req_iterator iter; 2554 - struct bio_vec *bv; 2554 + struct bio_vec bv; 2555 2555 char *dst; 2556 2556 unsigned int off; 2557 2557 int count, cidaw, cplength, datasize; ··· 2573 2573 count = 0; 2574 2574 cidaw = 0; 2575 2575 rq_for_each_segment(bv, req, iter) { 2576 - if (bv->bv_len & (blksize - 1)) 2576 + if (bv.bv_len & (blksize - 1)) 2577 2577 /* Eckd can only do full blocks. */ 2578 2578 return ERR_PTR(-EINVAL); 2579 - count += bv->bv_len >> (block->s2b_shift + 9); 2579 + count += bv.bv_len >> (block->s2b_shift + 9); 2580 2580 #if defined(CONFIG_64BIT) 2581 - if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) 2582 - cidaw += bv->bv_len >> (block->s2b_shift + 9); 2581 + if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) 2582 + cidaw += bv.bv_len >> (block->s2b_shift + 9); 2583 2583 #endif 2584 2584 } 2585 2585 /* Paranoia. */ ··· 2650 2650 last_rec - recid + 1, cmd, basedev, blksize); 2651 2651 } 2652 2652 rq_for_each_segment(bv, req, iter) { 2653 - dst = page_address(bv->bv_page) + bv->bv_offset; 2653 + dst = page_address(bv.bv_page) + bv.bv_offset; 2654 2654 if (dasd_page_cache) { 2655 2655 char *copy = kmem_cache_alloc(dasd_page_cache, 2656 2656 GFP_DMA | __GFP_NOWARN); 2657 2657 if (copy && rq_data_dir(req) == WRITE) 2658 - memcpy(copy + bv->bv_offset, dst, bv->bv_len); 2658 + memcpy(copy + bv.bv_offset, dst, bv.bv_len); 2659 2659 if (copy) 2660 - dst = copy + bv->bv_offset; 2660 + dst = copy + bv.bv_offset; 2661 2661 } 2662 - for (off = 0; off < bv->bv_len; off += blksize) { 2662 + for (off = 0; off < bv.bv_len; off += blksize) { 2663 2663 sector_t trkid = recid; 2664 2664 unsigned int recoffs = sector_div(trkid, blk_per_trk); 2665 2665 rcmd = cmd; ··· 2735 2735 struct dasd_ccw_req *cqr; 2736 2736 struct ccw1 *ccw; 2737 2737 struct req_iterator iter; 2738 - struct bio_vec *bv; 2738 + struct bio_vec bv; 2739 2739 char *dst, *idaw_dst; 2740 2740 unsigned int cidaw, cplength, datasize; 2741 2741 unsigned int tlf; ··· 2813 2813 idaw_dst = NULL; 2814 2814 idaw_len = 0; 2815 2815 rq_for_each_segment(bv, req, iter) { 2816 - dst = page_address(bv->bv_page) + bv->bv_offset; 2817 - seg_len = bv->bv_len; 2816 + dst = page_address(bv.bv_page) + bv.bv_offset; 2817 + seg_len = bv.bv_len; 2818 2818 while (seg_len) { 2819 2819 if (new_track) { 2820 2820 trkid = recid; ··· 3039 3039 { 3040 3040 struct dasd_ccw_req *cqr; 3041 3041 struct req_iterator iter; 3042 - struct bio_vec *bv; 3042 + struct bio_vec bv; 3043 3043 char *dst; 3044 3044 unsigned int trkcount, ctidaw; 3045 3045 unsigned char cmd; ··· 3125 3125 new_track = 1; 3126 3126 recid = first_rec; 3127 3127 rq_for_each_segment(bv, req, iter) { 3128 - dst = page_address(bv->bv_page) + bv->bv_offset; 3129 - seg_len = bv->bv_len; 3128 + dst = page_address(bv.bv_page) + bv.bv_offset; 3129 + seg_len = bv.bv_len; 3130 3130 while (seg_len) { 3131 3131 if (new_track) { 3132 3132 trkid = recid; ··· 3158 3158 } 3159 3159 } else { 3160 3160 rq_for_each_segment(bv, req, iter) { 3161 - dst = page_address(bv->bv_page) + bv->bv_offset; 3161 + dst = page_address(bv.bv_page) + bv.bv_offset; 3162 3162 last_tidaw = itcw_add_tidaw(itcw, 0x00, 3163 - dst, bv->bv_len); 3163 + dst, bv.bv_len); 3164 3164 if (IS_ERR(last_tidaw)) { 3165 3165 ret = -EINVAL; 3166 3166 goto out_error; ··· 3276 3276 struct dasd_ccw_req *cqr; 3277 3277 struct ccw1 *ccw; 3278 3278 struct req_iterator iter; 3279 - struct bio_vec *bv; 3279 + struct bio_vec bv; 3280 3280 char *dst; 3281 3281 unsigned char cmd; 3282 3282 unsigned int trkcount; ··· 3376 3376 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); 3377 3377 } 3378 3378 rq_for_each_segment(bv, req, iter) { 3379 - dst = page_address(bv->bv_page) + bv->bv_offset; 3380 - seg_len = bv->bv_len; 3379 + dst = page_address(bv.bv_page) + bv.bv_offset; 3380 + seg_len = bv.bv_len; 3381 3381 if (cmd == DASD_ECKD_CCW_READ_TRACK) 3382 3382 memset(dst, 0, seg_len); 3383 3383 if (!len_to_track_end) { ··· 3422 3422 struct dasd_eckd_private *private; 3423 3423 struct ccw1 *ccw; 3424 3424 struct req_iterator iter; 3425 - struct bio_vec *bv; 3425 + struct bio_vec bv; 3426 3426 char *dst, *cda; 3427 3427 unsigned int blksize, blk_per_trk, off; 3428 3428 sector_t recid; ··· 3440 3440 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) 3441 3441 ccw++; 3442 3442 rq_for_each_segment(bv, req, iter) { 3443 - dst = page_address(bv->bv_page) + bv->bv_offset; 3444 - for (off = 0; off < bv->bv_len; off += blksize) { 3443 + dst = page_address(bv.bv_page) + bv.bv_offset; 3444 + for (off = 0; off < bv.bv_len; off += blksize) { 3445 3445 /* Skip locate record. */ 3446 3446 if (private->uses_cdl && recid <= 2*blk_per_trk) 3447 3447 ccw++; ··· 3452 3452 cda = (char *)((addr_t) ccw->cda); 3453 3453 if (dst != cda) { 3454 3454 if (rq_data_dir(req) == READ) 3455 - memcpy(dst, cda, bv->bv_len); 3455 + memcpy(dst, cda, bv.bv_len); 3456 3456 kmem_cache_free(dasd_page_cache, 3457 3457 (void *)((addr_t)cda & PAGE_MASK)); 3458 3458 }
+13 -13
drivers/s390/block/dasd_fba.c
··· 260 260 struct dasd_ccw_req *cqr; 261 261 struct ccw1 *ccw; 262 262 struct req_iterator iter; 263 - struct bio_vec *bv; 263 + struct bio_vec bv; 264 264 char *dst; 265 265 int count, cidaw, cplength, datasize; 266 266 sector_t recid, first_rec, last_rec; ··· 283 283 count = 0; 284 284 cidaw = 0; 285 285 rq_for_each_segment(bv, req, iter) { 286 - if (bv->bv_len & (blksize - 1)) 286 + if (bv.bv_len & (blksize - 1)) 287 287 /* Fba can only do full blocks. */ 288 288 return ERR_PTR(-EINVAL); 289 - count += bv->bv_len >> (block->s2b_shift + 9); 289 + count += bv.bv_len >> (block->s2b_shift + 9); 290 290 #if defined(CONFIG_64BIT) 291 - if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) 292 - cidaw += bv->bv_len / blksize; 291 + if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) 292 + cidaw += bv.bv_len / blksize; 293 293 #endif 294 294 } 295 295 /* Paranoia. */ ··· 326 326 } 327 327 recid = first_rec; 328 328 rq_for_each_segment(bv, req, iter) { 329 - dst = page_address(bv->bv_page) + bv->bv_offset; 329 + dst = page_address(bv.bv_page) + bv.bv_offset; 330 330 if (dasd_page_cache) { 331 331 char *copy = kmem_cache_alloc(dasd_page_cache, 332 332 GFP_DMA | __GFP_NOWARN); 333 333 if (copy && rq_data_dir(req) == WRITE) 334 - memcpy(copy + bv->bv_offset, dst, bv->bv_len); 334 + memcpy(copy + bv.bv_offset, dst, bv.bv_len); 335 335 if (copy) 336 - dst = copy + bv->bv_offset; 336 + dst = copy + bv.bv_offset; 337 337 } 338 - for (off = 0; off < bv->bv_len; off += blksize) { 338 + for (off = 0; off < bv.bv_len; off += blksize) { 339 339 /* Locate record for stupid devices. */ 340 340 if (private->rdc_data.mode.bits.data_chain == 0) { 341 341 ccw[-1].flags |= CCW_FLAG_CC; ··· 384 384 struct dasd_fba_private *private; 385 385 struct ccw1 *ccw; 386 386 struct req_iterator iter; 387 - struct bio_vec *bv; 387 + struct bio_vec bv; 388 388 char *dst, *cda; 389 389 unsigned int blksize, off; 390 390 int status; ··· 399 399 if (private->rdc_data.mode.bits.data_chain != 0) 400 400 ccw++; 401 401 rq_for_each_segment(bv, req, iter) { 402 - dst = page_address(bv->bv_page) + bv->bv_offset; 403 - for (off = 0; off < bv->bv_len; off += blksize) { 402 + dst = page_address(bv.bv_page) + bv.bv_offset; 403 + for (off = 0; off < bv.bv_len; off += blksize) { 404 404 /* Skip locate record. */ 405 405 if (private->rdc_data.mode.bits.data_chain == 0) 406 406 ccw++; ··· 411 411 cda = (char *)((addr_t) ccw->cda); 412 412 if (dst != cda) { 413 413 if (rq_data_dir(req) == READ) 414 - memcpy(dst, cda, bv->bv_len); 414 + memcpy(dst, cda, bv.bv_len); 415 415 kmem_cache_free(dasd_page_cache, 416 416 (void *)((addr_t)cda & PAGE_MASK)); 417 417 }
+8 -8
drivers/s390/block/dcssblk.c
··· 808 808 dcssblk_make_request(struct request_queue *q, struct bio *bio) 809 809 { 810 810 struct dcssblk_dev_info *dev_info; 811 - struct bio_vec *bvec; 811 + struct bio_vec bvec; 812 + struct bvec_iter iter; 812 813 unsigned long index; 813 814 unsigned long page_addr; 814 815 unsigned long source_addr; 815 816 unsigned long bytes_done; 816 - int i; 817 817 818 818 bytes_done = 0; 819 819 dev_info = bio->bi_bdev->bd_disk->private_data; ··· 844 844 } 845 845 846 846 index = (bio->bi_iter.bi_sector >> 3); 847 - bio_for_each_segment(bvec, bio, i) { 847 + bio_for_each_segment(bvec, bio, iter) { 848 848 page_addr = (unsigned long) 849 - page_address(bvec->bv_page) + bvec->bv_offset; 849 + page_address(bvec.bv_page) + bvec.bv_offset; 850 850 source_addr = dev_info->start + (index<<12) + bytes_done; 851 - if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0) 851 + if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0) 852 852 // More paranoia. 853 853 goto fail; 854 854 if (bio_data_dir(bio) == READ) { 855 855 memcpy((void*)page_addr, (void*)source_addr, 856 - bvec->bv_len); 856 + bvec.bv_len); 857 857 } else { 858 858 memcpy((void*)source_addr, (void*)page_addr, 859 - bvec->bv_len); 859 + bvec.bv_len); 860 860 } 861 - bytes_done += bvec->bv_len; 861 + bytes_done += bvec.bv_len; 862 862 } 863 863 bio_endio(bio, 0); 864 864 return;
+4 -4
drivers/s390/block/scm_blk.c
··· 130 130 struct aidaw *aidaw = scmrq->aidaw; 131 131 struct msb *msb = &scmrq->aob->msb[0]; 132 132 struct req_iterator iter; 133 - struct bio_vec *bv; 133 + struct bio_vec bv; 134 134 135 135 msb->bs = MSB_BS_4K; 136 136 scmrq->aob->request.msb_count = 1; ··· 142 142 msb->data_addr = (u64) aidaw; 143 143 144 144 rq_for_each_segment(bv, scmrq->request, iter) { 145 - WARN_ON(bv->bv_offset); 146 - msb->blk_count += bv->bv_len >> 12; 147 - aidaw->data_addr = (u64) page_address(bv->bv_page); 145 + WARN_ON(bv.bv_offset); 146 + msb->blk_count += bv.bv_len >> 12; 147 + aidaw->data_addr = (u64) page_address(bv.bv_page); 148 148 aidaw++; 149 149 } 150 150 }
+2 -2
drivers/s390/block/scm_blk_cluster.c
··· 122 122 struct aidaw *aidaw = scmrq->aidaw; 123 123 struct msb *msb = &scmrq->aob->msb[0]; 124 124 struct req_iterator iter; 125 - struct bio_vec *bv; 125 + struct bio_vec bv; 126 126 int i = 0; 127 127 u64 addr; 128 128 ··· 163 163 i++; 164 164 } 165 165 rq_for_each_segment(bv, req, iter) { 166 - aidaw->data_addr = (u64) page_address(bv->bv_page); 166 + aidaw->data_addr = (u64) page_address(bv.bv_page); 167 167 aidaw++; 168 168 i++; 169 169 }
+5 -5
drivers/s390/block/xpram.c
··· 184 184 static void xpram_make_request(struct request_queue *q, struct bio *bio) 185 185 { 186 186 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; 187 - struct bio_vec *bvec; 187 + struct bio_vec bvec; 188 + struct bvec_iter iter; 188 189 unsigned int index; 189 190 unsigned long page_addr; 190 191 unsigned long bytes; 191 - int i; 192 192 193 193 if ((bio->bi_iter.bi_sector & 7) != 0 || 194 194 (bio->bi_iter.bi_size & 4095) != 0) ··· 200 200 if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) 201 201 goto fail; 202 202 index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; 203 - bio_for_each_segment(bvec, bio, i) { 203 + bio_for_each_segment(bvec, bio, iter) { 204 204 page_addr = (unsigned long) 205 - kmap(bvec->bv_page) + bvec->bv_offset; 206 - bytes = bvec->bv_len; 205 + kmap(bvec.bv_page) + bvec.bv_offset; 206 + bytes = bvec.bv_len; 207 207 if ((page_addr & 4095) != 0 || (bytes & 4095) != 0) 208 208 /* More paranoia. */ 209 209 goto fail;
+16 -15
drivers/scsi/mpt2sas/mpt2sas_transport.c
··· 1901 1901 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1902 1902 Mpi2SmpPassthroughRequest_t *mpi_request; 1903 1903 Mpi2SmpPassthroughReply_t *mpi_reply; 1904 - int rc, i; 1904 + int rc; 1905 1905 u16 smid; 1906 1906 u32 ioc_state; 1907 1907 unsigned long timeleft; ··· 1916 1916 void *pci_addr_out = NULL; 1917 1917 u16 wait_state_count; 1918 1918 struct request *rsp = req->next_rq; 1919 - struct bio_vec *bvec = NULL; 1919 + struct bio_vec bvec; 1920 + struct bvec_iter iter; 1920 1921 1921 1922 if (!rsp) { 1922 1923 printk(MPT2SAS_ERR_FMT "%s: the smp response space is " ··· 1956 1955 goto out; 1957 1956 } 1958 1957 1959 - bio_for_each_segment(bvec, req->bio, i) { 1958 + bio_for_each_segment(bvec, req->bio, iter) { 1960 1959 memcpy(pci_addr_out + offset, 1961 - page_address(bvec->bv_page) + bvec->bv_offset, 1962 - bvec->bv_len); 1963 - offset += bvec->bv_len; 1960 + page_address(bvec.bv_page) + bvec.bv_offset, 1961 + bvec.bv_len); 1962 + offset += bvec.bv_len; 1964 1963 } 1965 1964 } else { 1966 1965 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), ··· 2107 2106 u32 offset = 0; 2108 2107 u32 bytes_to_copy = 2109 2108 le16_to_cpu(mpi_reply->ResponseDataLength); 2110 - bio_for_each_segment(bvec, rsp->bio, i) { 2111 - if (bytes_to_copy <= bvec->bv_len) { 2112 - memcpy(page_address(bvec->bv_page) + 2113 - bvec->bv_offset, pci_addr_in + 2109 + bio_for_each_segment(bvec, rsp->bio, iter) { 2110 + if (bytes_to_copy <= bvec.bv_len) { 2111 + memcpy(page_address(bvec.bv_page) + 2112 + bvec.bv_offset, pci_addr_in + 2114 2113 offset, bytes_to_copy); 2115 2114 break; 2116 2115 } else { 2117 - memcpy(page_address(bvec->bv_page) + 2118 - bvec->bv_offset, pci_addr_in + 2119 - offset, bvec->bv_len); 2120 - bytes_to_copy -= bvec->bv_len; 2116 + memcpy(page_address(bvec.bv_page) + 2117 + bvec.bv_offset, pci_addr_in + 2118 + offset, bvec.bv_len); 2119 + bytes_to_copy -= bvec.bv_len; 2121 2120 } 2122 - offset += bvec->bv_len; 2121 + offset += bvec.bv_len; 2123 2122 } 2124 2123 } 2125 2124 } else {
+16 -15
drivers/scsi/mpt3sas/mpt3sas_transport.c
··· 1884 1884 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 1885 1885 Mpi2SmpPassthroughRequest_t *mpi_request; 1886 1886 Mpi2SmpPassthroughReply_t *mpi_reply; 1887 - int rc, i; 1887 + int rc; 1888 1888 u16 smid; 1889 1889 u32 ioc_state; 1890 1890 unsigned long timeleft; ··· 1898 1898 void *pci_addr_out = NULL; 1899 1899 u16 wait_state_count; 1900 1900 struct request *rsp = req->next_rq; 1901 - struct bio_vec *bvec = NULL; 1901 + struct bio_vec bvec; 1902 + struct bvec_iter iter; 1902 1903 1903 1904 if (!rsp) { 1904 1905 pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n", ··· 1939 1938 goto out; 1940 1939 } 1941 1940 1942 - bio_for_each_segment(bvec, req->bio, i) { 1941 + bio_for_each_segment(bvec, req->bio, iter) { 1943 1942 memcpy(pci_addr_out + offset, 1944 - page_address(bvec->bv_page) + bvec->bv_offset, 1945 - bvec->bv_len); 1946 - offset += bvec->bv_len; 1943 + page_address(bvec.bv_page) + bvec.bv_offset, 1944 + bvec.bv_len); 1945 + offset += bvec.bv_len; 1947 1946 } 1948 1947 } else { 1949 1948 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), ··· 2068 2067 u32 offset = 0; 2069 2068 u32 bytes_to_copy = 2070 2069 le16_to_cpu(mpi_reply->ResponseDataLength); 2071 - bio_for_each_segment(bvec, rsp->bio, i) { 2072 - if (bytes_to_copy <= bvec->bv_len) { 2073 - memcpy(page_address(bvec->bv_page) + 2074 - bvec->bv_offset, pci_addr_in + 2070 + bio_for_each_segment(bvec, rsp->bio, iter) { 2071 + if (bytes_to_copy <= bvec.bv_len) { 2072 + memcpy(page_address(bvec.bv_page) + 2073 + bvec.bv_offset, pci_addr_in + 2075 2074 offset, bytes_to_copy); 2076 2075 break; 2077 2076 } else { 2078 - memcpy(page_address(bvec->bv_page) + 2079 - bvec->bv_offset, pci_addr_in + 2080 - offset, bvec->bv_len); 2081 - bytes_to_copy -= bvec->bv_len; 2077 + memcpy(page_address(bvec.bv_page) + 2078 + bvec.bv_offset, pci_addr_in + 2079 + offset, bvec.bv_len); 2080 + bytes_to_copy -= bvec.bv_len; 2082 2081 } 2083 - offset += bvec->bv_len; 2082 + offset += bvec.bv_len; 2084 2083 } 2085 2084 } 2086 2085 } else {
+7 -7
drivers/staging/lustre/lustre/llite/lloop.c
··· 194 194 struct cl_object *obj = ll_i2info(inode)->lli_clob; 195 195 pgoff_t offset; 196 196 int ret; 197 - int i; 198 197 int rw; 199 198 obd_count page_count = 0; 200 - struct bio_vec *bvec; 199 + struct bio_vec bvec; 200 + struct bvec_iter iter; 201 201 struct bio *bio; 202 202 ssize_t bytes; 203 203 ··· 221 221 LASSERT(rw == bio->bi_rw); 222 222 223 223 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; 224 - bio_for_each_segment(bvec, bio, i) { 225 - BUG_ON(bvec->bv_offset != 0); 226 - BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE); 224 + bio_for_each_segment(bvec, bio, iter) { 225 + BUG_ON(bvec.bv_offset != 0); 226 + BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE); 227 227 228 - pages[page_count] = bvec->bv_page; 228 + pages[page_count] = bvec.bv_page; 229 229 offsets[page_count] = offset; 230 230 page_count++; 231 - offset += bvec->bv_len; 231 + offset += bvec.bv_len; 232 232 } 233 233 LASSERT(page_count <= LLOOP_MAX_SEGMENTS); 234 234 }
+10 -9
drivers/staging/zram/zram_drv.c
··· 672 672 673 673 static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) 674 674 { 675 - int i, offset; 675 + int offset; 676 676 u32 index; 677 - struct bio_vec *bvec; 677 + struct bio_vec bvec; 678 + struct bvec_iter iter; 678 679 679 680 switch (rw) { 680 681 case READ: ··· 690 689 offset = (bio->bi_iter.bi_sector & 691 690 (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; 692 691 693 - bio_for_each_segment(bvec, bio, i) { 692 + bio_for_each_segment(bvec, bio, iter) { 694 693 int max_transfer_size = PAGE_SIZE - offset; 695 694 696 - if (bvec->bv_len > max_transfer_size) { 695 + if (bvec.bv_len > max_transfer_size) { 697 696 /* 698 697 * zram_bvec_rw() can only make operation on a single 699 698 * zram page. Split the bio vector. 700 699 */ 701 700 struct bio_vec bv; 702 701 703 - bv.bv_page = bvec->bv_page; 702 + bv.bv_page = bvec.bv_page; 704 703 bv.bv_len = max_transfer_size; 705 - bv.bv_offset = bvec->bv_offset; 704 + bv.bv_offset = bvec.bv_offset; 706 705 707 706 if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0) 708 707 goto out; 709 708 710 - bv.bv_len = bvec->bv_len - max_transfer_size; 709 + bv.bv_len = bvec.bv_len - max_transfer_size; 711 710 bv.bv_offset += max_transfer_size; 712 711 if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0) 713 712 goto out; 714 713 } else 715 - if (zram_bvec_rw(zram, bvec, index, offset, bio, rw) 714 + if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw) 716 715 < 0) 717 716 goto out; 718 717 719 - update_position(&index, &offset, bvec); 718 + update_position(&index, &offset, &bvec); 720 719 } 721 720 722 721 set_bit(BIO_UPTODATE, &bio->bi_flags);
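
Since the loop variable is now a local value, zram builds a temporary struct bio_vec whenever a segment straddles its per-page transfer limit, instead of mutating the bio's own vector. The carve-up is plain arithmetic, sketched here with a fixed 4096-byte limit (hypothetical names; only the offset/length bookkeeping is modeled):

#include <stdio.h>

struct bio_vec { unsigned int bv_len, bv_offset; };

/* Split bvec at max bytes, the way __zram_make_request() builds a
 * temporary bv for each half of an oversized segment. */
static void split_at(struct bio_vec bvec, unsigned int max)
{
    struct bio_vec bv = bvec;

    if (bvec.bv_len > max) {
        bv.bv_len = max;
        printf("first half:  len %u off %u\n", bv.bv_len, bv.bv_offset);

        bv.bv_len = bvec.bv_len - max;
        bv.bv_offset += max;
        printf("second half: len %u off %u\n", bv.bv_len, bv.bv_offset);
    } else {
        printf("whole: len %u off %u\n", bv.bv_len, bv.bv_offset);
    }
}

int main(void)
{
    struct bio_vec bvec = { 6144, 0 };

    split_at(bvec, 4096);   /* 4096 @ 0, then 2048 @ 4096 */
    return 0;
}
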
+16 -14
fs/bio-integrity.c
··· 299 299 { 300 300 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 301 301 struct blk_integrity_exchg bix; 302 - struct bio_vec *bv; 302 + struct bio_vec bv; 303 + struct bvec_iter iter; 303 304 sector_t sector = bio->bi_iter.bi_sector; 304 - unsigned int i, sectors, total; 305 + unsigned int sectors, total; 305 306 void *prot_buf = bio->bi_integrity->bip_buf; 306 307 307 308 total = 0; 308 309 bix.disk_name = bio->bi_bdev->bd_disk->disk_name; 309 310 bix.sector_size = bi->sector_size; 310 311 311 - bio_for_each_segment(bv, bio, i) { 312 - void *kaddr = kmap_atomic(bv->bv_page); 313 - bix.data_buf = kaddr + bv->bv_offset; 314 - bix.data_size = bv->bv_len; 312 + bio_for_each_segment(bv, bio, iter) { 313 + void *kaddr = kmap_atomic(bv.bv_page); 314 + bix.data_buf = kaddr + bv.bv_offset; 315 + bix.data_size = bv.bv_len; 315 316 bix.prot_buf = prot_buf; 316 317 bix.sector = sector; 317 318 318 319 bi->generate_fn(&bix); 319 320 320 - sectors = bv->bv_len / bi->sector_size; 321 + sectors = bv.bv_len / bi->sector_size; 321 322 sector += sectors; 322 323 prot_buf += sectors * bi->tuple_size; 323 324 total += sectors * bi->tuple_size; ··· 442 441 { 443 442 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 444 443 struct blk_integrity_exchg bix; 445 - struct bio_vec *bv; 444 + struct bio_vec bv; 445 + struct bvec_iter iter; 446 446 sector_t sector = bio->bi_integrity->bip_sector; 447 - unsigned int i, sectors, total, ret; 447 + unsigned int sectors, total, ret; 448 448 void *prot_buf = bio->bi_integrity->bip_buf; 449 449 450 450 ret = total = 0; 451 451 bix.disk_name = bio->bi_bdev->bd_disk->disk_name; 452 452 bix.sector_size = bi->sector_size; 453 453 454 - bio_for_each_segment(bv, bio, i) { 455 - void *kaddr = kmap_atomic(bv->bv_page); 456 - bix.data_buf = kaddr + bv->bv_offset; 457 - bix.data_size = bv->bv_len; 454 + bio_for_each_segment(bv, bio, iter) { 455 + void *kaddr = kmap_atomic(bv.bv_page); 456 + bix.data_buf = kaddr + bv.bv_offset; 457 + bix.data_size = bv.bv_len; 458 458 bix.prot_buf = prot_buf; 459 459 bix.sector = sector; 460 460 ··· 466 464 return ret; 467 465 } 468 466 469 - sectors = bv->bv_len / bi->sector_size; 467 + sectors = bv.bv_len / bi->sector_size; 470 468 sector += sectors; 471 469 prot_buf += sectors * bi->tuple_size; 472 470 total += sectors * bi->tuple_size;
+11 -11
fs/bio.c
··· 473 473 void zero_fill_bio(struct bio *bio) 474 474 { 475 475 unsigned long flags; 476 - struct bio_vec *bv; 477 - int i; 476 + struct bio_vec bv; 477 + struct bvec_iter iter; 478 478 479 - bio_for_each_segment(bv, bio, i) { 480 - char *data = bvec_kmap_irq(bv, &flags); 481 - memset(data, 0, bv->bv_len); 482 - flush_dcache_page(bv->bv_page); 479 + bio_for_each_segment(bv, bio, iter) { 480 + char *data = bvec_kmap_irq(&bv, &flags); 481 + memset(data, 0, bv.bv_len); 482 + flush_dcache_page(bv.bv_page); 483 483 bvec_kunmap_irq(data, &flags); 484 484 } 485 485 } ··· 1687 1687 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1688 1688 void bio_flush_dcache_pages(struct bio *bi) 1689 1689 { 1690 - int i; 1691 - struct bio_vec *bvec; 1690 + struct bio_vec bvec; 1691 + struct bvec_iter iter; 1692 1692 1693 - bio_for_each_segment(bvec, bi, i) 1694 - flush_dcache_page(bvec->bv_page); 1693 + bio_for_each_segment(bvec, bi, iter) 1694 + flush_dcache_page(bvec.bv_page); 1695 1695 } 1696 1696 EXPORT_SYMBOL(bio_flush_dcache_pages); 1697 1697 #endif ··· 1840 1840 bio->bi_iter.bi_idx = 0; 1841 1841 } 1842 1842 /* Make sure vcnt and last bv are not too big */ 1843 - bio_for_each_segment(bvec, bio, i) { 1843 + bio_for_each_segment_all(bvec, bio, i) { 1844 1844 if (sofar + bvec->bv_len > size) 1845 1845 bvec->bv_len = size - sofar; 1846 1846 if (bvec->bv_len == 0) {
+14 -14
include/linux/bio.h
··· 63 63 */ 64 64 #define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)])) 65 65 #define __bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_iter.bi_idx) 66 - #define bio_iovec(bio) (*__bio_iovec(bio)) 66 + 67 + #define bio_iter_iovec(bio, iter) ((bio)->bi_io_vec[(iter).bi_idx]) 67 68 68 69 #define bio_page(bio) (bio_iovec((bio)).bv_page) 69 70 #define bio_offset(bio) (bio_iovec((bio)).bv_offset) 71 + #define bio_iovec(bio) (*__bio_iovec(bio)) 72 + 70 73 #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_iter.bi_idx) 71 74 #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9) 72 75 #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) ··· 137 134 #define bio_io_error(bio) bio_endio((bio), -EIO) 138 135 139 136 /* 140 - * drivers should not use the __ version unless they _really_ know what 141 - * they're doing 142 - */ 143 - #define __bio_for_each_segment(bvl, bio, i, start_idx) \ 144 - for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx); \ 145 - i < (bio)->bi_vcnt; \ 146 - bvl++, i++) 147 - 148 - /* 149 137 * drivers should _never_ use the all version - the bio may have been split 150 138 * before it got to the driver and the driver won't own all of it 151 139 */ ··· 145 151 bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \ 146 152 i++) 147 153 148 - #define bio_for_each_segment(bvl, bio, i) \ 149 - for (i = (bio)->bi_iter.bi_idx; \ 150 - bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \ 151 - i++) 154 + #define __bio_for_each_segment(bvl, bio, iter, start) \ 155 + for (iter = (start); \ 156 + bvl = bio_iter_iovec((bio), (iter)), \ 157 + (iter).bi_idx < (bio)->bi_vcnt; \ 158 + (iter).bi_idx++) 159 + 160 + #define bio_for_each_segment(bvl, bio, iter) \ 161 + __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) 162 + 163 + #define bio_iter_last(bio, iter) ((iter).bi_idx == (bio)->bi_vcnt - 1) 152 164 153 165 /* 154 166 * get a reference to a bio, so it won't disappear. the intended use is
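
This hunk is the heart of the patch: bio_for_each_segment() now takes a struct bvec_iter, yields struct bio_vec by value through bio_iter_iovec(), and __bio_for_each_segment() gains a start parameter so iteration can resume from a saved iterator (as rbd does above); bio_iter_last() replaces open-coded vcnt arithmetic. A compilable model of how the new macros expand, with the caveat that the kernel macro evaluates bvl before the bounds test (safe there because bi_io_vec has bi_max_vecs slots), while this model tests bounds first:

#include <stdio.h>

struct bio_vec { unsigned int bv_len; };
struct bvec_iter { unsigned int bi_idx; };
struct bio {
    struct bio_vec *bi_io_vec;
    unsigned short bi_vcnt;
    struct bvec_iter bi_iter;
};

#define bio_iter_iovec(bio, iter) ((bio)->bi_io_vec[(iter).bi_idx])

#define __bio_for_each_segment(bvl, bio, iter, start)       \
    for (iter = (start);                                    \
         (iter).bi_idx < (bio)->bi_vcnt &&                  \
            ((bvl) = bio_iter_iovec((bio), (iter)), 1);     \
         (iter).bi_idx++)

#define bio_for_each_segment(bvl, bio, iter)                \
    __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define bio_iter_last(bio, iter) ((iter).bi_idx == (bio)->bi_vcnt - 1)

int main(void)
{
    struct bio_vec vecs[] = { { 512 }, { 1024 }, { 2048 } };
    struct bio b = { vecs, 3, { 0 } };
    struct bio_vec bv;
    struct bvec_iter iter;

    bio_for_each_segment(bv, &b, iter)
        printf("len %u%s\n", bv.bv_len,
               bio_iter_last(&b, iter) ? " (last)" : "");
    return 0;
}
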
+4 -3
include/linux/blkdev.h
··· 735 735 }; 736 736 737 737 struct req_iterator { 738 - int i; 738 + struct bvec_iter iter; 739 739 struct bio *bio; 740 740 }; 741 741 ··· 748 748 749 749 #define rq_for_each_segment(bvl, _rq, _iter) \ 750 750 __rq_for_each_bio(_iter.bio, _rq) \ 751 - bio_for_each_segment(bvl, _iter.bio, _iter.i) 751 + bio_for_each_segment(bvl, _iter.bio, _iter.iter) 752 752 753 753 #define rq_iter_last(rq, _iter) \ 754 - (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1) 754 + (_iter.bio->bi_next == NULL && \ 755 + bio_iter_last(_iter.bio, _iter.iter)) 755 756 756 757 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 757 758 # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
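
req_iterator now embeds a full struct bvec_iter rather than a bare int, and rq_iter_last() is rebuilt on bio_iter_last(). The composition is just two nested loops: outer over the bios chained on a request, inner over each bio's segments. A sketch of that nesting with plain structs (illustrative only, not the kernel macros):

#include <stdio.h>
#include <stddef.h>

struct bio_vec { unsigned int bv_len; };
struct bvec_iter { unsigned int bi_idx; };
struct bio {
    struct bio *bi_next;        /* bios are chained on a request */
    struct bio_vec *bi_io_vec;
    unsigned short bi_vcnt;
};

struct req_iterator {
    struct bvec_iter iter;
    struct bio *bio;
};

int main(void)
{
    struct bio_vec va[] = { { 512 } }, vb[] = { { 1024 }, { 2048 } };
    struct bio b2 = { NULL, vb, 2 };
    struct bio b1 = { &b2, va, 1 };
    struct req_iterator it;
    struct bio_vec bv;

    /* rq_for_each_segment: for each bio in the request, for each
     * segment in the bio; rq_iter_last() is true only on the final
     * segment of the final bio. */
    for (it.bio = &b1; it.bio; it.bio = it.bio->bi_next)
        for (it.iter.bi_idx = 0; it.iter.bi_idx < it.bio->bi_vcnt;
             it.iter.bi_idx++) {
            int last = !it.bio->bi_next &&
                       it.iter.bi_idx == it.bio->bi_vcnt - 1;

            bv = it.bio->bi_io_vec[it.iter.bi_idx];
            printf("len %u%s\n", bv.bv_len, last ? " (last)" : "");
        }
    return 0;
}
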
+19 -21
mm/bounce.c
··· 98 98 static void copy_to_high_bio_irq(struct bio *to, struct bio *from) 99 99 { 100 100 unsigned char *vfrom; 101 - struct bio_vec *tovec, *fromvec; 102 - int i; 101 + struct bio_vec tovec, *fromvec = from->bi_io_vec; 102 + struct bvec_iter iter; 103 103 104 - bio_for_each_segment(tovec, to, i) { 105 - fromvec = from->bi_io_vec + i; 104 + bio_for_each_segment(tovec, to, iter) { 105 + if (tovec.bv_page != fromvec->bv_page) { 106 + /* 107 + * fromvec->bv_offset and fromvec->bv_len might have 108 + * been modified by the block layer, so use the original 109 + * copy, bounce_copy_vec already uses tovec->bv_len 110 + */ 111 + vfrom = page_address(fromvec->bv_page) + 112 + tovec.bv_offset; 106 113 107 - /* 108 - * not bounced 109 - */ 110 - if (tovec->bv_page == fromvec->bv_page) 111 - continue; 114 + bounce_copy_vec(&tovec, vfrom); 115 + flush_dcache_page(tovec.bv_page); 116 + } 112 117 113 - /* 114 - * fromvec->bv_offset and fromvec->bv_len might have been 115 - * modified by the block layer, so use the original copy, 116 - * bounce_copy_vec already uses tovec->bv_len 117 - */ 118 - vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; 119 - 120 - bounce_copy_vec(tovec, vfrom); 121 - flush_dcache_page(tovec->bv_page); 118 + fromvec++; 122 119 } 123 120 } 124 121 ··· 198 201 { 199 202 struct bio *bio; 200 203 int rw = bio_data_dir(*bio_orig); 201 - struct bio_vec *to, *from; 204 + struct bio_vec *to, from; 205 + struct bvec_iter iter; 202 206 unsigned i; 203 207 204 208 if (force) 205 209 goto bounce; 206 - bio_for_each_segment(from, *bio_orig, i) 207 - if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q)) 210 + bio_for_each_segment(from, *bio_orig, iter) 211 + if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q)) 208 212 goto bounce; 209 213 210 214 return;
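
copy_to_high_bio_irq() ends up mixing both styles on purpose: the destination bio is walked by value with the iterator, while the bounce bio's vector is walked with a raw pointer in lockstep, because the block layer may have adjusted the bounce side's bv_offset/bv_len and the copy needs the two positions paired up. A compact model of the paired walk (illustrative buffers instead of pages):

#include <stdio.h>
#include <string.h>

struct vec { char *buf; unsigned int len; };

int main(void)
{
    char a[4] = "abc", b[4] = "xyz", c[4];
    struct vec to[]   = { { a, 4 }, { c, 4 } };  /* c's page was bounced */
    struct vec from[] = { { a, 4 }, { b, 4 } };  /* b holds bounce data */
    struct vec *fromvec = from;
    unsigned int i;

    for (i = 0; i < 2; i++) {
        struct vec tovec = to[i];   /* by value, like struct bio_vec */

        /* Only copy back buffers that were actually substituted. */
        if (tovec.buf != fromvec->buf)
            memcpy(tovec.buf, fromvec->buf, tovec.len);
        fromvec++;
    }
    printf("%s\n", c);  /* prints xyz */
    return 0;
}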