
block: Abstract out bvec iterator

Immutable biovecs are going to require an explicit iterator. To
implement immutable bvecs, a later patch is going to add a bi_bvec_done
member to the new struct bvec_iter; for now, this patch effectively
just renames things.
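
For reference, here is a sketch of the iterator this patch introduces,
reconstructed from the renames in the diff below (the field layout is
implied by the converted call sites; the bi_bvec_done member mentioned
above does not exist yet at this point in the series):

	struct bvec_iter {
		sector_t	bi_sector;	/* device address, in 512-byte sectors */
		unsigned int	bi_size;	/* residual I/O count, in bytes */
		unsigned int	bi_idx;		/* current index into the bio_vec array */
	};

Code that previously read bio->bi_sector, bio->bi_size and bio->bi_idx
now reads bio->bi_iter.bi_sector, bio->bi_iter.bi_size and
bio->bi_iter.bi_idx, as the per-file conversions below show.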

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: dm-devel@redhat.com
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Boaz Harrosh <bharrosh@panasas.com>
Cc: Benny Halevy <bhalevy@tonian.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Chris Mason <chris.mason@fusionio.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Dave Kleikamp <shaggy@kernel.org>
Cc: Joern Engel <joern@logfs.org>
Cc: Prasad Joshi <prasadjoshi.linux@gmail.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Ben Myers <bpm@sgi.com>
Cc: xfs@oss.sgi.com
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <Ian.Campbell@citrix.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Jerome Marchand <jmarchand@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Peng Tao <tao.peng@emc.com>
Cc: Andy Adamson <andros@netapp.com>
Cc: fanchaoting <fanchaoting@cn.fujitsu.com>
Cc: Jie Liu <jeff.liu@oracle.com>
Cc: Sunil Mushran <sunil.mushran@gmail.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Namjae Jeon <namjae.jeon@samsung.com>
Cc: Pankaj Kumar <pankaj.km@samsung.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Mel Gorman <mgorman@suse.de>

+700 -638
+3 -4
Documentation/block/biodoc.txt
··· 447 447 * main unit of I/O for the block layer and lower layers (ie drivers) 448 448 */ 449 449 struct bio { 450 - sector_t bi_sector; 451 450 struct bio *bi_next; /* request queue link */ 452 451 struct block_device *bi_bdev; /* target device */ 453 452 unsigned long bi_flags; /* status, command, etc */ 454 453 unsigned long bi_rw; /* low bits: r/w, high: priority */ 455 454 456 455 unsigned int bi_vcnt; /* how may bio_vec's */ 457 - unsigned int bi_idx; /* current index into bio_vec array */ 456 + struct bvec_iter bi_iter; /* current index into bio_vec array */ 458 457 459 458 unsigned int bi_size; /* total size in bytes */ 460 459 unsigned short bi_phys_segments; /* segments after physaddr coalesce*/ ··· 479 480 - Code that traverses the req list can find all the segments of a bio 480 481 by using rq_for_each_segment. This handles the fact that a request 481 482 has multiple bios, each of which can have multiple segments. 482 - - Drivers which can't process a large bio in one shot can use the bi_idx 483 + - Drivers which can't process a large bio in one shot can use the bi_iter 483 484 field to keep track of the next bio_vec entry to process. 484 485 (e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE) 485 486 [TBD: Should preferably also have a bi_voffset and bi_vlen to avoid modifying ··· 588 589 nr_sectors and current_nr_sectors fields (based on the corresponding 589 590 hard_xxx values and the number of bytes transferred) and updates it on 590 591 every transfer that invokes end_that_request_first. It does the same for the 591 - buffer, bio, bio->bi_idx fields too. 592 + buffer, bio, bio->bi_iter fields too. 592 593 593 594 The buffer field is just a virtual address mapping of the current segment 594 595 of the i/o buffer in cases where the buffer resides in low-memory. For high
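
As the documentation change above notes, a driver that cannot process a
large bio in one shot now tracks its position in bio->bi_iter. A minimal
sketch of the completion pattern under the new layout (the helper name is
illustrative, not part of this patch; bio_advance() already exists at this
point in the series and now updates the bi_iter fields), mirroring the
req_bio_endio() conversion in block/blk-core.c below:

	/* Retire 'nbytes' of a bio; finish it once fully drained. */
	static void example_end_partial(struct bio *bio, unsigned int nbytes,
					int error)
	{
		bio_advance(bio, nbytes);	/* advances bio->bi_iter */
		if (bio->bi_iter.bi_size == 0)	/* was: bio->bi_size == 0 */
			bio_endio(bio, error);
	}
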
+1 -1
arch/m68k/emu/nfblock.c
··· 64 64 struct nfhd_device *dev = queue->queuedata; 65 65 struct bio_vec *bvec; 66 66 int i, dir, len, shift; 67 - sector_t sec = bio->bi_sector; 67 + sector_t sec = bio->bi_iter.bi_sector; 68 68 69 69 dir = bio_data_dir(bio); 70 70 shift = dev->bshift;
+2 -1
arch/powerpc/sysdev/axonram.c
··· 113 113 unsigned int transfered; 114 114 unsigned short idx; 115 115 116 - phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT); 116 + phys_mem = bank->io_addr + (bio->bi_iter.bi_sector << 117 + AXON_RAM_SECTOR_SHIFT); 117 118 phys_end = bank->io_addr + bank->size; 118 119 transfered = 0; 119 120 bio_for_each_segment(vec, bio, idx) {
+18 -18
block/blk-core.c
··· 130 130 bio_advance(bio, nbytes); 131 131 132 132 /* don't actually finish bio if it's part of flush sequence */ 133 - if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) 133 + if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) 134 134 bio_endio(bio, error); 135 135 } 136 136 ··· 1326 1326 bio->bi_io_vec->bv_offset = 0; 1327 1327 bio->bi_io_vec->bv_len = len; 1328 1328 1329 - bio->bi_size = len; 1329 + bio->bi_iter.bi_size = len; 1330 1330 bio->bi_vcnt = 1; 1331 1331 bio->bi_phys_segments = 1; 1332 1332 ··· 1351 1351 1352 1352 req->biotail->bi_next = bio; 1353 1353 req->biotail = bio; 1354 - req->__data_len += bio->bi_size; 1354 + req->__data_len += bio->bi_iter.bi_size; 1355 1355 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1356 1356 1357 1357 blk_account_io_start(req, false); ··· 1380 1380 * not touch req->buffer either... 1381 1381 */ 1382 1382 req->buffer = bio_data(bio); 1383 - req->__sector = bio->bi_sector; 1384 - req->__data_len += bio->bi_size; 1383 + req->__sector = bio->bi_iter.bi_sector; 1384 + req->__data_len += bio->bi_iter.bi_size; 1385 1385 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1386 1386 1387 1387 blk_account_io_start(req, false); ··· 1459 1459 req->cmd_flags |= REQ_FAILFAST_MASK; 1460 1460 1461 1461 req->errors = 0; 1462 - req->__sector = bio->bi_sector; 1462 + req->__sector = bio->bi_iter.bi_sector; 1463 1463 req->ioprio = bio_prio(bio); 1464 1464 blk_rq_bio_prep(req->q, req, bio); 1465 1465 } ··· 1583 1583 if (bio_sectors(bio) && bdev != bdev->bd_contains) { 1584 1584 struct hd_struct *p = bdev->bd_part; 1585 1585 1586 - bio->bi_sector += p->start_sect; 1586 + bio->bi_iter.bi_sector += p->start_sect; 1587 1587 bio->bi_bdev = bdev->bd_contains; 1588 1588 1589 1589 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, 1590 1590 bdev->bd_dev, 1591 - bio->bi_sector - p->start_sect); 1591 + bio->bi_iter.bi_sector - p->start_sect); 1592 1592 } 1593 1593 } ··· 1654 1654 /* Test device or partition size, when known. */ 1655 1655 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 1656 1656 if (maxsector) { 1657 - sector_t sector = bio->bi_sector; 1657 + sector_t sector = bio->bi_iter.bi_sector; 1658 1658 1659 1659 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { 1660 1660 /* ··· 1690 1690 "generic_make_request: Trying to access " 1691 1691 "nonexistent block-device %s (%Lu)\n", 1692 1692 bdevname(bio->bi_bdev, b), 1693 - (long long) bio->bi_sector); 1693 + (long long) bio->bi_iter.bi_sector); 1694 1694 goto end_io; 1695 1695 } ··· 1704 1704 } 1705 1705 1706 1706 part = bio->bi_bdev->bd_part; 1707 - if (should_fail_request(part, bio->bi_size) || 1707 + if (should_fail_request(part, bio->bi_iter.bi_size) || 1708 1708 should_fail_request(&part_to_disk(part)->part0, 1709 - bio->bi_size)) 1709 + bio->bi_iter.bi_size)) 1710 1710 goto end_io; 1711 1711 1712 1712 /* ··· 1865 1865 if (rw & WRITE) { 1866 1866 count_vm_events(PGPGOUT, count); 1867 1867 } else { 1868 - task_io_account_read(bio->bi_size); 1868 + task_io_account_read(bio->bi_iter.bi_size); 1869 1869 count_vm_events(PGPGIN, count); 1870 1870 } 1871 1871 ··· 1874 1874 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", 1875 1875 current->comm, task_pid_nr(current), 1876 1876 (rw & WRITE) ? "WRITE" : "READ", 1877 - (unsigned long long)bio->bi_sector, 1877 + (unsigned long long)bio->bi_iter.bi_sector, 1878 1878 bdevname(bio->bi_bdev, b), 1879 1879 count); 1880 1880 } ··· 2007 2007 for (bio = rq->bio; bio; bio = bio->bi_next) { 2008 2008 if ((bio->bi_rw & ff) != ff) 2009 2009 break; 2010 - bytes += bio->bi_size; 2010 + bytes += bio->bi_iter.bi_size; 2011 2011 } 2012 2012 2013 2013 /* this could lead to infinite loop */ ··· 2378 2378 total_bytes = 0; 2379 2379 while (req->bio) { 2380 2380 struct bio *bio = req->bio; 2381 - unsigned bio_bytes = min(bio->bi_size, nr_bytes); 2381 + unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); 2382 2382 2383 - if (bio_bytes == bio->bi_size) 2383 + if (bio_bytes == bio->bi_iter.bi_size) 2384 2384 req->bio = bio->bi_next; 2385 2385 2386 2386 req_bio_endio(req, bio, bio_bytes, error); ··· 2728 2728 rq->nr_phys_segments = bio_phys_segments(q, bio); 2729 2729 rq->buffer = bio_data(bio); 2730 2730 } 2731 - rq->__data_len = bio->bi_size; 2731 + rq->__data_len = bio->bi_iter.bi_size; 2732 2732 rq->bio = rq->biotail = bio; 2733 2733 2734 2734 if (bio->bi_bdev)
+1 -1
block/blk-flush.c
··· 548 548 * copied from blk_rq_pos(rq). 549 549 */ 550 550 if (error_sector) 551 - *error_sector = bio->bi_sector; 551 + *error_sector = bio->bi_iter.bi_sector; 552 552 553 553 bio_put(bio); 554 554 return ret;
+6 -6
block/blk-lib.c
··· 108 108 req_sects = end_sect - sector; 109 109 } 110 110 111 - bio->bi_sector = sector; 111 + bio->bi_iter.bi_sector = sector; 112 112 bio->bi_end_io = bio_batch_end_io; 113 113 bio->bi_bdev = bdev; 114 114 bio->bi_private = &bb; 115 115 116 - bio->bi_size = req_sects << 9; 116 + bio->bi_iter.bi_size = req_sects << 9; 117 117 nr_sects -= req_sects; 118 118 sector = end_sect; 119 119 ··· 174 174 break; 175 175 } 176 176 177 - bio->bi_sector = sector; 177 + bio->bi_iter.bi_sector = sector; 178 178 bio->bi_end_io = bio_batch_end_io; 179 179 bio->bi_bdev = bdev; 180 180 bio->bi_private = &bb; ··· 184 184 bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev); 185 185 186 186 if (nr_sects > max_write_same_sectors) { 187 - bio->bi_size = max_write_same_sectors << 9; 187 + bio->bi_iter.bi_size = max_write_same_sectors << 9; 188 188 nr_sects -= max_write_same_sectors; 189 189 sector += max_write_same_sectors; 190 190 } else { 191 - bio->bi_size = nr_sects << 9; 191 + bio->bi_iter.bi_size = nr_sects << 9; 192 192 nr_sects = 0; 193 193 } 194 194 ··· 240 240 break; 241 241 } 242 242 243 - bio->bi_sector = sector; 243 + bio->bi_iter.bi_sector = sector; 244 244 bio->bi_bdev = bdev; 245 245 bio->bi_end_io = bio_batch_end_io; 246 246 bio->bi_private = &bb;
+3 -3
block/blk-map.c
··· 20 20 rq->biotail->bi_next = bio; 21 21 rq->biotail = bio; 22 22 23 - rq->__data_len += bio->bi_size; 23 + rq->__data_len += bio->bi_iter.bi_size; 24 24 } 25 25 return 0; 26 26 } ··· 76 76 77 77 ret = blk_rq_append_bio(q, rq, bio); 78 78 if (!ret) 79 - return bio->bi_size; 79 + return bio->bi_iter.bi_size; 80 80 81 81 /* if it was boucned we must call the end io function */ 82 82 bio_endio(bio, 0); ··· 220 220 if (IS_ERR(bio)) 221 221 return PTR_ERR(bio); 222 222 223 - if (bio->bi_size != len) { 223 + if (bio->bi_iter.bi_size != len) { 224 224 /* 225 225 * Grab an extra reference to this bio, as bio_unmap_user() 226 226 * expects to be able to drop it twice as it happens on the
+2 -2
block/blk-merge.c
··· 543 543 544 544 int blk_try_merge(struct request *rq, struct bio *bio) 545 545 { 546 - if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector) 546 + if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) 547 547 return ELEVATOR_BACK_MERGE; 548 - else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector) 548 + else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) 549 549 return ELEVATOR_FRONT_MERGE; 550 550 return ELEVATOR_NO_MERGE; 551 551 }
+1 -1
block/blk-mq.c
··· 301 301 struct bio *next = bio->bi_next; 302 302 303 303 bio->bi_next = NULL; 304 - bytes += bio->bi_size; 304 + bytes += bio->bi_iter.bi_size; 305 305 blk_mq_bio_endio(rq, bio, error); 306 306 bio = next; 307 307 }
+7 -7
block/blk-throttle.c
··· 877 877 do_div(tmp, HZ); 878 878 bytes_allowed = tmp; 879 879 880 - if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) { 880 + if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { 881 881 if (wait) 882 882 *wait = 0; 883 883 return 1; 884 884 } 885 885 886 886 /* Calc approx time to dispatch */ 887 - extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed; 887 + extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; 888 888 jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]); 889 889 890 890 if (!jiffy_wait) ··· 987 987 bool rw = bio_data_dir(bio); 988 988 989 989 /* Charge the bio to the group */ 990 - tg->bytes_disp[rw] += bio->bi_size; 990 + tg->bytes_disp[rw] += bio->bi_iter.bi_size; 991 991 tg->io_disp[rw]++; 992 992 993 993 /* ··· 1003 1003 */ 1004 1004 if (!(bio->bi_rw & REQ_THROTTLED)) { 1005 1005 bio->bi_rw |= REQ_THROTTLED; 1006 - throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, 1007 - bio->bi_rw); 1006 + throtl_update_dispatch_stats(tg_to_blkg(tg), 1007 + bio->bi_iter.bi_size, bio->bi_rw); 1008 1008 } 1009 1009 } 1010 1010 ··· 1508 1508 if (tg) { 1509 1509 if (!tg->has_rules[rw]) { 1510 1510 throtl_update_dispatch_stats(tg_to_blkg(tg), 1511 - bio->bi_size, bio->bi_rw); 1511 + bio->bi_iter.bi_size, bio->bi_rw); 1512 1512 goto out_unlock_rcu; 1513 1513 } 1514 1514 } ··· 1564 1564 /* out-of-limit, queue to @tg */ 1565 1565 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d", 1566 1566 rw == READ ? 'R' : 'W', 1567 - tg->bytes_disp[rw], bio->bi_size, tg->bps[rw], 1567 + tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw], 1568 1568 tg->io_disp[rw], tg->iops[rw], 1569 1569 sq->nr_queued[READ], sq->nr_queued[WRITE]); 1570 1570
+1 -1
block/elevator.c
··· 440 440 /* 441 441 * See if our hash lookup can find a potential backmerge. 442 442 */ 443 - __rq = elv_rqhash_find(q, bio->bi_sector); 443 + __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); 444 444 if (__rq && elv_rq_merge_ok(__rq, bio)) { 445 445 *req = __rq; 446 446 return ELEVATOR_BACK_MERGE;
+3 -3
drivers/block/aoe/aoecmd.c
··· 929 929 memset(buf, 0, sizeof(*buf)); 930 930 buf->rq = rq; 931 931 buf->bio = bio; 932 - buf->resid = bio->bi_size; 933 - buf->sector = bio->bi_sector; 932 + buf->resid = bio->bi_iter.bi_size; 933 + buf->sector = bio->bi_iter.bi_sector; 934 934 bio_pageinc(bio); 935 935 buf->bv = bio_iovec(bio); 936 936 buf->bv_resid = buf->bv->bv_len; ··· 1152 1152 do { 1153 1153 bio = rq->bio; 1154 1154 bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags); 1155 - } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size)); 1155 + } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size)); 1156 1156 1157 1157 /* cf. http://lkml.org/lkml/2006/10/31/28 */ 1158 1158 if (!fastfail)
+2 -2
drivers/block/brd.c
··· 333 333 int i; 334 334 int err = -EIO; 335 335 336 - sector = bio->bi_sector; 336 + sector = bio->bi_iter.bi_sector; 337 337 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) 338 338 goto out; 339 339 340 340 if (unlikely(bio->bi_rw & REQ_DISCARD)) { 341 341 err = 0; 342 - discard_from_brd(brd, sector, bio->bi_size); 342 + discard_from_brd(brd, sector, bio->bi_iter.bi_size); 343 343 goto out; 344 344 } 345 345
+1 -1
drivers/block/drbd/drbd_actlog.c
··· 159 159 160 160 bio = bio_alloc_drbd(GFP_NOIO); 161 161 bio->bi_bdev = bdev->md_bdev; 162 - bio->bi_sector = sector; 162 + bio->bi_iter.bi_sector = sector; 163 163 err = -EIO; 164 164 if (bio_add_page(bio, page, size, 0) != size) 165 165 goto out;
+1 -1
drivers/block/drbd/drbd_bitmap.c
··· 1028 1028 } else 1029 1029 page = b->bm_pages[page_nr]; 1030 1030 bio->bi_bdev = mdev->ldev->md_bdev; 1031 - bio->bi_sector = on_disk_sector; 1031 + bio->bi_iter.bi_sector = on_disk_sector; 1032 1032 /* bio_add_page of a single page to an empty bio will always succeed, 1033 1033 * according to api. Do we want to assert that? */ 1034 1034 bio_add_page(bio, page, len, 0);
+3 -3
drivers/block/drbd/drbd_receiver.c
··· 1333 1333 goto fail; 1334 1334 } 1335 1335 /* > peer_req->i.sector, unless this is the first bio */ 1336 - bio->bi_sector = sector; 1336 + bio->bi_iter.bi_sector = sector; 1337 1337 bio->bi_bdev = mdev->ldev->backing_bdev; 1338 1338 bio->bi_rw = rw; 1339 1339 bio->bi_private = peer_req; ··· 1353 1353 dev_err(DEV, 1354 1354 "bio_add_page failed for len=%u, " 1355 1355 "bi_vcnt=0 (bi_sector=%llu)\n", 1356 - len, (unsigned long long)bio->bi_sector); 1356 + len, (uint64_t)bio->bi_iter.bi_sector); 1357 1357 err = -ENOSPC; 1358 1358 goto fail; 1359 1359 } ··· 1615 1615 mdev->recv_cnt += data_size>>9; 1616 1616 1617 1617 bio = req->master_bio; 1618 - D_ASSERT(sector == bio->bi_sector); 1618 + D_ASSERT(sector == bio->bi_iter.bi_sector); 1619 1619 1620 1620 bio_for_each_segment(bvec, bio, i) { 1621 1621 void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
+3 -3
drivers/block/drbd/drbd_req.c
··· 77 77 req->epoch = 0; 78 78 79 79 drbd_clear_interval(&req->i); 80 - req->i.sector = bio_src->bi_sector; 81 - req->i.size = bio_src->bi_size; 80 + req->i.sector = bio_src->bi_iter.bi_sector; 81 + req->i.size = bio_src->bi_iter.bi_size; 82 82 req->i.local = true; 83 83 req->i.waiting = false; 84 84 ··· 1280 1280 /* 1281 1281 * what we "blindly" assume: 1282 1282 */ 1283 - D_ASSERT(IS_ALIGNED(bio->bi_size, 512)); 1283 + D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512)); 1284 1284 1285 1285 inc_ap_bio(mdev); 1286 1286 __drbd_make_request(mdev, bio, start_time);
+1 -1
drivers/block/drbd/drbd_req.h
··· 269 269 270 270 /* Short lived temporary struct on the stack. 271 271 * We could squirrel the error to be returned into 272 - * bio->bi_size, or similar. But that would be too ugly. */ 272 + * bio->bi_iter.bi_size, or similar. But that would be too ugly. */ 273 273 struct bio_and_error { 274 274 struct bio *bio; 275 275 int error;
+2 -2
drivers/block/floppy.c
··· 3775 3775 bio_vec.bv_len = size; 3776 3776 bio_vec.bv_offset = 0; 3777 3777 bio.bi_vcnt = 1; 3778 - bio.bi_size = size; 3778 + bio.bi_iter.bi_size = size; 3779 3779 bio.bi_bdev = bdev; 3780 - bio.bi_sector = 0; 3780 + bio.bi_iter.bi_sector = 0; 3781 3781 bio.bi_flags = (1 << BIO_QUIET); 3782 3782 init_completion(&complete); 3783 3783 bio.bi_private = &complete;
+2 -2
drivers/block/loop.c
··· 415 415 loff_t pos; 416 416 int ret; 417 417 418 - pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; 418 + pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset; 419 419 420 420 if (bio_rw(bio) == WRITE) { 421 421 struct file *file = lo->lo_backing_file; ··· 444 444 goto out; 445 445 } 446 446 ret = file->f_op->fallocate(file, mode, pos, 447 - bio->bi_size); 447 + bio->bi_iter.bi_size); 448 448 if (unlikely(ret && ret != -EINVAL && 449 449 ret != -EOPNOTSUPP)) 450 450 ret = -EIO;
+4 -3
drivers/block/mtip32xx/mtip32xx.c
··· 3993 3993 } 3994 3994 3995 3995 if (unlikely(bio->bi_rw & REQ_DISCARD)) { 3996 - bio_endio(bio, mtip_send_trim(dd, bio->bi_sector, 3996 + bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector, 3997 3997 bio_sectors(bio))); 3998 3998 return; 3999 3999 } ··· 4006 4006 4007 4007 if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 && 4008 4008 dd->unal_qdepth) { 4009 - if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */ 4009 + if (bio->bi_iter.bi_sector % 8 != 0) 4010 + /* Unaligned on 4k boundaries */ 4010 4011 unaligned = 1; 4011 4012 else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */ 4012 4013 unaligned = 1; ··· 4036 4035 4037 4036 /* Issue the read/write. */ 4038 4037 mtip_hw_submit_io(dd, 4039 - bio->bi_sector, 4038 + bio->bi_iter.bi_sector, 4040 4039 bio_sectors(bio), 4041 4040 nents, 4042 4041 tag,
+13 -12
drivers/block/nvme-core.c
··· 468 468 { 469 469 struct nvme_bio_pair *bp; 470 470 471 - BUG_ON(len > bio->bi_size); 471 + BUG_ON(len > bio->bi_iter.bi_size); 472 472 BUG_ON(idx > bio->bi_vcnt); 473 473 474 474 bp = kmalloc(sizeof(*bp), GFP_ATOMIC); ··· 479 479 bp->b1 = *bio; 480 480 bp->b2 = *bio; 481 481 482 - bp->b1.bi_size = len; 483 - bp->b2.bi_size -= len; 482 + bp->b1.bi_iter.bi_size = len; 483 + bp->b2.bi_iter.bi_size -= len; 484 484 bp->b1.bi_vcnt = idx; 485 - bp->b2.bi_idx = idx; 486 - bp->b2.bi_sector += len >> 9; 485 + bp->b2.bi_iter.bi_idx = idx; 486 + bp->b2.bi_iter.bi_sector += len >> 9; 487 487 488 488 if (offset) { 489 489 bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec), ··· 552 552 { 553 553 struct bio_vec *bvec, *bvprv = NULL; 554 554 struct scatterlist *sg = NULL; 555 - int i, length = 0, nsegs = 0, split_len = bio->bi_size; 555 + int i, length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size; 556 556 557 557 if (nvmeq->dev->stripe_size) 558 558 split_len = nvmeq->dev->stripe_size - 559 - ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1)); 559 + ((bio->bi_iter.bi_sector << 9) & 560 + (nvmeq->dev->stripe_size - 1)); 560 561 561 562 sg_init_table(iod->sg, psegs); 562 563 bio_for_each_segment(bvec, bio, i) { ··· 585 584 if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0) 586 585 return -ENOMEM; 587 586 588 - BUG_ON(length != bio->bi_size); 587 + BUG_ON(length != bio->bi_iter.bi_size); 589 588 return length; 590 589 } 591 590 ··· 609 608 iod->npages = 0; 610 609 611 610 range->cattr = cpu_to_le32(0); 612 - range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift); 613 - range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); 611 + range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift); 612 + range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector)); 614 613 615 614 memset(cmnd, 0, sizeof(*cmnd)); 616 615 cmnd->dsm.opcode = nvme_cmd_dsm; ··· 675 674 } 676 675 677 676 result = -ENOMEM; 678 - iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); 677 + iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC); 679 678 if (!iod) 680 679 goto nomem; 681 680 iod->private = bio; ··· 724 723 cmnd->rw.nsid = cpu_to_le32(ns->ns_id); 725 724 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, 726 725 GFP_ATOMIC); 727 - cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); 726 + cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector)); 728 727 cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); 729 728 cmnd->rw.control = cpu_to_le16(control); 730 729 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+29 -25
drivers/block/pktcdvd.c
··· 651 651 652 652 for (;;) { 653 653 tmp = rb_entry(n, struct pkt_rb_node, rb_node); 654 - if (s <= tmp->bio->bi_sector) 654 + if (s <= tmp->bio->bi_iter.bi_sector) 655 655 next = n->rb_left; 656 656 else 657 657 next = n->rb_right; ··· 660 660 n = next; 661 661 } 662 662 663 - if (s > tmp->bio->bi_sector) { 663 + if (s > tmp->bio->bi_iter.bi_sector) { 664 664 tmp = pkt_rbtree_next(tmp); 665 665 if (!tmp) 666 666 return NULL; 667 667 } 668 - BUG_ON(s > tmp->bio->bi_sector); 668 + BUG_ON(s > tmp->bio->bi_iter.bi_sector); 669 669 return tmp; 670 670 } ··· 676 676 { 677 677 struct rb_node **p = &pd->bio_queue.rb_node; 678 678 struct rb_node *parent = NULL; 679 - sector_t s = node->bio->bi_sector; 679 + sector_t s = node->bio->bi_iter.bi_sector; 680 680 struct pkt_rb_node *tmp; 681 681 682 682 while (*p) { 683 683 parent = *p; 684 684 tmp = rb_entry(parent, struct pkt_rb_node, rb_node); 685 - if (s < tmp->bio->bi_sector) 685 + if (s < tmp->bio->bi_iter.bi_sector) 686 686 p = &(*p)->rb_left; 687 687 else 688 688 p = &(*p)->rb_right; ··· 857 857 spin_lock(&pd->iosched.lock); 858 858 bio = bio_list_peek(&pd->iosched.write_queue); 859 859 spin_unlock(&pd->iosched.lock); 860 - if (bio && (bio->bi_sector == pd->iosched.last_write)) 860 + if (bio && (bio->bi_iter.bi_sector == 861 + pd->iosched.last_write)) 861 862 need_write_seek = 0; 862 863 if (need_write_seek && reads_queued) { 863 864 if (atomic_read(&pd->cdrw.pending_bios) > 0) { ··· 889 888 continue; 890 889 891 890 if (bio_data_dir(bio) == READ) 892 - pd->iosched.successive_reads += bio->bi_size >> 10; 891 + pd->iosched.successive_reads += 892 + bio->bi_iter.bi_size >> 10; 893 893 else { 894 894 pd->iosched.successive_reads = 0; 895 895 pd->iosched.last_write = bio_end_sector(bio); ··· 980 978 981 979 pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", 982 980 bio, (unsigned long long)pkt->sector, 983 981 (unsigned long long)bio->bi_iter.bi_sector, err); 984 982 985 983 if (err) 986 984 atomic_inc(&pkt->io_errors); ··· 1028 1026 memset(written, 0, sizeof(written)); 1029 1027 spin_lock(&pkt->lock); 1030 1028 bio_list_for_each(bio, &pkt->orig_bios) { 1031 - int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); 1032 - int num_frames = bio->bi_size / CD_FRAMESIZE; 1029 + int first_frame = (bio->bi_iter.bi_sector - pkt->sector) / 1030 + (CD_FRAMESIZE >> 9); 1031 + int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE; 1033 1032 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); 1034 1033 BUG_ON(first_frame < 0); 1035 1034 BUG_ON(first_frame + num_frames > pkt->frames); ··· 1056 1053 1057 1054 bio = pkt->r_bios[f]; 1058 1055 bio_reset(bio); 1059 - bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); 1056 + bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); 1060 1057 bio->bi_bdev = pd->bdev; 1061 1058 bio->bi_end_io = pkt_end_io_read; 1062 1059 bio->bi_private = pkt; ··· 1153 1150 bio_reset(pkt->bio); 1154 1151 pkt->bio->bi_bdev = pd->bdev; 1155 1152 pkt->bio->bi_rw = REQ_WRITE; 1156 - pkt->bio->bi_sector = new_sector; 1157 - pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE; 1153 + pkt->bio->bi_iter.bi_sector = new_sector; 1154 + pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; 1158 1155 pkt->bio->bi_vcnt = pkt->frames; 1159 1156 1160 1157 pkt->bio->bi_end_io = pkt_end_io_packet_write; ··· 1216 1213 node = first_node; 1217 1214 while (node) { 1218 1215 bio = node->bio; 1219 1216 zone = get_zone(bio->bi_iter.bi_sector, pd); 1220 1217 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) { 1221 1218 if (p->sector == zone) { 1222 1219 bio = NULL; ··· 1255 1252 pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone); 1256 1253 while ((node = pkt_rbtree_find(pd, zone)) != NULL) { 1257 1254 bio = node->bio; 1258 - pkt_dbg(2, pd, "found zone=%llx\n", 1259 - (unsigned long long)get_zone(bio->bi_sector, pd)); 1255 + pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long) 1256 + get_zone(bio->bi_iter.bi_sector, pd)); 1260 1257 if (get_zone(bio->bi_iter.bi_sector, pd) != zone) 1261 1258 break; 1262 1259 pkt_rbtree_erase(pd, node); 1263 1260 spin_lock(&pkt->lock); 1264 1261 bio_list_add(&pkt->orig_bios, bio); 1265 - pkt->write_size += bio->bi_size / CD_FRAMESIZE; 1262 + pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE; 1266 1263 spin_unlock(&pkt->lock); 1267 1264 } 1268 1265 /* check write congestion marks, and if bio_queue_size is ··· 1296 1293 struct bio_vec *bvec = pkt->w_bio->bi_io_vec; 1297 1294 1298 1295 bio_reset(pkt->w_bio); 1299 - pkt->w_bio->bi_sector = pkt->sector; 1296 + pkt->w_bio->bi_iter.bi_sector = pkt->sector; 1300 1297 pkt->w_bio->bi_bdev = pd->bdev; 1301 1298 pkt->w_bio->bi_end_io = pkt_end_io_packet_write; 1302 1299 pkt->w_bio->bi_private = pkt; ··· 2373 2370 2374 2371 if (!test_bit(PACKET_WRITABLE, &pd->flags)) { 2375 2372 pkt_notice(pd, "WRITE for ro device (%llu)\n", 2376 2373 (unsigned long long)bio->bi_iter.bi_sector); 2377 2374 goto end_io; 2378 2375 } 2379 2376 2380 - if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) { 2377 + if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) { 2381 2378 pkt_err(pd, "wrong bio size\n"); 2382 2379 goto end_io; 2383 2380 } 2384 2381 2385 2382 blk_queue_bounce(q, &bio); 2386 2383 2387 - zone = get_zone(bio->bi_sector, pd); 2384 + zone = get_zone(bio->bi_iter.bi_sector, pd); 2388 2385 pkt_dbg(2, pd, "start = %6llx stop = %6llx\n", 2389 2386 (unsigned long long)bio->bi_iter.bi_sector, 2390 2387 (unsigned long long)bio_end_sector(bio)); 2391 2388 2392 2389 /* Check if we have to split the bio */ ··· 2398 2395 last_zone = get_zone(bio_end_sector(bio) - 1, pd); 2399 2396 if (last_zone != zone) { 2400 2397 BUG_ON(last_zone != zone + pd->settings.size); 2401 - first_sectors = last_zone - bio->bi_sector; 2398 + first_sectors = last_zone - bio->bi_iter.bi_sector; 2402 2399 bp = bio_split(bio, first_sectors); 2403 2400 BUG_ON(!bp); 2404 2401 pkt_make_request(q, &bp->bio1); ··· 2420 2417 if ((pkt->state == PACKET_WAITING_STATE) || 2421 2418 (pkt->state == PACKET_READ_WAIT_STATE)) { 2422 2419 bio_list_add(&pkt->orig_bios, bio); 2423 2420 pkt->write_size += 2421 + bio->bi_iter.bi_size / CD_FRAMESIZE; 2424 2422 if ((pkt->write_size >= pkt->frames) && 2425 2423 (pkt->state == PACKET_WAITING_STATE)) { 2426 2424 atomic_inc(&pkt->run_sm);
+1 -1
drivers/block/ps3disk.c
··· 104 104 dev_dbg(&dev->sbd.core, 105 105 "%s:%u: bio %u: %u segs %u sectors from %lu\n", 106 106 __func__, __LINE__, i, bio_segments(iter.bio), 107 - bio_sectors(iter.bio), iter.bio->bi_sector); 107 + bio_sectors(iter.bio), iter.bio->bi_iter.bi_sector); 108 108 109 109 size = bvec->bv_len; 110 110 buf = bvec_kmap_irq(bvec, &flags);
+1 -1
drivers/block/ps3vram.c
··· 553 553 struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); 554 554 int write = bio_data_dir(bio) == WRITE; 555 555 const char *op = write ? "write" : "read"; 556 - loff_t offset = bio->bi_sector << 9; 556 + loff_t offset = bio->bi_iter.bi_sector << 9; 557 557 int error = 0; 558 558 struct bio_vec *bvec; 559 559 unsigned int i;
+11 -10
drivers/block/rbd.c
··· 1183 1183 1184 1184 /* Handle the easy case for the caller */ 1185 1185 1186 - if (!offset && len == bio_src->bi_size) 1186 + if (!offset && len == bio_src->bi_iter.bi_size) 1187 1187 return bio_clone(bio_src, gfpmask); 1188 1188 1189 1189 if (WARN_ON_ONCE(!len)) 1190 1190 return NULL; 1191 - if (WARN_ON_ONCE(len > bio_src->bi_size)) 1191 + if (WARN_ON_ONCE(len > bio_src->bi_iter.bi_size)) 1192 1192 return NULL; 1193 - if (WARN_ON_ONCE(offset > bio_src->bi_size - len)) 1193 + if (WARN_ON_ONCE(offset > bio_src->bi_iter.bi_size - len)) 1194 1194 return NULL; 1195 1195 1196 1196 /* Find first affected segment... */ ··· 1220 1220 return NULL; /* ENOMEM */ 1221 1221 1222 1222 bio->bi_bdev = bio_src->bi_bdev; 1223 - bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT); 1223 + bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector + 1224 + (offset >> SECTOR_SHIFT); 1224 1225 bio->bi_rw = bio_src->bi_rw; 1225 1226 bio->bi_flags |= 1 << BIO_CLONED; 1226 1227 ··· 1240 1239 } 1241 1240 1242 1241 bio->bi_vcnt = vcnt; 1243 - bio->bi_size = len; 1244 - bio->bi_idx = 0; 1242 + bio->bi_iter.bi_size = len; 1245 1243 1246 1244 return bio; 1247 1245 } ··· 1271 1271 1272 1272 /* Build up a chain of clone bios up to the limit */ 1273 1273 1274 - if (!bi || off >= bi->bi_size || !len) 1274 + if (!bi || off >= bi->bi_iter.bi_size || !len) 1275 1275 return NULL; /* Nothing to clone */ 1276 1276 1277 1277 end = &chain; ··· 1283 1283 rbd_warn(NULL, "bio_chain exhausted with %u left", len); 1284 1284 goto out_err; /* EINVAL; ran out of bio's */ 1285 1285 } 1286 - bi_size = min_t(unsigned int, bi->bi_size - off, len); 1286 + bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len); 1287 1287 bio = bio_clone_range(bi, off, bi_size, gfpmask); 1288 1288 if (!bio) 1289 1289 goto out_err; /* ENOMEM */ ··· 1292 1292 end = &bio->bi_next; 1293 1293 1294 1294 off += bi_size; 1295 - if (off == bi->bi_size) { 1295 + if (off == bi->bi_iter.bi_size) { 1296 1296 bi = bi->bi_next; 1297 1297 off = 0; 1298 1298 } ··· 2186 2186 2187 2187 if (type == OBJ_REQUEST_BIO) { 2188 2188 bio_list = data_desc; 2189 - rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT); 2189 + rbd_assert(img_offset == 2190 + bio_list->bi_iter.bi_sector << SECTOR_SHIFT); 2190 2191 } else { 2191 2192 rbd_assert(type == OBJ_REQUEST_PAGES); 2192 2193 pages = data_desc;
+3 -3
drivers/block/rsxx/dev.c
··· 174 174 if (!card) 175 175 goto req_err; 176 176 177 - if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk)) 177 + if (bio_end_sector(bio) > get_capacity(card->gendisk)) 178 178 goto req_err; 179 179 180 180 if (unlikely(card->halt)) { ··· 187 187 goto req_err; 188 188 } 189 189 190 - if (bio->bi_size == 0) { 190 + if (bio->bi_iter.bi_size == 0) { 191 191 dev_err(CARD_TO_DEV(card), "size zero BIO!\n"); 192 192 goto req_err; 193 193 } ··· 208 208 209 209 dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n", 210 210 bio_data_dir(bio) ? 'W' : 'R', bio_meta, 211 - (u64)bio->bi_sector << 9, bio->bi_size); 211 + (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size); 212 212 213 213 st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas, 214 214 bio_dma_done_cb, bio_meta);
+2 -2
drivers/block/rsxx/dma.c
··· 696 696 int st; 697 697 int i; 698 698 699 - addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */ 699 + addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */ 700 700 atomic_set(n_dmas, 0); 701 701 702 702 for (i = 0; i < card->n_targets; i++) { ··· 705 705 } 706 706 707 707 if (bio->bi_rw & REQ_DISCARD) { 708 - bv_len = bio->bi_size; 708 + bv_len = bio->bi_iter.bi_size; 709 709 710 710 while (bv_len > 0) { 711 711 tgt = rsxx_get_dma_tgt(card, addr8);
+5 -4
drivers/block/umem.c
··· 352 352 bio = card->currentbio; 353 353 if (!bio && card->bio) { 354 354 card->currentbio = card->bio; 355 - card->current_idx = card->bio->bi_idx; 356 - card->current_sector = card->bio->bi_sector; 355 + card->current_idx = card->bio->bi_iter.bi_idx; 356 + card->current_sector = card->bio->bi_iter.bi_sector; 357 357 card->bio = card->bio->bi_next; 358 358 if (card->bio == NULL) 359 359 card->biotail = &card->bio; ··· 451 451 if (page->idx >= bio->bi_vcnt) { 452 452 page->bio = bio->bi_next; 453 453 if (page->bio) 454 - page->idx = page->bio->bi_idx; 454 + page->idx = page->bio->bi_iter.bi_idx; 455 455 } 456 456 457 457 pci_unmap_page(card->dev, desc->data_dma_handle, ··· 532 532 { 533 533 struct cardinfo *card = q->queuedata; 534 534 pr_debug("mm_make_request %llu %u\n", 535 - (unsigned long long)bio->bi_sector, bio->bi_size); 535 + (unsigned long long)bio->bi_iter.bi_sector, 536 + bio->bi_iter.bi_size); 536 537 537 538 spin_lock_irq(&card->lock); 538 539 *card->biotail = bio;
+1 -1
drivers/block/xen-blkback/blkback.c
··· 1257 1257 bio->bi_bdev = preq.bdev; 1258 1258 bio->bi_private = pending_req; 1259 1259 bio->bi_end_io = end_block_io_op; 1260 - bio->bi_sector = preq.sector_number; 1260 + bio->bi_iter.bi_sector = preq.sector_number; 1261 1261 } 1262 1262 1263 1263 preq.sector_number += seg[i].nsec;
+1 -1
drivers/block/xen-blkfront.c
··· 1547 1547 for (i = 0; i < pending; i++) { 1548 1548 offset = (i * segs * PAGE_SIZE) >> 9; 1549 1549 size = min((unsigned int)(segs * PAGE_SIZE) >> 9, 1550 - (unsigned int)(bio->bi_size >> 9) - offset); 1550 + (unsigned int)bio_sectors(bio) - offset); 1551 1551 cloned_bio = bio_clone(bio, GFP_NOIO); 1552 1552 BUG_ON(cloned_bio == NULL); 1553 1553 bio_trim(cloned_bio, offset, size);
+2 -2
drivers/md/bcache/btree.c
··· 299 299 300 300 bio = bch_bbio_alloc(b->c); 301 301 bio->bi_rw = REQ_META|READ_SYNC; 302 - bio->bi_size = KEY_SIZE(&b->key) << 9; 302 + bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; 303 303 bio->bi_end_io = btree_node_read_endio; 304 304 bio->bi_private = &cl; 305 305 ··· 395 395 b->bio->bi_end_io = btree_node_write_endio; 396 396 b->bio->bi_private = cl; 397 397 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; 398 - b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); 398 + b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c); 399 399 bch_bio_map(b->bio, i); 400 400 401 401 /*
+1 -1
drivers/md/bcache/debug.c
··· 195 195 dc->disk.c, 196 196 "verify failed at dev %s sector %llu", 197 197 bdevname(dc->bdev, name), 198 - (uint64_t) bio->bi_sector); 198 + (uint64_t) bio->bi_iter.bi_sector); 199 199 200 200 kunmap_atomic(p1); 201 201 }
+13 -13
drivers/md/bcache/io.c
··· 21 21 22 22 static void bch_generic_make_request_hack(struct bio *bio) 23 23 { 24 - if (bio->bi_idx) { 24 + if (bio->bi_iter.bi_idx) { 25 25 struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio)); 26 26 27 27 memcpy(clone->bi_io_vec, 28 28 bio_iovec(bio), 29 29 bio_segments(bio) * sizeof(struct bio_vec)); 30 30 31 - clone->bi_sector = bio->bi_sector; 31 + clone->bi_iter.bi_sector = bio->bi_iter.bi_sector; 32 32 clone->bi_bdev = bio->bi_bdev; 33 33 clone->bi_rw = bio->bi_rw; 34 34 clone->bi_vcnt = bio_segments(bio); 35 - clone->bi_size = bio->bi_size; 35 + clone->bi_iter.bi_size = bio->bi_iter.bi_size; 36 36 37 37 clone->bi_private = bio; 38 38 clone->bi_end_io = bch_bi_idx_hack_endio; ··· 72 72 struct bio *bch_bio_split(struct bio *bio, int sectors, 73 73 gfp_t gfp, struct bio_set *bs) 74 74 { 75 - unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9; 75 + unsigned idx = bio->bi_iter.bi_idx, vcnt = 0, nbytes = sectors << 9; 76 76 struct bio_vec *bv; 77 77 struct bio *ret = NULL; 78 78 ··· 90 90 } 91 91 92 92 bio_for_each_segment(bv, bio, idx) { 93 - vcnt = idx - bio->bi_idx; 93 + vcnt = idx - bio->bi_iter.bi_idx; 94 94 95 95 if (!nbytes) { 96 96 ret = bio_alloc_bioset(gfp, vcnt, bs); ··· 119 119 } 120 120 out: 121 121 ret->bi_bdev = bio->bi_bdev; 122 - ret->bi_sector = bio->bi_sector; 123 - ret->bi_size = sectors << 9; 122 + ret->bi_iter.bi_sector = bio->bi_iter.bi_sector; 123 + ret->bi_iter.bi_size = sectors << 9; 124 124 ret->bi_rw = bio->bi_rw; 125 125 ret->bi_vcnt = vcnt; 126 126 ret->bi_max_vecs = vcnt; 127 127 128 - bio->bi_sector += sectors; 129 - bio->bi_size -= sectors << 9; 130 - bio->bi_idx = idx; 128 + bio->bi_iter.bi_sector += sectors; 129 + bio->bi_iter.bi_size -= sectors << 9; 130 + bio->bi_iter.bi_idx = idx; 131 131 132 132 if (bio_integrity(bio)) { 133 133 if (bio_integrity_clone(ret, bio, gfp)) { ··· 162 162 bio_for_each_segment(bv, bio, i) { 163 163 struct bvec_merge_data bvm = { 164 164 .bi_bdev = bio->bi_bdev, 165 - .bi_sector = bio->bi_sector, 165 + .bi_sector = bio->bi_iter.bi_sector, 166 166 .bi_size = ret << 9, 167 167 .bi_rw = bio->bi_rw, 168 168 }; ··· 272 272 { 273 273 struct bbio *b = container_of(bio, struct bbio, bio); 274 274 275 - bio->bi_sector = PTR_OFFSET(&b->key, 0); 276 - bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; 275 + bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); 276 + bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; 277 277 278 278 b->submit_time_us = local_clock_us(); 279 279 closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
+6 -6
drivers/md/bcache/journal.c
··· 51 51 len = min_t(unsigned, left, PAGE_SECTORS * 8); 52 52 53 53 bio_reset(bio); 54 - bio->bi_sector = bucket + offset; 54 + bio->bi_iter.bi_sector = bucket + offset; 55 55 bio->bi_bdev = ca->bdev; 56 56 bio->bi_rw = READ; 57 - bio->bi_size = len << 9; 57 + bio->bi_iter.bi_size = len << 9; 58 58 59 59 bio->bi_end_io = journal_read_endio; 60 60 bio->bi_private = &cl; ··· 437 437 atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); 438 438 439 439 bio_init(bio); 440 - bio->bi_sector = bucket_to_sector(ca->set, 440 + bio->bi_iter.bi_sector = bucket_to_sector(ca->set, 441 441 ca->sb.d[ja->discard_idx]); 442 442 bio->bi_bdev = ca->bdev; 443 443 bio->bi_rw = REQ_WRITE|REQ_DISCARD; 444 444 bio->bi_max_vecs = 1; 445 445 bio->bi_io_vec = bio->bi_inline_vecs; 446 - bio->bi_size = bucket_bytes(ca); 446 + bio->bi_iter.bi_size = bucket_bytes(ca); 447 447 bio->bi_end_io = journal_discard_endio; 448 448 449 449 closure_get(&ca->set->cl); ··· 608 608 atomic_long_add(sectors, &ca->meta_sectors_written); 609 609 610 610 bio_reset(bio); 611 - bio->bi_sector = PTR_OFFSET(k, i); 611 + bio->bi_iter.bi_sector = PTR_OFFSET(k, i); 612 612 bio->bi_bdev = ca->bdev; 613 613 bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; 614 - bio->bi_size = sectors << 9; 614 + bio->bi_iter.bi_size = sectors << 9; 615 615 616 616 bio->bi_end_io = journal_write_endio; 617 617 bio->bi_private = w;
+2 -2
drivers/md/bcache/movinggc.c
··· 82 82 bio_get(bio); 83 83 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); 84 84 85 - bio->bi_size = KEY_SIZE(&io->w->key) << 9; 85 + bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9; 86 86 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key), 87 87 PAGE_SECTORS); 88 88 bio->bi_private = &io->cl; ··· 98 98 if (!op->error) { 99 99 moving_init(io); 100 100 101 - io->bio.bio.bi_sector = KEY_START(&io->w->key); 101 + io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key); 102 102 op->write_prio = 1; 103 103 op->bio = &io->bio.bio; 104 104
+29 -29
drivers/md/bcache/request.c
··· 261 261 struct bio *bio = op->bio; 262 262 263 263 pr_debug("invalidating %i sectors from %llu", 264 - bio_sectors(bio), (uint64_t) bio->bi_sector); 264 + bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); 265 265 266 266 while (bio_sectors(bio)) { 267 267 unsigned sectors = min(bio_sectors(bio), ··· 270 270 if (bch_keylist_realloc(&op->insert_keys, 0, op->c)) 271 271 goto out; 272 272 273 - bio->bi_sector += sectors; 274 - bio->bi_size -= sectors << 9; 273 + bio->bi_iter.bi_sector += sectors; 274 + bio->bi_iter.bi_size -= sectors << 9; 275 275 276 276 bch_keylist_add(&op->insert_keys, 277 - &KEY(op->inode, bio->bi_sector, sectors)); 277 + &KEY(op->inode, bio->bi_iter.bi_sector, sectors)); 278 278 } 279 279 280 280 op->insert_data_done = true; ··· 364 364 k = op->insert_keys.top; 365 365 bkey_init(k); 366 366 SET_KEY_INODE(k, op->inode); 367 - SET_KEY_OFFSET(k, bio->bi_sector); 367 + SET_KEY_OFFSET(k, bio->bi_iter.bi_sector); 368 368 369 369 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), 370 370 op->write_point, op->write_prio, ··· 522 522 (bio->bi_rw & REQ_WRITE))) 523 523 goto skip; 524 524 525 - if (bio->bi_sector & (c->sb.block_size - 1) || 525 + if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || 526 526 bio_sectors(bio) & (c->sb.block_size - 1)) { 527 527 pr_debug("skipping unaligned io"); 528 528 goto skip; ··· 546 546 547 547 spin_lock(&dc->io_lock); 548 548 549 - hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash) 550 - if (i->last == bio->bi_sector && 549 + hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) 550 + if (i->last == bio->bi_iter.bi_sector && 551 551 time_before(jiffies, i->jiffies)) 552 552 goto found; 553 553 ··· 556 556 add_sequential(task); 557 557 i->sequential = 0; 558 558 found: 559 - if (i->sequential + bio->bi_size > i->sequential) 560 - i->sequential += bio->bi_size; 559 + if (i->sequential + bio->bi_iter.bi_size > i->sequential) 560 + i->sequential += bio->bi_iter.bi_size; 561 561 562 562 i->last = bio_end_sector(bio); 563 563 i->jiffies = jiffies + msecs_to_jiffies(5000); ··· 650 650 struct bkey *bio_key; 651 651 unsigned ptr; 652 652 653 - if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0) 653 + if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) 654 654 return MAP_CONTINUE; 655 655 656 656 if (KEY_INODE(k) != s->iop.inode || 657 - KEY_START(k) > bio->bi_sector) { 657 + KEY_START(k) > bio->bi_iter.bi_sector) { 658 658 unsigned bio_sectors = bio_sectors(bio); 659 659 unsigned sectors = KEY_INODE(k) == s->iop.inode 660 660 ? min_t(uint64_t, INT_MAX, 661 - KEY_START(k) - bio->bi_sector) 661 + KEY_START(k) - bio->bi_iter.bi_sector) 662 662 : INT_MAX; 663 663 664 664 int ret = s->d->cache_miss(b, s, bio, sectors); ··· 681 681 s->read_dirty_data = true; 682 682 683 683 n = bch_bio_split(bio, min_t(uint64_t, INT_MAX, 684 - KEY_OFFSET(k) - bio->bi_sector), 684 + KEY_OFFSET(k) - bio->bi_iter.bi_sector), 685 685 GFP_NOIO, s->d->bio_split); 686 686 687 687 bio_key = &container_of(n, struct bbio, bio)->key; 688 688 bch_bkey_copy_single_ptr(bio_key, k, ptr); 689 689 690 - bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key); 690 + bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key); 691 691 bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); 692 692 693 693 n->bi_end_io = bch_cache_read_endio; ··· 714 714 struct bio *bio = &s->bio.bio; 715 715 716 716 int ret = bch_btree_map_keys(&s->op, s->iop.c, 717 - &KEY(s->iop.inode, bio->bi_sector, 0), 717 + &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), 718 718 cache_lookup_fn, MAP_END_KEY); 719 719 if (ret == -EAGAIN) 720 720 continue_at(cl, cache_lookup, bcache_wq); ··· 872 872 873 873 if (s->iop.bio) { 874 874 bio_reset(s->iop.bio); 875 - s->iop.bio->bi_sector = s->cache_miss->bi_sector; 875 + s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; 876 876 s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; 877 - s->iop.bio->bi_size = s->insert_bio_sectors << 9; 877 + s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; 878 878 bch_bio_map(s->iop.bio, NULL); 879 879 880 880 bio_copy_data(s->cache_miss, s->iop.bio); ··· 937 937 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); 938 938 939 939 s->iop.replace_key = KEY(s->iop.inode, 940 - bio->bi_sector + s->insert_bio_sectors, 940 + bio->bi_iter.bi_sector + s->insert_bio_sectors, 941 941 s->insert_bio_sectors); 942 942 943 943 ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key); ··· 957 957 if (!cache_bio) 958 958 goto out_submit; 959 959 960 - cache_bio->bi_sector = miss->bi_sector; 961 - cache_bio->bi_bdev = miss->bi_bdev; 962 - cache_bio->bi_size = s->insert_bio_sectors << 9; 960 + cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector; 961 + cache_bio->bi_bdev = miss->bi_bdev; 962 + cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; 963 963 964 964 cache_bio->bi_end_io = request_endio; 965 965 cache_bio->bi_private = &s->cl; ··· 1009 1009 { 1010 1010 struct closure *cl = &s->cl; 1011 1011 struct bio *bio = &s->bio.bio; 1012 - struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0); 1012 + struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0); 1013 1013 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); 1014 1014 1015 1015 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); ··· 1104 1104 part_stat_unlock(); 1105 1105 1106 1106 bio->bi_bdev = dc->bdev; 1107 - bio->bi_sector += dc->sb.data_offset; 1107 + bio->bi_iter.bi_sector += dc->sb.data_offset; 1108 1108 1109 1109 if (cached_dev_get(dc)) { 1110 1110 s = search_alloc(bio, d); 1111 1111 trace_bcache_request_start(s->d, bio); 1112 1112 1113 - if (!bio->bi_size) { 1113 + if (!bio->bi_iter.bi_size) { 1114 1114 /* 1115 1115 * can't call bch_journal_meta from under 1116 1116 * generic_make_request ··· 1197 1197 sectors -= j; 1198 1198 } 1199 1199 1200 - bio_advance(bio, min(sectors << 9, bio->bi_size)); 1200 + bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size)); 1201 1201 1202 - if (!bio->bi_size) 1202 + if (!bio->bi_iter.bi_size) 1203 1203 return MAP_DONE; 1204 1204 1205 1205 return MAP_CONTINUE; ··· 1233 1233 1234 1234 trace_bcache_request_start(s->d, bio); 1235 1235 1236 - if (!bio->bi_size) { 1236 + if (!bio->bi_iter.bi_size) { 1237 1237 /* 1238 1238 * can't call bch_journal_meta from under 1239 1239 * generic_make_request ··· 1243 1243 bcache_wq); 1244 1244 } else if (rw) { 1245 1245 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, 1246 - &KEY(d->id, bio->bi_sector, 0), 1246 + &KEY(d->id, bio->bi_iter.bi_sector, 0), 1247 1247 &KEY(d->id, bio_end_sector(bio), 0)); 1248 1248 1249 1249 s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
+8 -8
drivers/md/bcache/super.c
··· 233 233 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); 234 234 unsigned i; 235 235 236 - bio->bi_sector = SB_SECTOR; 237 - bio->bi_rw = REQ_SYNC|REQ_META; 238 - bio->bi_size = SB_SIZE; 236 + bio->bi_iter.bi_sector = SB_SECTOR; 237 + bio->bi_rw = REQ_SYNC|REQ_META; 238 + bio->bi_iter.bi_size = SB_SIZE; 239 239 bch_bio_map(bio, NULL); 240 240 241 241 out->offset = cpu_to_le64(sb->offset); ··· 347 347 struct bio *bio = bch_bbio_alloc(c); 348 348 349 349 bio->bi_rw = REQ_SYNC|REQ_META|rw; 350 - bio->bi_size = KEY_SIZE(k) << 9; 350 + bio->bi_iter.bi_size = KEY_SIZE(k) << 9; 351 351 352 352 bio->bi_end_io = uuid_endio; 353 353 bio->bi_private = cl; ··· 503 503 504 504 closure_init_stack(cl); 505 505 506 - bio->bi_sector = bucket * ca->sb.bucket_size; 507 - bio->bi_bdev = ca->bdev; 508 - bio->bi_rw = REQ_SYNC|REQ_META|rw; 509 - bio->bi_size = bucket_bytes(ca); 506 + bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; 507 + bio->bi_bdev = ca->bdev; 508 + bio->bi_rw = REQ_SYNC|REQ_META|rw; 509 + bio->bi_iter.bi_size = bucket_bytes(ca); 510 510 511 511 bio->bi_end_io = prio_endio; 512 512 bio->bi_private = ca;
+2 -2
drivers/md/bcache/util.c
··· 218 218 219 219 void bch_bio_map(struct bio *bio, void *base) 220 220 { 221 - size_t size = bio->bi_size; 221 + size_t size = bio->bi_iter.bi_size; 222 222 struct bio_vec *bv = bio->bi_io_vec; 223 223 224 - BUG_ON(!bio->bi_size); 224 + BUG_ON(!bio->bi_iter.bi_size); 225 225 BUG_ON(bio->bi_vcnt); 226 226 227 227 bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
+3 -3
drivers/md/bcache/writeback.c
··· 113 113 if (!io->dc->writeback_percent) 114 114 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); 115 115 116 - bio->bi_size = KEY_SIZE(&w->key) << 9; 116 + bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9; 117 117 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS); 118 118 bio->bi_private = w; 119 119 bio->bi_io_vec = bio->bi_inline_vecs; ··· 186 186 187 187 dirty_init(w); 188 188 io->bio.bi_rw = WRITE; 189 - io->bio.bi_sector = KEY_START(&w->key); 189 + io->bio.bi_iter.bi_sector = KEY_START(&w->key); 190 190 io->bio.bi_bdev = io->dc->bdev; 191 191 io->bio.bi_end_io = dirty_endio; 192 192 ··· 255 255 io->dc = dc; 256 256 257 257 dirty_init(w); 258 - io->bio.bi_sector = PTR_OFFSET(&w->key, 0); 258 + io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); 259 259 io->bio.bi_bdev = PTR_CACHE(dc->disk.c, 260 260 &w->key, 0)->bdev; 261 261 io->bio.bi_rw = READ;
+1 -1
drivers/md/bcache/writeback.h
··· 50 50 return false; 51 51 52 52 if (dc->partial_stripes_expensive && 53 - bcache_dev_stripe_dirty(dc, bio->bi_sector, 53 + bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector, 54 54 bio_sectors(bio))) 55 55 return true; 56 56
+6 -6
drivers/md/dm-bio-record.h
··· 40 40 { 41 41 unsigned i; 42 42 43 - bd->bi_sector = bio->bi_sector; 43 + bd->bi_sector = bio->bi_iter.bi_sector; 44 44 bd->bi_bdev = bio->bi_bdev; 45 - bd->bi_size = bio->bi_size; 46 - bd->bi_idx = bio->bi_idx; 45 + bd->bi_size = bio->bi_iter.bi_size; 46 + bd->bi_idx = bio->bi_iter.bi_idx; 47 47 bd->bi_flags = bio->bi_flags; 48 48 49 49 for (i = 0; i < bio->bi_vcnt; i++) { ··· 56 56 { 57 57 unsigned i; 58 58 59 - bio->bi_sector = bd->bi_sector; 59 + bio->bi_iter.bi_sector = bd->bi_sector; 60 60 bio->bi_bdev = bd->bi_bdev; 61 - bio->bi_size = bd->bi_size; 62 - bio->bi_idx = bd->bi_idx; 61 + bio->bi_iter.bi_size = bd->bi_size; 62 + bio->bi_iter.bi_idx = bd->bi_idx; 63 63 bio->bi_flags = bd->bi_flags; 64 64 65 65 for (i = 0; i < bio->bi_vcnt; i++) {
+1 -1
drivers/md/dm-bufio.c
··· 538 538 bio_init(&b->bio); 539 539 b->bio.bi_io_vec = b->bio_vec; 540 540 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; 541 - b->bio.bi_sector = block << b->c->sectors_per_block_bits; 541 + b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; 542 542 b->bio.bi_bdev = b->c->bdev; 543 543 b->bio.bi_end_io = end_io; 544 544
+2 -2
drivers/md/dm-cache-policy-mq.c
··· 72 72 73 73 static void iot_update_stats(struct io_tracker *t, struct bio *bio) 74 74 { 75 - if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1) 75 + if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1) 76 76 t->nr_seq_samples++; 77 77 else { 78 78 /* ··· 87 87 t->nr_rand_samples++; 88 88 } 89 89 90 - t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1); 90 + t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1); 91 91 } 92 92 93 93 static void iot_check_for_pattern_switch(struct io_tracker *t)
+12 -10
drivers/md/dm-cache-target.c
··· 664 664 static void remap_to_cache(struct cache *cache, struct bio *bio, 665 665 dm_cblock_t cblock) 666 666 { 667 - sector_t bi_sector = bio->bi_sector; 667 + sector_t bi_sector = bio->bi_iter.bi_sector; 668 668 669 669 bio->bi_bdev = cache->cache_dev->bdev; 670 670 if (!block_size_is_power_of_two(cache)) 671 - bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) + 672 - sector_div(bi_sector, cache->sectors_per_block); 671 + bio->bi_iter.bi_sector = 672 + (from_cblock(cblock) * cache->sectors_per_block) + 673 + sector_div(bi_sector, cache->sectors_per_block); 673 674 else 674 - bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) | 675 - (bi_sector & (cache->sectors_per_block - 1)); 675 + bio->bi_iter.bi_sector = 676 + (from_cblock(cblock) << cache->sectors_per_block_shift) | 677 + (bi_sector & (cache->sectors_per_block - 1)); 676 678 } 677 679 678 680 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) ··· 714 712 715 713 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) 716 714 { 717 - sector_t block_nr = bio->bi_sector; 715 + sector_t block_nr = bio->bi_iter.bi_sector; 718 716 719 717 if (!block_size_is_power_of_two(cache)) 720 718 (void) sector_div(block_nr, cache->sectors_per_block); ··· 1029 1027 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) 1030 1028 { 1031 1029 return (bio_data_dir(bio) == WRITE) && 1032 - (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); 1030 + (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); 1033 1031 } 1034 1032 1035 1033 static void avoid_copy(struct dm_cache_migration *mg) ··· 1254 1252 size_t pb_data_size = get_per_bio_data_size(cache); 1255 1253 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 1256 1254 1257 - BUG_ON(bio->bi_size); 1255 + BUG_ON(bio->bi_iter.bi_size); 1258 1256 if (!pb->req_nr) 1259 1257 remap_to_origin(cache, bio); 1260 1258 else ··· 1277 1275 */ 1278 1276 static void process_discard_bio(struct cache *cache, struct bio *bio) 1279 1277 { 1280 - dm_block_t start_block = dm_sector_div_up(bio->bi_sector, 1278 + dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector, 1281 1279 cache->discard_block_size); 1282 - dm_block_t end_block = bio->bi_sector + bio_sectors(bio); 1280 + dm_block_t end_block = bio_end_sector(bio); 1283 1281 dm_block_t b; 1284 1282 1285 1283 end_block = block_div(end_block, cache->discard_block_size);
+10 -9
drivers/md/dm-crypt.c
··· 828 828 ctx->bio_out = bio_out; 829 829 ctx->offset_in = 0; 830 830 ctx->offset_out = 0; 831 - ctx->idx_in = bio_in ? bio_in->bi_idx : 0; 832 - ctx->idx_out = bio_out ? bio_out->bi_idx : 0; 831 + ctx->idx_in = bio_in ? bio_in->bi_iter.bi_idx : 0; 832 + ctx->idx_out = bio_out ? bio_out->bi_iter.bi_idx : 0; 833 833 ctx->cc_sector = sector + cc->iv_offset; 834 834 init_completion(&ctx->restart); 835 835 } ··· 1021 1021 size -= len; 1022 1022 } 1023 1023 1024 - if (!clone->bi_size) { 1024 + if (!clone->bi_iter.bi_size) { 1025 1025 bio_put(clone); 1026 1026 return NULL; 1027 1027 } ··· 1161 1161 crypt_inc_pending(io); 1162 1162 1163 1163 clone_init(io, clone); 1164 - clone->bi_sector = cc->start + io->sector; 1164 + clone->bi_iter.bi_sector = cc->start + io->sector; 1165 1165 1166 1166 generic_make_request(clone); 1167 1167 return 0; ··· 1209 1209 /* crypt_convert should have filled the clone bio */ 1210 1210 BUG_ON(io->ctx.idx_out < clone->bi_vcnt); 1211 1211 1212 - clone->bi_sector = cc->start + io->sector; 1212 + clone->bi_iter.bi_sector = cc->start + io->sector; 1213 1213 1214 1214 if (async) 1215 1215 kcryptd_queue_io(io); ··· 1224 1224 struct dm_crypt_io *new_io; 1225 1225 int crypt_finished; 1226 1226 unsigned out_of_pages = 0; 1227 - unsigned remaining = io->base_bio->bi_size; 1227 + unsigned remaining = io->base_bio->bi_iter.bi_size; 1228 1228 sector_t sector = io->sector; 1229 1229 int r; 1230 1230 ··· 1248 1248 io->ctx.bio_out = clone; 1249 1249 io->ctx.idx_out = 0; 1250 1250 1251 - remaining -= clone->bi_size; 1251 + remaining -= clone->bi_iter.bi_size; 1252 1252 sector += bio_sectors(clone); 1253 1253 1254 1254 crypt_inc_pending(io); ··· 1869 1869 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { 1870 1870 bio->bi_bdev = cc->dev->bdev; 1871 1871 if (bio_sectors(bio)) 1872 - bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); 1872 + bio->bi_iter.bi_sector = cc->start + 1873 + dm_target_offset(ti, bio->bi_iter.bi_sector); 1873 1874 return DM_MAPIO_REMAPPED; 1874 1875 } 1875 1876 1876 - io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector)); 1877 + io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); 1877 1878 1878 1879 if (bio_data_dir(io->base_bio) == READ) { 1879 1880 if (kcryptd_io_read(io, GFP_NOWAIT))
+4 -3
drivers/md/dm-delay.c
··· 281 281 if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { 282 282 bio->bi_bdev = dc->dev_write->bdev; 283 283 if (bio_sectors(bio)) 284 - bio->bi_sector = dc->start_write + 285 - dm_target_offset(ti, bio->bi_sector); 284 + bio->bi_iter.bi_sector = dc->start_write + 285 + dm_target_offset(ti, bio->bi_iter.bi_sector); 286 286 287 287 return delay_bio(dc, dc->write_delay, bio); 288 288 } 289 289 290 290 bio->bi_bdev = dc->dev_read->bdev; 291 - bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector); 291 + bio->bi_iter.bi_sector = dc->start_read + 292 + dm_target_offset(ti, bio->bi_iter.bi_sector); 292 293 293 294 return delay_bio(dc, dc->read_delay, bio); 294 295 }
+4 -3
drivers/md/dm-flakey.c
··· 248 248 249 249 bio->bi_bdev = fc->dev->bdev; 250 250 if (bio_sectors(bio)) 251 - bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); 251 + bio->bi_iter.bi_sector = 252 + flakey_map_sector(ti, bio->bi_iter.bi_sector); 252 253 } 253 254 254 255 static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) ··· 266 265 DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " 267 266 "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", 268 267 bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, 269 - (bio_data_dir(bio) == WRITE) ? 'w' : 'r', 270 - bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes); 268 + (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw, 269 + (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); 271 270 } 272 271 } 273 272
+3 -3
drivers/md/dm-io.c
··· 304 304 dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); 305 305 306 306 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); 307 - bio->bi_sector = where->sector + (where->count - remaining); 307 + bio->bi_iter.bi_sector = where->sector + (where->count - remaining); 308 308 bio->bi_bdev = where->bdev; 309 309 bio->bi_end_io = endio; 310 310 store_io_and_region_in_bio(bio, io, region); 311 311 312 312 if (rw & REQ_DISCARD) { 313 313 num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); 314 - bio->bi_size = num_sectors << SECTOR_SHIFT; 314 + bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; 315 315 remaining -= num_sectors; 316 316 } else if (rw & REQ_WRITE_SAME) { 317 317 /* ··· 320 320 dp->get_page(dp, &page, &len, &offset); 321 321 bio_add_page(bio, page, logical_block_size, offset); 322 322 num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); 323 - bio->bi_size = num_sectors << SECTOR_SHIFT; 323 + bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; 324 324 325 325 offset = 0; 326 326 remaining -= num_sectors;
+2 -1
drivers/md/dm-linear.c
··· 85 85 86 86 bio->bi_bdev = lc->dev->bdev; 87 87 if (bio_sectors(bio)) 88 - bio->bi_sector = linear_map_sector(ti, bio->bi_sector); 88 + bio->bi_iter.bi_sector = 89 + linear_map_sector(ti, bio->bi_iter.bi_sector); 89 90 } 90 91 91 92 static int linear_map(struct dm_target *ti, struct bio *bio)
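The dm-linear hunk above is the smallest instance of the remapping idiom that dm-delay, dm-flakey and dm-switch repeat: point bi_bdev at the backing device and rewrite bi_iter.bi_sector through dm_target_offset(). A condensed sketch of that idiom — struct example_ctx is hypothetical; the real targets keep their start sector and backing device in their own private structures:

	static int example_map(struct dm_target *ti, struct bio *bio)
	{
		struct example_ctx *c = ti->private;	/* hypothetical context */

		bio->bi_bdev = c->dev->bdev;
		if (bio_sectors(bio))	/* empty flushes carry no sector to remap */
			bio->bi_iter.bi_sector = c->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}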
+8 -8
drivers/md/dm-raid1.c
··· 432 432 region_t region = dm_rh_bio_to_region(ms->rh, bio); 433 433 434 434 if (log->type->in_sync(log, region, 0)) 435 - return choose_mirror(ms, bio->bi_sector) ? 1 : 0; 435 + return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0; 436 436 437 437 return 0; 438 438 } ··· 442 442 */ 443 443 static sector_t map_sector(struct mirror *m, struct bio *bio) 444 444 { 445 - if (unlikely(!bio->bi_size)) 445 + if (unlikely(!bio->bi_iter.bi_size)) 446 446 return 0; 447 - return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector); 447 + return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); 448 448 } 449 449 450 450 static void map_bio(struct mirror *m, struct bio *bio) 451 451 { 452 452 bio->bi_bdev = m->dev->bdev; 453 - bio->bi_sector = map_sector(m, bio); 453 + bio->bi_iter.bi_sector = map_sector(m, bio); 454 454 } 455 455 456 456 static void map_region(struct dm_io_region *io, struct mirror *m, ··· 527 527 struct dm_io_request io_req = { 528 528 .bi_rw = READ, 529 529 .mem.type = DM_IO_BVEC, 530 - .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, 530 + .mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx, 531 531 .notify.fn = read_callback, 532 532 .notify.context = bio, 533 533 .client = m->ms->io_client, ··· 559 559 * We can only read balance if the region is in sync. 560 560 */ 561 561 if (likely(region_in_sync(ms, region, 1))) 562 - m = choose_mirror(ms, bio->bi_sector); 562 + m = choose_mirror(ms, bio->bi_iter.bi_sector); 563 563 else if (m && atomic_read(&m->error_count)) 564 564 m = NULL; 565 565 ··· 630 630 struct dm_io_request io_req = { 631 631 .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), 632 632 .mem.type = DM_IO_BVEC, 633 - .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, 633 + .mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx, 634 634 .notify.fn = write_callback, 635 635 .notify.context = bio, 636 636 .client = ms->io_client, ··· 1181 1181 * The region is in-sync and we can perform reads directly. 1182 1182 * Store enough information so we can retry if it fails. 1183 1183 */ 1184 - m = choose_mirror(ms, bio->bi_sector); 1184 + m = choose_mirror(ms, bio->bi_iter.bi_sector); 1185 1185 if (unlikely(!m)) 1186 1186 return -EIO; 1187 1187
+2 -1
drivers/md/dm-region-hash.c
··· 126 126 127 127 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) 128 128 { 129 - return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); 129 + return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - 130 + rh->target_begin); 130 131 } 131 132 EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); 132 133
+9 -9
drivers/md/dm-snap.c
··· 1562 1562 struct bio *bio, chunk_t chunk) 1563 1563 { 1564 1564 bio->bi_bdev = s->cow->bdev; 1565 - bio->bi_sector = chunk_to_sector(s->store, 1566 - dm_chunk_number(e->new_chunk) + 1567 - (chunk - e->old_chunk)) + 1568 - (bio->bi_sector & 1569 - s->store->chunk_mask); 1565 + bio->bi_iter.bi_sector = 1566 + chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + 1567 + (chunk - e->old_chunk)) + 1568 + (bio->bi_iter.bi_sector & s->store->chunk_mask); 1570 1569 } 1571 1570 1572 1571 static int snapshot_map(struct dm_target *ti, struct bio *bio) ··· 1583 1584 return DM_MAPIO_REMAPPED; 1584 1585 } 1585 1586 1586 - chunk = sector_to_chunk(s->store, bio->bi_sector); 1587 + chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); 1587 1588 1588 1589 /* Full snapshots are not usable */ 1589 1590 /* To get here the table must be live so s->active is always set. */ ··· 1644 1645 r = DM_MAPIO_SUBMITTED; 1645 1646 1646 1647 if (!pe->started && 1647 - bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) { 1648 + bio->bi_iter.bi_size == 1649 + (s->store->chunk_size << SECTOR_SHIFT)) { 1648 1650 pe->started = 1; 1649 1651 up_write(&s->lock); 1650 1652 start_full_bio(pe, bio); ··· 1701 1701 return DM_MAPIO_REMAPPED; 1702 1702 } 1703 1703 1704 - chunk = sector_to_chunk(s->store, bio->bi_sector); 1704 + chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); 1705 1705 1706 1706 down_write(&s->lock); 1707 1707 ··· 2038 2038 down_read(&_origins_lock); 2039 2039 o = __lookup_origin(origin->bdev); 2040 2040 if (o) 2041 - r = __origin_write(&o->snapshots, bio->bi_sector, bio); 2041 + r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); 2042 2042 up_read(&_origins_lock); 2043 2043 2044 2044 return r;
+8 -5
drivers/md/dm-stripe.c
··· 259 259 { 260 260 sector_t begin, end; 261 261 262 - stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin); 262 + stripe_map_range_sector(sc, bio->bi_iter.bi_sector, 263 + target_stripe, &begin); 263 264 stripe_map_range_sector(sc, bio_end_sector(bio), 264 265 target_stripe, &end); 265 266 if (begin < end) { 266 267 bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; 267 - bio->bi_sector = begin + sc->stripe[target_stripe].physical_start; 268 - bio->bi_size = to_bytes(end - begin); 268 + bio->bi_iter.bi_sector = begin + 269 + sc->stripe[target_stripe].physical_start; 270 + bio->bi_iter.bi_size = to_bytes(end - begin); 269 271 return DM_MAPIO_REMAPPED; 270 272 } else { 271 273 /* The range doesn't map to the target stripe */ ··· 295 293 return stripe_map_range(sc, bio, target_bio_nr); 296 294 } 297 295 298 - stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector); 296 + stripe_map_sector(sc, bio->bi_iter.bi_sector, 297 + &stripe, &bio->bi_iter.bi_sector); 299 298 300 - bio->bi_sector += sc->stripe[stripe].physical_start; 299 + bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; 301 300 bio->bi_bdev = sc->stripe[stripe].dev->bdev; 302 301 303 302 return DM_MAPIO_REMAPPED;
+2 -2
drivers/md/dm-switch.c
··· 311 311 static int switch_map(struct dm_target *ti, struct bio *bio) 312 312 { 313 313 struct switch_ctx *sctx = ti->private; 314 - sector_t offset = dm_target_offset(ti, bio->bi_sector); 314 + sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); 315 315 unsigned path_nr = switch_get_path_nr(sctx, offset); 316 316 317 317 bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; 318 - bio->bi_sector = sctx->path_list[path_nr].start + offset; 318 + bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; 319 319 320 320 return DM_MAPIO_REMAPPED; 321 321 }
+12 -10
drivers/md/dm-thin.c
··· 413 413 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) 414 414 { 415 415 struct pool *pool = tc->pool; 416 - sector_t block_nr = bio->bi_sector; 416 + sector_t block_nr = bio->bi_iter.bi_sector; 417 417 418 418 if (block_size_is_power_of_two(pool)) 419 419 block_nr >>= pool->sectors_per_block_shift; ··· 426 426 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) 427 427 { 428 428 struct pool *pool = tc->pool; 429 - sector_t bi_sector = bio->bi_sector; 429 + sector_t bi_sector = bio->bi_iter.bi_sector; 430 430 431 431 bio->bi_bdev = tc->pool_dev->bdev; 432 432 if (block_size_is_power_of_two(pool)) 433 - bio->bi_sector = (block << pool->sectors_per_block_shift) | 434 - (bi_sector & (pool->sectors_per_block - 1)); 433 + bio->bi_iter.bi_sector = 434 + (block << pool->sectors_per_block_shift) | 435 + (bi_sector & (pool->sectors_per_block - 1)); 435 436 else 436 - bio->bi_sector = (block * pool->sectors_per_block) + 437 + bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + 437 438 sector_div(bi_sector, pool->sectors_per_block); 438 439 } 439 440 ··· 722 721 */ 723 722 static int io_overlaps_block(struct pool *pool, struct bio *bio) 724 723 { 725 - return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT); 724 + return bio->bi_iter.bi_size == 725 + (pool->sectors_per_block << SECTOR_SHIFT); 726 726 } 727 727 728 728 static int io_overwrites_block(struct pool *pool, struct bio *bio) ··· 1132 1130 if (bio_detain(pool, &key, bio, &cell)) 1133 1131 return; 1134 1132 1135 - if (bio_data_dir(bio) == WRITE && bio->bi_size) 1133 + if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) 1136 1134 break_sharing(tc, bio, block, &key, lookup_result, cell); 1137 1135 else { 1138 1136 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); ··· 1155 1153 /* 1156 1154 * Remap empty bios (flushes) immediately, without provisioning. 1157 1155 */ 1158 - if (!bio->bi_size) { 1156 + if (!bio->bi_iter.bi_size) { 1159 1157 inc_all_io_entry(pool, bio); 1160 1158 cell_defer_no_holder(tc, cell); 1161 1159 ··· 1255 1253 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1256 1254 switch (r) { 1257 1255 case 0: 1258 - if (lookup_result.shared && (rw == WRITE) && bio->bi_size) 1256 + if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) 1259 1257 bio_io_error(bio); 1260 1258 else { 1261 1259 inc_all_io_entry(tc->pool, bio); ··· 2869 2867 2870 2868 static int thin_map(struct dm_target *ti, struct bio *bio) 2871 2869 { 2872 - bio->bi_sector = dm_target_offset(ti, bio->bi_sector); 2870 + bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); 2873 2871 2874 2872 return thin_bio_map(ti, bio); 2875 2873 }
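The remap() hunk above keeps a bio's offset within its thin-pool block while substituting the mapped data block: shift and mask when the block size is a power of two, sector_div() otherwise. A worked example with illustrative numbers that are not from the patch — sectors_per_block = 8, so sectors_per_block_shift = 3 and the mask is 7; a bio at sector 10 remapped to pool block 5:

	/* offset within the block:	10 & 7		= 2	*/
	/* remapped sector:		(5 << 3) | 2	= 42	*/

The sector_div() branch computes the same block-relative offset for block sizes where the shift/mask shortcut does not apply.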
+4 -4
drivers/md/dm-verity.c
··· 493 493 struct dm_verity_io *io; 494 494 495 495 bio->bi_bdev = v->data_dev->bdev; 496 - bio->bi_sector = verity_map_sector(v, bio->bi_sector); 496 + bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); 497 497 498 - if (((unsigned)bio->bi_sector | bio_sectors(bio)) & 498 + if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) & 499 499 ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) { 500 500 DMERR_LIMIT("unaligned io"); 501 501 return -EIO; ··· 514 514 io->v = v; 515 515 io->orig_bi_end_io = bio->bi_end_io; 516 516 io->orig_bi_private = bio->bi_private; 517 - io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); 518 - io->n_blocks = bio->bi_size >> v->data_dev_block_bits; 517 + io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); 518 + io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits; 519 519 520 520 bio->bi_end_io = verity_end_io; 521 521 bio->bi_private = io;
+13 -12
drivers/md/dm.c
··· 575 575 atomic_inc_return(&md->pending[rw])); 576 576 577 577 if (unlikely(dm_stats_used(&md->stats))) 578 - dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, 578 + dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, 579 579 bio_sectors(bio), false, 0, &io->stats_aux); 580 580 } 581 581 ··· 593 593 part_stat_unlock(); 594 594 595 595 if (unlikely(dm_stats_used(&md->stats))) 596 - dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, 596 + dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, 597 597 bio_sectors(bio), true, duration, &io->stats_aux); 598 598 599 599 /* ··· 742 742 if (io_error == DM_ENDIO_REQUEUE) 743 743 return; 744 744 745 - if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) { 745 + if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) { 746 746 /* 747 747 * Preflush done for flush with data, reissue 748 748 * without REQ_FLUSH. ··· 797 797 struct dm_rq_clone_bio_info *info = clone->bi_private; 798 798 struct dm_rq_target_io *tio = info->tio; 799 799 struct bio *bio = info->orig; 800 - unsigned int nr_bytes = info->orig->bi_size; 800 + unsigned int nr_bytes = info->orig->bi_iter.bi_size; 801 801 802 802 bio_put(clone); 803 803 ··· 1128 1128 * this io. 1129 1129 */ 1130 1130 atomic_inc(&tio->io->io_count); 1131 - sector = clone->bi_sector; 1131 + sector = clone->bi_iter.bi_sector; 1132 1132 r = ti->type->map(ti, clone); 1133 1133 if (r == DM_MAPIO_REMAPPED) { 1134 1134 /* the bio has been remapped so dispatch it */ ··· 1160 1160 1161 1161 static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len) 1162 1162 { 1163 - bio->bi_sector = sector; 1164 - bio->bi_size = to_bytes(len); 1163 + bio->bi_iter.bi_sector = sector; 1164 + bio->bi_iter.bi_size = to_bytes(len); 1165 1165 } 1166 1166 1167 1167 static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count) 1168 1168 { 1169 - bio->bi_idx = idx; 1169 + bio->bi_iter.bi_idx = idx; 1170 1170 bio->bi_vcnt = idx + bv_count; 1171 1171 bio->bi_flags &= ~(1 << BIO_SEG_VALID); 1172 1172 } ··· 1202 1202 clone->bi_rw = bio->bi_rw; 1203 1203 clone->bi_vcnt = 1; 1204 1204 clone->bi_io_vec->bv_offset = offset; 1205 - clone->bi_io_vec->bv_len = clone->bi_size; 1205 + clone->bi_io_vec->bv_len = clone->bi_iter.bi_size; 1206 1206 clone->bi_flags |= 1 << BIO_CLONED; 1207 1207 1208 1208 clone_bio_integrity(bio, clone, idx, len, offset, 1); ··· 1222 1222 bio_setup_sector(clone, sector, len); 1223 1223 bio_setup_bv(clone, idx, bv_count); 1224 1224 1225 - if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) 1225 + if (idx != bio->bi_iter.bi_idx || 1226 + clone->bi_iter.bi_size < bio->bi_iter.bi_size) 1226 1227 trim = 1; 1227 1228 clone_bio_integrity(bio, clone, idx, len, 0, trim); 1228 1229 } ··· 1511 1510 ci.io->bio = bio; 1512 1511 ci.io->md = md; 1513 1512 spin_lock_init(&ci.io->endio_lock); 1514 - ci.sector = bio->bi_sector; 1515 - ci.idx = bio->bi_idx; 1513 + ci.sector = bio->bi_iter.bi_sector; 1514 + ci.idx = bio->bi_iter.bi_idx; 1516 1515 1517 1516 start_io_acct(ci.io); 1518 1517
+12 -7
drivers/md/faulty.c
··· 74 74 { 75 75 struct bio *b = bio->bi_private; 76 76 77 - b->bi_size = bio->bi_size; 78 - b->bi_sector = bio->bi_sector; 77 + b->bi_iter.bi_size = bio->bi_iter.bi_size; 78 + b->bi_iter.bi_sector = bio->bi_iter.bi_sector; 79 79 80 80 bio_put(bio); 81 81 ··· 185 185 return; 186 186 } 187 187 188 - if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE)) 188 + if (check_sector(conf, bio->bi_iter.bi_sector, 189 + bio_end_sector(bio), WRITE)) 189 190 failit = 1; 190 191 if (check_mode(conf, WritePersistent)) { 191 - add_sector(conf, bio->bi_sector, WritePersistent); 192 + add_sector(conf, bio->bi_iter.bi_sector, 193 + WritePersistent); 192 194 failit = 1; 193 195 } 194 196 if (check_mode(conf, WriteTransient)) 195 197 failit = 1; 196 198 } else { 197 199 /* read request */ 198 - if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ)) 200 + if (check_sector(conf, bio->bi_iter.bi_sector, 201 + bio_end_sector(bio), READ)) 199 202 failit = 1; 200 203 if (check_mode(conf, ReadTransient)) 201 204 failit = 1; 202 205 if (check_mode(conf, ReadPersistent)) { 203 - add_sector(conf, bio->bi_sector, ReadPersistent); 206 + add_sector(conf, bio->bi_iter.bi_sector, 207 + ReadPersistent); 204 208 failit = 1; 205 209 } 206 210 if (check_mode(conf, ReadFixable)) { 207 - add_sector(conf, bio->bi_sector, ReadFixable); 211 + add_sector(conf, bio->bi_iter.bi_sector, 212 + ReadFixable); 208 213 failit = 1; 209 214 } 210 215 }
+6 -6
drivers/md/linear.c
··· 297 297 } 298 298 299 299 rcu_read_lock(); 300 - tmp_dev = which_dev(mddev, bio->bi_sector); 300 + tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); 301 301 start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; 302 302 303 303 304 - if (unlikely(bio->bi_sector >= (tmp_dev->end_sector) 305 - || (bio->bi_sector < start_sector))) { 304 + if (unlikely(bio->bi_iter.bi_sector >= (tmp_dev->end_sector) 305 + || (bio->bi_iter.bi_sector < start_sector))) { 306 306 char b[BDEVNAME_SIZE]; 307 307 308 308 printk(KERN_ERR 309 309 "md/linear:%s: make_request: Sector %llu out of bounds on " 310 310 "dev %s: %llu sectors, offset %llu\n", 311 311 mdname(mddev), 312 - (unsigned long long)bio->bi_sector, 312 + (unsigned long long)bio->bi_iter.bi_sector, 313 313 bdevname(tmp_dev->rdev->bdev, b), 314 314 (unsigned long long)tmp_dev->rdev->sectors, 315 315 (unsigned long long)start_sector); ··· 326 326 327 327 rcu_read_unlock(); 328 328 329 - bp = bio_split(bio, end_sector - bio->bi_sector); 329 + bp = bio_split(bio, end_sector - bio->bi_iter.bi_sector); 330 330 331 331 linear_make_request(mddev, &bp->bio1); 332 332 linear_make_request(mddev, &bp->bio2); ··· 335 335 } 336 336 337 337 bio->bi_bdev = tmp_dev->rdev->bdev; 338 - bio->bi_sector = bio->bi_sector - start_sector 338 + bio->bi_iter.bi_sector = bio->bi_iter.bi_sector - start_sector 339 339 + tmp_dev->rdev->data_offset; 340 340 rcu_read_unlock(); 341 341
+5 -5
drivers/md/md.c
··· 393 393 struct mddev *mddev = container_of(ws, struct mddev, flush_work); 394 394 struct bio *bio = mddev->flush_bio; 395 395 396 - if (bio->bi_size == 0) 396 + if (bio->bi_iter.bi_size == 0) 397 397 /* an empty barrier - all done */ 398 398 bio_endio(bio, 0); 399 399 else { ··· 754 754 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); 755 755 756 756 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; 757 - bio->bi_sector = sector; 757 + bio->bi_iter.bi_sector = sector; 758 758 bio_add_page(bio, page, size, 0); 759 759 bio->bi_private = rdev; 760 760 bio->bi_end_io = super_written; ··· 785 785 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? 786 786 rdev->meta_bdev : rdev->bdev; 787 787 if (metadata_op) 788 - bio->bi_sector = sector + rdev->sb_start; 788 + bio->bi_iter.bi_sector = sector + rdev->sb_start; 789 789 else if (rdev->mddev->reshape_position != MaxSector && 790 790 (rdev->mddev->reshape_backwards == 791 791 (sector >= rdev->mddev->reshape_position))) 792 - bio->bi_sector = sector + rdev->new_data_offset; 792 + bio->bi_iter.bi_sector = sector + rdev->new_data_offset; 793 793 else 794 - bio->bi_sector = sector + rdev->data_offset; 794 + bio->bi_iter.bi_sector = sector + rdev->data_offset; 795 795 bio_add_page(bio, page, size, 0); 796 796 submit_bio_wait(rw, bio); 797 797
+7 -6
drivers/md/multipath.c
··· 100 100 md_error (mp_bh->mddev, rdev); 101 101 printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", 102 102 bdevname(rdev->bdev,b), 103 - (unsigned long long)bio->bi_sector); 103 + (unsigned long long)bio->bi_iter.bi_sector); 104 104 multipath_reschedule_retry(mp_bh); 105 105 } else 106 106 multipath_end_bh_io(mp_bh, error); ··· 132 132 multipath = conf->multipaths + mp_bh->path; 133 133 134 134 mp_bh->bio = *bio; 135 - mp_bh->bio.bi_sector += multipath->rdev->data_offset; 135 + mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; 136 136 mp_bh->bio.bi_bdev = multipath->rdev->bdev; 137 137 mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT; 138 138 mp_bh->bio.bi_end_io = multipath_end_request; ··· 355 355 spin_unlock_irqrestore(&conf->device_lock, flags); 356 356 357 357 bio = &mp_bh->bio; 358 - bio->bi_sector = mp_bh->master_bio->bi_sector; 358 + bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; 359 359 360 360 if ((mp_bh->path = multipath_map (conf))<0) { 361 361 printk(KERN_ALERT "multipath: %s: unrecoverable IO read" 362 362 " error for block %llu\n", 363 363 bdevname(bio->bi_bdev,b), 364 - (unsigned long long)bio->bi_sector); 364 + (unsigned long long)bio->bi_iter.bi_sector); 365 365 multipath_end_bh_io(mp_bh, -EIO); 366 366 } else { 367 367 printk(KERN_ERR "multipath: %s: redirecting sector %llu" 368 368 " to another IO path\n", 369 369 bdevname(bio->bi_bdev,b), 370 - (unsigned long long)bio->bi_sector); 370 + (unsigned long long)bio->bi_iter.bi_sector); 371 371 *bio = *(mp_bh->master_bio); 372 - bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; 372 + bio->bi_iter.bi_sector += 373 + conf->multipaths[mp_bh->path].rdev->data_offset; 373 374 bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; 374 375 bio->bi_rw |= REQ_FAILFAST_TRANSPORT; 375 376 bio->bi_end_io = multipath_end_request;
+9 -7
drivers/md/raid0.c
··· 501 501 unsigned int chunk_sects, struct bio *bio) 502 502 { 503 503 if (likely(is_power_of_2(chunk_sects))) { 504 - return chunk_sects >= ((bio->bi_sector & (chunk_sects-1)) 504 + return chunk_sects >= 505 + ((bio->bi_iter.bi_sector & (chunk_sects-1)) 505 506 + bio_sectors(bio)); 506 507 } else{ 507 - sector_t sector = bio->bi_sector; 508 + sector_t sector = bio->bi_iter.bi_sector; 508 509 return chunk_sects >= (sector_div(sector, chunk_sects) 509 510 + bio_sectors(bio)); 510 511 } ··· 525 524 526 525 chunk_sects = mddev->chunk_sectors; 527 526 if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) { 528 - sector_t sector = bio->bi_sector; 527 + sector_t sector = bio->bi_iter.bi_sector; 529 528 struct bio_pair *bp; 530 529 /* Sanity check -- queue functions should prevent this happening */ 531 530 if (bio_segments(bio) > 1) ··· 545 544 return; 546 545 } 547 546 548 - sector_offset = bio->bi_sector; 547 + sector_offset = bio->bi_iter.bi_sector; 549 548 zone = find_zone(mddev->private, &sector_offset); 550 - tmp_dev = map_sector(mddev, zone, bio->bi_sector, 549 + tmp_dev = map_sector(mddev, zone, bio->bi_iter.bi_sector, 551 550 &sector_offset); 552 551 bio->bi_bdev = tmp_dev->bdev; 553 - bio->bi_sector = sector_offset + zone->dev_start + 552 + bio->bi_iter.bi_sector = sector_offset + zone->dev_start + 554 553 tmp_dev->data_offset; 555 554 556 555 if (unlikely((bio->bi_rw & REQ_DISCARD) && ··· 567 566 printk("md/raid0:%s: make_request bug: can't convert block across chunks" 568 567 " or bigger than %dk %llu %d\n", 569 568 mdname(mddev), chunk_sects / 2, 570 - (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2); 569 + (unsigned long long)bio->bi_iter.bi_sector, 570 + bio_sectors(bio) / 2); 571 571 572 572 bio_io_error(bio); 573 573 return;
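In the raid0 hunk above, is_io_in_chunk_boundary() accepts a bio only when it fits entirely inside the chunk that holds its first sector. Illustrative numbers for the power-of-two path (not from the patch) — chunk_sects = 128, bi_iter.bi_sector = 120, bio_sectors(bio) = 16:

	/* offset into the chunk:	120 & (128 - 1)	= 120		*/
	/* offset + length:		120 + 16	= 136 > 128	*/

The test fails, so the bio crosses a chunk boundary and make_request takes the bio_pair split path shown in the hunk.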
+39 -36
drivers/md/raid1.c
··· 229 229 int done; 230 230 struct r1conf *conf = r1_bio->mddev->private; 231 231 sector_t start_next_window = r1_bio->start_next_window; 232 - sector_t bi_sector = bio->bi_sector; 232 + sector_t bi_sector = bio->bi_iter.bi_sector; 233 233 234 234 if (bio->bi_phys_segments) { 235 235 unsigned long flags; ··· 265 265 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 266 266 pr_debug("raid1: sync end %s on sectors %llu-%llu\n", 267 267 (bio_data_dir(bio) == WRITE) ? "write" : "read", 268 - (unsigned long long) bio->bi_sector, 269 - (unsigned long long) bio->bi_sector + 270 - bio_sectors(bio) - 1); 268 + (unsigned long long) bio->bi_iter.bi_sector, 269 + (unsigned long long) bio_end_sector(bio) - 1); 271 270 272 271 call_bio_endio(r1_bio); 273 272 } ··· 465 466 struct bio *mbio = r1_bio->master_bio; 466 467 pr_debug("raid1: behind end write sectors" 467 468 " %llu-%llu\n", 468 - (unsigned long long) mbio->bi_sector, 469 - (unsigned long long) mbio->bi_sector + 470 - bio_sectors(mbio) - 1); 469 + (unsigned long long) mbio->bi_iter.bi_sector, 470 + (unsigned long long) bio_end_sector(mbio) - 1); 471 471 call_bio_endio(r1_bio); 472 472 } 473 473 } ··· 873 875 else if ((conf->next_resync - RESYNC_WINDOW_SECTORS 874 876 >= bio_end_sector(bio)) || 875 877 (conf->next_resync + NEXT_NORMALIO_DISTANCE 876 - <= bio->bi_sector)) 878 + <= bio->bi_iter.bi_sector)) 877 879 wait = false; 878 880 else 879 881 wait = true; ··· 911 913 912 914 if (bio && bio_data_dir(bio) == WRITE) { 913 915 if (conf->next_resync + NEXT_NORMALIO_DISTANCE 914 - <= bio->bi_sector) { 916 + <= bio->bi_iter.bi_sector) { 915 917 if (conf->start_next_window == MaxSector) 916 918 conf->start_next_window = 917 919 conf->next_resync + 918 920 NEXT_NORMALIO_DISTANCE; 919 921 920 922 if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) 921 - <= bio->bi_sector) 923 + <= bio->bi_iter.bi_sector) 922 924 conf->next_window_requests++; 923 925 else 924 926 conf->current_window_requests++; 925 927 } 926 - if (bio->bi_sector >= conf->start_next_window) 928 + if (bio->bi_iter.bi_sector >= conf->start_next_window) 927 929 sector = conf->start_next_window; 928 930 } ··· 1026 1028 if (bvecs[i].bv_page) 1027 1029 put_page(bvecs[i].bv_page); 1028 1030 kfree(bvecs); 1029 - pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); 1031 + pr_debug("%dB behind alloc failed, doing sync I/O\n", 1032 + bio->bi_iter.bi_size); 1030 1033 } 1031 1034 1032 1035 struct raid1_plug_cb { ··· 1107 1108 1108 1109 if (bio_data_dir(bio) == WRITE && 1109 1110 bio_end_sector(bio) > mddev->suspend_lo && 1110 - bio->bi_sector < mddev->suspend_hi) { 1111 + bio->bi_iter.bi_sector < mddev->suspend_hi) { 1111 1112 /* As the suspend_* range is controlled by 1112 1113 * userspace, we want an interruptible 1113 1114 * wait. ··· 1118 1119 prepare_to_wait(&conf->wait_barrier, 1119 1120 &w, TASK_INTERRUPTIBLE); 1120 1121 if (bio_end_sector(bio) <= mddev->suspend_lo || 1121 - bio->bi_sector >= mddev->suspend_hi) 1122 + bio->bi_iter.bi_sector >= mddev->suspend_hi) 1122 1123 break; 1123 1124 schedule(); 1124 1125 } ··· 1140 1141 r1_bio->sectors = bio_sectors(bio); 1141 1142 r1_bio->state = 0; 1142 1143 r1_bio->mddev = mddev; 1143 - r1_bio->sector = bio->bi_sector; 1144 + r1_bio->sector = bio->bi_iter.bi_sector; 1144 1145 1145 1146 /* We might need to issue multiple reads to different 1146 1147 * devices if there are bad blocks around, so we keep ··· 1180 1181 r1_bio->read_disk = rdisk; 1181 1182 1182 1183 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1183 - bio_trim(read_bio, r1_bio->sector - bio->bi_sector, 1184 + bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, 1184 1185 max_sectors); 1185 1186 1186 1187 r1_bio->bios[rdisk] = read_bio; 1187 1188 1188 - read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; 1189 + read_bio->bi_iter.bi_sector = r1_bio->sector + 1190 + mirror->rdev->data_offset; 1189 1191 read_bio->bi_bdev = mirror->rdev->bdev; 1190 1192 read_bio->bi_end_io = raid1_end_read_request; 1191 1193 read_bio->bi_rw = READ | do_sync; ··· 1198 1198 */ 1199 1199 1200 1200 sectors_handled = (r1_bio->sector + max_sectors 1201 - - bio->bi_sector); 1201 + - bio->bi_iter.bi_sector); 1202 1202 r1_bio->sectors = max_sectors; 1203 1203 spin_lock_irq(&conf->device_lock); 1204 1204 if (bio->bi_phys_segments == 0) ··· 1219 1219 r1_bio->sectors = bio_sectors(bio) - sectors_handled; 1220 1220 r1_bio->state = 0; 1221 1221 r1_bio->mddev = mddev; 1222 - r1_bio->sector = bio->bi_sector + sectors_handled; 1222 + r1_bio->sector = bio->bi_iter.bi_sector + 1223 + sectors_handled; 1223 1224 goto read_again; 1224 1225 } else 1225 1226 generic_make_request(read_bio); ··· 1323 1322 if (r1_bio->bios[j]) 1324 1323 rdev_dec_pending(conf->mirrors[j].rdev, mddev); 1325 1324 r1_bio->state = 0; 1326 - allow_barrier(conf, start_next_window, bio->bi_sector); 1325 + allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector); 1327 1326 md_wait_for_blocked_rdev(blocked_rdev, mddev); 1328 1327 start_next_window = wait_barrier(conf, bio); 1329 1328 /* ··· 1350 1349 bio->bi_phys_segments++; 1351 1350 spin_unlock_irq(&conf->device_lock); 1352 1351 } 1353 - sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector; 1352 + sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector; 1354 1353 1355 1354 atomic_set(&r1_bio->remaining, 1); 1356 1355 atomic_set(&r1_bio->behind_remaining, 0); ··· 1362 1361 continue; 1363 1362 1364 1363 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1365 - bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors); 1364 + bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); 1366 1365 1367 1366 if (first_clone) { 1368 1367 /* do behind I/O ? ··· 1396 1395 1397 1396 r1_bio->bios[i] = mbio; 1398 1397 1399 - mbio->bi_sector = (r1_bio->sector + 1398 + mbio->bi_iter.bi_sector = (r1_bio->sector + 1400 1399 conf->mirrors[i].rdev->data_offset); 1401 1400 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1402 1401 mbio->bi_end_io = raid1_end_write_request; ··· 1436 1435 r1_bio->sectors = bio_sectors(bio) - sectors_handled; 1437 1436 r1_bio->state = 0; 1438 1437 r1_bio->mddev = mddev; 1439 - r1_bio->sector = bio->bi_sector + sectors_handled; 1438 + r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled; 1440 1439 goto retry_write; 1441 1440 } ··· 1960 1959 /* fixup the bio for reuse */ 1961 1960 bio_reset(b); 1962 1961 b->bi_vcnt = vcnt; 1963 - b->bi_size = r1_bio->sectors << 9; 1964 - b->bi_sector = r1_bio->sector + 1962 + b->bi_iter.bi_size = r1_bio->sectors << 9; 1963 + b->bi_iter.bi_sector = r1_bio->sector + 1965 1964 conf->mirrors[i].rdev->data_offset; 1966 1965 b->bi_bdev = conf->mirrors[i].rdev->bdev; 1967 1966 b->bi_end_io = end_sync_read; 1968 1967 b->bi_private = r1_bio; 1969 1968 1970 - size = b->bi_size; 1969 + size = b->bi_iter.bi_size; 1971 1970 for (j = 0; j < vcnt ; j++) { 1972 1971 struct bio_vec *bi; 1973 1972 bi = &b->bi_io_vec[j]; ··· 2222 2221 } 2223 2222 2224 2223 wbio->bi_rw = WRITE; 2225 - wbio->bi_sector = r1_bio->sector; 2226 - wbio->bi_size = r1_bio->sectors << 9; 2224 + wbio->bi_iter.bi_sector = r1_bio->sector; 2225 + wbio->bi_iter.bi_size = r1_bio->sectors << 9; 2227 2226 2228 2227 bio_trim(wbio, sector - r1_bio->sector, sectors); 2229 - wbio->bi_sector += rdev->data_offset; 2228 + wbio->bi_iter.bi_sector += rdev->data_offset; 2230 2229 wbio->bi_bdev = rdev->bdev; 2231 2230 if (submit_bio_wait(WRITE, wbio) == 0) 2232 2231 /* failure! */ ··· 2340 2339 } 2341 2340 r1_bio->read_disk = disk; 2342 2341 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); 2343 - bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors); 2342 + bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, 2343 + max_sectors); 2344 2344 r1_bio->bios[r1_bio->read_disk] = bio; 2345 2345 rdev = conf->mirrors[disk].rdev; 2346 2346 printk_ratelimited(KERN_ERR ··· 2350 2348 mdname(mddev), 2351 2349 (unsigned long long)r1_bio->sector, 2352 2350 bdevname(rdev->bdev, b)); 2353 - bio->bi_sector = r1_bio->sector + rdev->data_offset; 2351 + bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; 2354 2352 bio->bi_bdev = rdev->bdev; 2355 2353 bio->bi_end_io = raid1_end_read_request; 2356 2354 bio->bi_rw = READ | do_sync; ··· 2359 2357 /* Drat - have to split this up more */ 2360 2358 struct bio *mbio = r1_bio->master_bio; 2361 2359 int sectors_handled = (r1_bio->sector + max_sectors 2362 - - mbio->bi_sector); 2360 + - mbio->bi_iter.bi_sector); 2363 2361 r1_bio->sectors = max_sectors; 2364 2362 spin_lock_irq(&conf->device_lock); 2365 2363 if (mbio->bi_phys_segments == 0) ··· 2377 2375 r1_bio->state = 0; 2378 2376 set_bit(R1BIO_ReadError, &r1_bio->state); 2379 2377 r1_bio->mddev = mddev; 2380 - r1_bio->sector = mbio->bi_sector + sectors_handled; 2378 + r1_bio->sector = mbio->bi_iter.bi_sector + 2379 + sectors_handled; 2381 2380 2382 2381 goto read_more; 2383 2382 } else ··· 2602 2599 } 2603 2600 if (bio->bi_end_io) { 2604 2601 atomic_inc(&rdev->nr_pending); 2605 - bio->bi_sector = sector_nr + rdev->data_offset; 2602 + bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; 2606 2603 bio->bi_bdev = rdev->bdev; 2607 2604 bio->bi_private = r1_bio; 2608 2605 } ··· 2702 2699 continue; 2703 2700 /* remove last page from this bio */ 2704 2701 bio->bi_vcnt--; 2705 - bio->bi_size -= len; 2702 + bio->bi_iter.bi_size -= len; 2706 2703 bio->bi_flags &= ~(1<< BIO_SEG_VALID); 2707 2704 } 2708 2705 goto bio_full;
+49 -42
drivers/md/raid10.c
··· 1182 1182 /* If this request crosses a chunk boundary, we need to 1183 1183 * split it. This will only happen for 1 PAGE (or less) requests. 1184 1184 */ 1185 - if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio) 1185 + if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + bio_sectors(bio) 1186 1186 > chunk_sects 1187 1187 && (conf->geo.near_copies < conf->geo.raid_disks 1188 1188 || conf->prev.near_copies < conf->prev.raid_disks))) { ··· 1193 1193 /* This is a one page bio that upper layers 1194 1194 * refuse to split for us, so we need to split it. 1195 1195 */ 1196 - bp = bio_split(bio, 1197 - chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); 1196 + bp = bio_split(bio, chunk_sects - 1197 + (bio->bi_iter.bi_sector & (chunk_sects - 1))); 1198 1198 1199 1199 /* Each of these 'make_request' calls will call 'wait_barrier'. 1200 1200 * If the first succeeds but the second blocks due to the resync ··· 1221 1221 bad_map: 1222 1222 printk("md/raid10:%s: make_request bug: can't convert block across chunks" 1223 1223 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2, 1224 - (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2); 1224 + (unsigned long long)bio->bi_iter.bi_sector, 1225 + bio_sectors(bio) / 2); 1225 1226 1226 1227 bio_io_error(bio); 1227 1228 return; ··· 1239 1238 1240 1239 sectors = bio_sectors(bio); 1241 1240 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1242 - bio->bi_sector < conf->reshape_progress && 1243 - bio->bi_sector + sectors > conf->reshape_progress) { 1241 + bio->bi_iter.bi_sector < conf->reshape_progress && 1242 + bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { 1244 1243 /* IO spans the reshape position. Need to wait for 1245 1244 * reshape to pass 1246 1245 */ 1247 1246 allow_barrier(conf); 1248 1247 wait_event(conf->wait_barrier, 1249 - conf->reshape_progress <= bio->bi_sector || 1250 - conf->reshape_progress >= bio->bi_sector + sectors); 1248 + conf->reshape_progress <= bio->bi_iter.bi_sector || 1249 + conf->reshape_progress >= bio->bi_iter.bi_sector + 1250 + sectors); 1251 1251 wait_barrier(conf); 1252 1252 } 1253 1253 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1254 1254 bio_data_dir(bio) == WRITE && 1255 1255 (mddev->reshape_backwards 1256 - ? (bio->bi_iter.bi_sector < conf->reshape_safe && 1257 + bio->bi_iter.bi_sector + sectors > conf->reshape_progress) 1258 + : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && 1259 + bio->bi_iter.bi_sector < conf->reshape_progress))) { 1260 1260 /* Need to update reshape_position in metadata */ 1261 1261 mddev->reshape_position = conf->reshape_progress; 1262 1262 set_bit(MD_CHANGE_DEVS, &mddev->flags); ··· 1275 1273 r10_bio->sectors = sectors; 1276 1274 1277 1275 r10_bio->mddev = mddev; 1278 - r10_bio->sector = bio->bi_sector; 1276 + r10_bio->sector = bio->bi_iter.bi_sector; 1279 1277 r10_bio->state = 0; 1280 1278 1281 1279 /* We might need to issue multiple reads to different ··· 1304 1302 slot = r10_bio->read_slot; 1305 1303 1306 1304 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1307 - bio_trim(read_bio, r10_bio->sector - bio->bi_sector, 1305 + bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, 1308 1306 max_sectors); 1309 1307 1310 1308 r10_bio->devs[slot].bio = read_bio; 1311 1309 r10_bio->devs[slot].rdev = rdev; 1312 1310 1313 - read_bio->bi_sector = r10_bio->devs[slot].addr + 1311 + read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + 1314 1312 choose_data_offset(r10_bio, rdev); 1315 1313 read_bio->bi_bdev = rdev->bdev; 1316 1314 read_bio->bi_end_io = raid10_end_read_request; ··· 1322 1320 * need another r10_bio. 1323 1321 */ 1324 1322 sectors_handled = (r10_bio->sectors + max_sectors 1325 - - bio->bi_sector); 1323 + - bio->bi_iter.bi_sector); 1326 1324 r10_bio->sectors = max_sectors; 1327 1325 spin_lock_irq(&conf->device_lock); 1328 1326 if (bio->bi_phys_segments == 0) ··· 1343 1341 r10_bio->sectors = bio_sectors(bio) - sectors_handled; 1344 1342 r10_bio->state = 0; 1345 1343 r10_bio->mddev = mddev; 1346 - r10_bio->sector = bio->bi_sector + sectors_handled; 1344 + r10_bio->sector = bio->bi_iter.bi_sector + 1345 + sectors_handled; 1347 1346 goto read_again; 1348 1347 } else 1349 1348 generic_make_request(read_bio); ··· 1502 1499 bio->bi_phys_segments++; 1503 1500 spin_unlock_irq(&conf->device_lock); 1504 1501 } 1505 - sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector; 1502 + sectors_handled = r10_bio->sector + max_sectors - 1503 + bio->bi_iter.bi_sector; 1506 1504 1507 1505 atomic_set(&r10_bio->remaining, 1); 1508 1506 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); ··· 1514 1510 if (r10_bio->devs[i].bio) { 1515 1511 struct md_rdev *rdev = conf->mirrors[d].rdev; 1516 1512 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1517 - bio_trim(mbio, r10_bio->sector - bio->bi_sector, 1513 + bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, 1518 1514 max_sectors); 1519 1515 r10_bio->devs[i].bio = mbio; 1520 1516 1521 - mbio->bi_sector = (r10_bio->devs[i].addr+ 1517 + mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ 1522 1518 choose_data_offset(r10_bio, 1523 1519 rdev)); 1524 1520 mbio->bi_bdev = rdev->bdev; ··· 1557 1553 rdev = conf->mirrors[d].rdev; 1558 1554 } 1559 1555 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1560 - bio_trim(mbio, r10_bio->sector - bio->bi_sector, 1556 + bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, 1561 1557 max_sectors); 1562 1558 r10_bio->devs[i].repl_bio = mbio; 1563 1559 1564 - mbio->bi_sector = (r10_bio->devs[i].addr + 1560 + mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + 1565 1561 choose_data_offset( 1566 1562 r10_bio, rdev)); 1567 1563 mbio->bi_bdev = rdev->bdev; ··· 1595 1591 r10_bio->sectors = bio_sectors(bio) - sectors_handled; 1596 1592 1597 1593 r10_bio->mddev = mddev; 1598 - r10_bio->sector = bio->bi_sector + sectors_handled; 1594 + r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; 1599 1595 r10_bio->state = 0; 1600 1596 goto retry_write; 1601 1597 } ··· 2128 2124 bio_reset(tbio); 2129 2125 2130 2126 tbio->bi_vcnt = vcnt; 2131 - tbio->bi_size = r10_bio->sectors << 9; 2127 + tbio->bi_iter.bi_size = r10_bio->sectors << 9; 2132 2128 tbio->bi_rw = WRITE; 2133 2129 tbio->bi_private = r10_bio; 2134 - tbio->bi_sector = r10_bio->devs[i].addr; 2130 + tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; 2135 2131 2136 2132 for (j=0; j < vcnt ; j++) { 2137 2133 tbio->bi_io_vec[j].bv_offset = 0; ··· 2148 2144 atomic_inc(&r10_bio->remaining); 2149 2145 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); 2150 2146 2151 - tbio->bi_sector += conf->mirrors[d].rdev->data_offset; 2147 + tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; 2152 2148 tbio->bi_bdev = conf->mirrors[d].rdev->bdev; 2153 2149 generic_make_request(tbio); 2154 2150 } ··· 2618 2614 sectors = sect_to_write; 2619 2615 /* Write at 'sector' for 'sectors' */ 2620 2616 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 2621 - bio_trim(wbio, sector - bio->bi_sector, sectors); 2622 - wbio->bi_sector = (r10_bio->devs[i].addr+ 2617 + bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); 2618 + wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ 2623 2619 choose_data_offset(r10_bio, rdev) + 2624 2620 (sector - r10_bio->sector)); 2625 2621 wbio->bi_bdev = rdev->bdev; ··· 2691 2687 (unsigned long long)r10_bio->sector); 2692 2688 bio = bio_clone_mddev(r10_bio->master_bio, 2693 2689 GFP_NOIO, mddev); 2694 - bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors); 2690 + bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); 2695 2691 r10_bio->devs[slot].bio = bio; 2696 2692 r10_bio->devs[slot].rdev = rdev; 2697 - bio->bi_sector = r10_bio->devs[slot].addr 2693 + bio->bi_iter.bi_sector = r10_bio->devs[slot].addr 2698 2694 + choose_data_offset(r10_bio, rdev); 2699 2695 bio->bi_bdev = rdev->bdev; 2700 2696 bio->bi_rw = READ | do_sync; ··· 2705 2701 struct bio *mbio = r10_bio->master_bio; 2706 2702 int sectors_handled = 2707 2703 r10_bio->sector + max_sectors 2708 - - mbio->bi_sector; 2704 + - mbio->bi_iter.bi_sector; 2709 2705 r10_bio->sectors = max_sectors; 2710 2706 spin_lock_irq(&conf->device_lock); 2711 2707 if (mbio->bi_phys_segments == 0) ··· 2723 2719 set_bit(R10BIO_ReadError, 2724 2720 &r10_bio->state); 2725 2721 r10_bio->mddev = mddev; 2726 - r10_bio->sector = mbio->bi_sector 2722 + r10_bio->sector = mbio->bi_iter.bi_sector 2727 2723 + sectors_handled; 2728 2724 2729 2725 goto read_more; ··· 3161 3157 bio->bi_end_io = end_sync_read; 3162 3158 bio->bi_rw = READ; 3163 3159 from_addr = r10_bio->devs[j].addr; 3164 - bio->bi_sector = from_addr + rdev->data_offset; 3160 + bio->bi_iter.bi_sector = from_addr + 3161 + rdev->data_offset; 3165 3162 bio->bi_bdev = rdev->bdev; 3166 3163 atomic_inc(&rdev->nr_pending); 3167 3164 /* and we write to 'i' (if not in_sync) */ ··· 3186 3181 bio->bi_private = r10_bio; 3187 3182 bio->bi_end_io = end_sync_write; 3188 3183 bio->bi_rw = WRITE; 3189 - bio->bi_sector = to_addr 3184 + bio->bi_iter.bi_sector = to_addr 3190 3185 + rdev->data_offset; 3191 3186 bio->bi_bdev = rdev->bdev; 3192 3187 atomic_inc(&r10_bio->remaining); ··· 3215 3210 bio->bi_private = r10_bio; 3216 3211 bio->bi_end_io = end_sync_write; 3217 3212 bio->bi_rw = WRITE; 3218 - bio->bi_sector = to_addr + rdev->data_offset; 3213 + bio->bi_iter.bi_sector = to_addr + 3214 + rdev->data_offset; 3219 3215 bio->bi_bdev = rdev->bdev; 3220 3216 atomic_inc(&r10_bio->remaining); 3221 3217 break; ··· 3334 3328 bio->bi_private = r10_bio; 3335 3329 bio->bi_end_io = end_sync_read; 3336 3330 bio->bi_rw = READ; 3337 - bio->bi_sector = sector + 3331 + bio->bi_iter.bi_sector = sector + 3338 3332 conf->mirrors[d].rdev->data_offset; 3339 3333 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 3340 3334 count++; ··· 3356 3350 bio->bi_private = r10_bio; 3357 3351 bio->bi_end_io = end_sync_write; 3358 3352 bio->bi_rw = WRITE; 3359 - bio->bi_sector = sector + 3353 + bio->bi_iter.bi_sector = sector + 3360 3354 conf->mirrors[d].replacement->data_offset; 3361 3355 bio->bi_bdev = conf->mirrors[d].replacement->bdev; 3362 3356 count++; ··· 3403 3397 bio2 = bio2->bi_next) { 3404 3398 /* remove last page from this bio */ 3405 3399 bio2->bi_vcnt--; 3406 - bio2->bi_size -= len; 3400 + bio2->bi_iter.bi_size -= len; 3407 3401 bio2->bi_flags &= ~(1<< BIO_SEG_VALID); 3408 3402 } 3409 3403 goto bio_full; ··· 4423 4417 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); 4424 4418 4425 4419 read_bio->bi_bdev = rdev->bdev; 4426 - read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr 4420 + read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr 4427 4421 + rdev->data_offset); 4428 4422 read_bio->bi_private = r10_bio; 4429 4423 read_bio->bi_end_io = end_sync_read; ··· 4431 4425 read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); 4432 4426 read_bio->bi_flags |= 1 << BIO_UPTODATE; 4433 4427 read_bio->bi_vcnt = 0; 4434 - read_bio->bi_size = 0; 4428 + read_bio->bi_iter.bi_size = 0; 4435 4429 r10_bio->master_bio = read_bio; 4436 4430 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; ··· 4457 4451 4458 4452 bio_reset(b); 4459 4453 b->bi_bdev = rdev2->bdev; 4460 - b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset; 4454 + b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + 4455 + rdev2->new_data_offset; 4461 4456 b->bi_private = r10_bio; 4462 4457 b->bi_end_io = end_reshape_write; 4463 4458 b->bi_rw = WRITE; ··· 4485 4478 bio2 = bio2->bi_next) { 4486 4479 /* Remove last page from this bio */ 4487 4480 bio2->bi_vcnt--; 4488 - bio2->bi_size -= len; 4481 + bio2->bi_iter.bi_size -= len; 4489 4482 bio2->bi_flags &= ~(1<<BIO_SEG_VALID); 4490 4483 } 4491 4484 goto bio_full;
+37 -35
drivers/md/raid5.c
··· 133 133 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) 134 134 { 135 135 int sectors = bio_sectors(bio); 136 - if (bio->bi_sector + sectors < sector + STRIPE_SECTORS) 136 + if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) 137 137 return bio->bi_next; 138 138 else 139 139 return NULL; ··· 225 225 226 226 return_bi = bi->bi_next; 227 227 bi->bi_next = NULL; 228 - bi->bi_size = 0; 228 + bi->bi_iter.bi_size = 0; 229 229 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), 230 230 bi, 0); 231 231 bio_endio(bi, 0); ··· 854 854 bi->bi_rw, i); 855 855 atomic_inc(&sh->count); 856 856 if (use_new_offset(conf, sh)) 857 - bi->bi_sector = (sh->sector 857 + bi->bi_iter.bi_sector = (sh->sector 858 858 + rdev->new_data_offset); 859 859 else 860 - bi->bi_sector = (sh->sector 860 + bi->bi_iter.bi_sector = (sh->sector 861 861 + rdev->data_offset); 862 862 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 863 863 bi->bi_rw |= REQ_NOMERGE; ··· 865 865 bi->bi_vcnt = 1; 866 866 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 867 867 bi->bi_io_vec[0].bv_offset = 0; 868 - bi->bi_size = STRIPE_SIZE; 868 + bi->bi_iter.bi_size = STRIPE_SIZE; 869 869 /* 870 870 * If this is discard request, set bi_vcnt 0. We don't 871 871 * want to confuse SCSI because SCSI will replace payload ··· 901 901 rbi->bi_rw, i); 902 902 atomic_inc(&sh->count); 903 903 if (use_new_offset(conf, sh)) 904 - rbi->bi_sector = (sh->sector 904 + rbi->bi_iter.bi_sector = (sh->sector 905 905 + rrdev->new_data_offset); 906 906 else 907 - rbi->bi_sector = (sh->sector 907 + rbi->bi_iter.bi_sector = (sh->sector 908 908 + rrdev->data_offset); 909 909 rbi->bi_vcnt = 1; 910 910 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 911 911 rbi->bi_io_vec[0].bv_offset = 0; 912 - rbi->bi_size = STRIPE_SIZE; 912 + rbi->bi_iter.bi_size = STRIPE_SIZE; 913 913 /* 914 914 * If this is discard request, set bi_vcnt 0. We don't 915 915 * want to confuse SCSI because SCSI will replace payload ··· 944 944 struct async_submit_ctl submit; 945 945 enum async_tx_flags flags = 0; 946 946 947 - if (bio->bi_sector >= sector) 948 - page_offset = (signed)(bio->bi_sector - sector) * 512; 947 + if (bio->bi_iter.bi_sector >= sector) 948 + page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; 949 949 else 950 - page_offset = (signed)(sector - bio->bi_sector) * -512; 950 + page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; 951 951 952 952 if (frombio) 953 953 flags |= ASYNC_TX_FENCE; ··· 1014 1014 BUG_ON(!dev->read); 1015 1015 rbi = dev->read; 1016 1016 dev->read = NULL; 1017 - while (rbi && rbi->bi_sector < 1017 + while (rbi && rbi->bi_iter.bi_sector < 1018 1018 dev->sector + STRIPE_SECTORS) { 1019 1019 rbi2 = r5_next_bio(rbi, dev->sector); 1020 1020 if (!raid5_dec_bi_active_stripes(rbi)) { ··· 1050 1050 dev->read = rbi = dev->toread; 1051 1051 dev->toread = NULL; 1052 1052 spin_unlock_irq(&sh->stripe_lock); 1053 - while (rbi && rbi->bi_sector < 1053 + while (rbi && rbi->bi_iter.bi_sector < 1054 1054 dev->sector + STRIPE_SECTORS) { 1055 1055 tx = async_copy_data(0, rbi, dev->page, 1056 1056 dev->sector, tx); ··· 1392 1392 wbi = dev->written = chosen; 1393 1393 spin_unlock_irq(&sh->stripe_lock); 1394 1394 1395 - while (wbi && wbi->bi_sector < 1395 + while (wbi && wbi->bi_iter.bi_sector < 1396 1396 dev->sector + STRIPE_SECTORS) { 1397 1397 if (wbi->bi_rw & REQ_FUA) 1398 1398 set_bit(R5_WantFUA, &dev->flags); ··· 2616 2616 int firstwrite=0; 2617 2617 2618 2618 pr_debug("adding bi b#%llu to stripe s#%llu\n", 2619 - (unsigned long long)bi->bi_sector, 2619 + (unsigned long long)bi->bi_iter.bi_sector, 2620 2620 (unsigned long long)sh->sector); 2621 2621 2622 2622 /* ··· 2634 2634 firstwrite = 1; 2635 2635 } else 2636 2636 bip = &sh->dev[dd_idx].toread; 2637 - while (*bip && (*bip)->bi_sector < bi->bi_sector) { 2638 - if (bio_end_sector(*bip) > bi->bi_sector) 2637 + while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { 2638 + if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) 2639 2639 goto overlap; 2640 2640 bip = & (*bip)->bi_next; 2641 2641 } 2642 - if (*bip && (*bip)->bi_sector < bio_end_sector(bi)) 2642 + if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) 2643 2643 goto overlap; 2644 2644 2645 2645 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); ··· 2653 2653 sector_t sector = sh->dev[dd_idx].sector; 2654 2654 for (bi=sh->dev[dd_idx].towrite; 2655 2655 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 2656 - bi && bi->bi_sector <= sector; 2656 + bi && bi->bi_iter.bi_sector <= sector; 2657 2657 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 2658 2658 if (bio_end_sector(bi) >= sector) 2659 2659 sector = bio_end_sector(bi); ··· 2663 2663 } 2664 2664 2665 2665 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 2666 - (unsigned long long)(*bip)->bi_sector, 2666 + (unsigned long long)(*bip)->bi_iter.bi_sector, 2667 2667 (unsigned long long)sh->sector, dd_idx); 2668 2668 spin_unlock_irq(&sh->stripe_lock); 2669 2669 ··· 2738 2738 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2739 2739 wake_up(&conf->wait_for_overlap); 2740 2740 2741 - while (bi && bi->bi_sector < 2741 + while (bi && bi->bi_iter.bi_sector < 2742 2742 sh->dev[i].sector + STRIPE_SECTORS) { 2743 2743 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2744 2744 clear_bit(BIO_UPTODATE, &bi->bi_flags); ··· 2757 2757 bi = sh->dev[i].written; 2758 2758 sh->dev[i].written = NULL; 2759 2759 if (bi) bitmap_end = 1; 2760 - while (bi && bi->bi_sector < 2760 + while (bi && bi->bi_iter.bi_sector < 2761 2761 sh->dev[i].sector + STRIPE_SECTORS) { 2762 2762 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2763 2763 clear_bit(BIO_UPTODATE, &bi->bi_flags); ··· 2781 2781 spin_unlock_irq(&sh->stripe_lock); 2782 2782 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2783 2783 wake_up(&conf->wait_for_overlap); 2784 - while (bi && bi->bi_sector < 2784 + while (bi && bi->bi_iter.bi_sector < 2785 2785 sh->dev[i].sector + STRIPE_SECTORS) { 2786 2786 struct bio *nextbi = 2787 2787 r5_next_bio(bi, sh->dev[i].sector); ··· 3005 3005 clear_bit(R5_UPTODATE, &dev->flags); 3006 3006 wbi = dev->written; 3007 3007 dev->written = NULL; 3008 - while (wbi && wbi->bi_sector < 3008 + while (wbi && wbi->bi_iter.bi_sector < 3009 3009 dev->sector + STRIPE_SECTORS) { 3010 3010 wbi2 = r5_next_bio(wbi, dev->sector); 3011 3011 if (!raid5_dec_bi_active_stripes(wbi)) { ··· 4097 4097 4098 4098 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 4099 4099 { 4100 - sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 4100 + sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); 4101 4101 unsigned int chunk_sectors = mddev->chunk_sectors; 4102 4102 unsigned int bio_sectors = bio_sectors(bio); 4103 4103 ··· 4234 4234 /* 4235 4235 * compute position 4236 4236 */ 4237 - align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, 4238 - 0, 4239 - &dd_idx, NULL); 4237 + align_bi->bi_iter.bi_sector = 4238 + raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 4239 + 0, &dd_idx, NULL); 4240 4240 4241 4241 end_sector = bio_end_sector(align_bi); 4242 4242 rcu_read_lock(); ··· 4261 4261 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 4262 4262 4263 4263 if (!bio_fits_rdev(align_bi) || 4264 - is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi), 4264 + is_badblock(rdev, align_bi->bi_iter.bi_sector, 4265 + bio_sectors(align_bi), 4265 4266 &first_bad, &bad_sectors)) { 4266 4267 /* too big in some way, or has a known bad block */ 4267 4268 bio_put(align_bi); ··· 4271 4270 } 4272 4271 4273 4272 /* No reshape active, so we can trust rdev->data_offset */ 4274 - align_bi->bi_sector += rdev->data_offset; 4273 + align_bi->bi_iter.bi_sector += rdev->data_offset; 4275 4274 4276 4275 spin_lock_irq(&conf->device_lock); 4277 4276 wait_event_lock_irq(conf->wait_for_stripe, ··· 4283 4282 if (mddev->gendisk) 4284 4283 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), 4285 4284 align_bi, disk_devt(mddev->gendisk), 4286 - raid_bio->bi_sector); 4285 + raid_bio->bi_iter.bi_sector); 4287 4286 generic_make_request(align_bi); 4288 4287 return 1; 4289 4288 } else { ··· 4466 4465 /* Skip discard while reshape is happening */ 4467 4466 return; 4468 4467 4469 - logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4470 - last_sector = bi->bi_sector + (bi->bi_size>>9); 4468 + logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4469 + last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9); 4471 4470 4472 4471 bi->bi_next = NULL; 4473 4472 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ ··· 4571 4570 return; 4572 4571 } 4573 4572 4574 - logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4573 + logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4575 4574 last_sector = bio_end_sector(bi); 4576 4575 bi->bi_next = NULL; 4577 4576 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ ··· 5055 5054 int remaining; 5056 5055 int handled = 0; 5057 5056 5058 - logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5057 + logical_sector = raid_bio->bi_iter.bi_sector & 5058 + ~((sector_t)STRIPE_SECTORS-1); 5059 5059 sector = raid5_compute_sector(conf, logical_sector, 5060 5060 0, &dd_idx, NULL); 5061 5061 last_sector = bio_end_sector(raid_bio);
+3 -2
drivers/s390/block/dcssblk.c
··· 819 819 dev_info = bio->bi_bdev->bd_disk->private_data; 820 820 if (dev_info == NULL) 821 821 goto fail; 822 - if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) 822 + if ((bio->bi_iter.bi_sector & 7) != 0 || 823 + (bio->bi_iter.bi_size & 4095) != 0) 823 824 /* Request is not page-aligned. */ 824 825 goto fail; 825 826 if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { ··· 843 842 } 844 843 } 845 844 846 - index = (bio->bi_sector >> 3); 845 + index = (bio->bi_iter.bi_sector >> 3); 847 846 bio_for_each_segment(bvec, bio, i) { 848 847 page_addr = (unsigned long) 849 848 page_address(bvec->bv_page) + bvec->bv_offset;
+5 -4
drivers/s390/block/xpram.c
··· 190 190 unsigned long bytes; 191 191 int i; 192 192 193 - if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) 193 + if ((bio->bi_iter.bi_sector & 7) != 0 || 194 + (bio->bi_iter.bi_size & 4095) != 0) 194 195 /* Request is not page-aligned. */ 195 196 goto fail; 196 - if ((bio->bi_size >> 12) > xdev->size) 197 + if ((bio->bi_iter.bi_size >> 12) > xdev->size) 197 198 /* Request size is no page-aligned. */ 198 199 goto fail; 199 - if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset) 200 + if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) 200 201 goto fail; 201 - index = (bio->bi_sector >> 3) + xdev->offset; 202 + index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; 202 203 bio_for_each_segment(bvec, bio, i) { 203 204 page_addr = (unsigned long) 204 205 kmap(bvec->bv_page) + bvec->bv_offset;
+1 -1
drivers/scsi/osd/osd_initiator.c
··· 731 731 732 732 bio->bi_rw &= ~REQ_WRITE; 733 733 or->in.bio = bio; 734 - or->in.total_bytes = bio->bi_size; 734 + or->in.total_bytes = bio->bi_iter.bi_size; 735 735 return 0; 736 736 } 737 737
+7 -5
drivers/staging/lustre/lustre/llite/lloop.c
··· 220 220 for (bio = head; bio != NULL; bio = bio->bi_next) { 221 221 LASSERT(rw == bio->bi_rw); 222 222 223 - offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset; 223 + offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; 224 224 bio_for_each_segment(bvec, bio, i) { 225 225 BUG_ON(bvec->bv_offset != 0); 226 226 BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE); ··· 313 313 bio = &lo->lo_bio; 314 314 while (*bio && (*bio)->bi_rw == rw) { 315 315 CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n", 316 - (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size, 316 + (unsigned long long)(*bio)->bi_iter.bi_sector, 317 + (*bio)->bi_iter.bi_size, 317 318 page_count, (*bio)->bi_vcnt); 318 319 if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS) 319 320 break; ··· 348 347 goto err; 349 348 350 349 CDEBUG(D_INFO, "submit bio sector %llu size %u\n", 351 - (unsigned long long)old_bio->bi_sector, old_bio->bi_size); 350 + (unsigned long long)old_bio->bi_iter.bi_sector, 351 + old_bio->bi_iter.bi_size); 352 352 353 353 spin_lock_irq(&lo->lo_lock); 354 354 inactive = (lo->lo_state != LLOOP_BOUND); ··· 369 367 loop_add_bio(lo, old_bio); 370 368 return; 371 369 err: 372 - cfs_bio_io_error(old_bio, old_bio->bi_size); 370 + cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size); 373 371 } 374 372 375 373 ··· 380 378 while (bio) { 381 379 struct bio *tmp = bio->bi_next; 382 380 bio->bi_next = NULL; 383 - cfs_bio_endio(bio, bio->bi_size, ret); 381 + cfs_bio_endio(bio, bio->bi_iter.bi_size, ret); 384 382 bio = tmp; 385 383 } 386 384 }
+8 -6
drivers/staging/zram/zram_drv.c
···
 	u64 start, end, bound;
 
 	/* unaligned request */
-	if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+	if (unlikely(bio->bi_iter.bi_sector &
+		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
 		return 0;
-	if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
 		return 0;
 
-	start = bio->bi_sector;
-	end = start + (bio->bi_size >> SECTOR_SHIFT);
+	start = bio->bi_iter.bi_sector;
+	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
 	bound = zram->disksize >> SECTOR_SHIFT;
 	/* out of range range */
 	if (unlikely(start >= bound || end > bound || start > end))
···
 		break;
 	}
 
-	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+	offset = (bio->bi_iter.bi_sector &
+		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
 	bio_for_each_segment(bvec, bio, i) {
 		int max_transfer_size = PAGE_SIZE - offset;
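The two checks above are pure arithmetic on the iterator fields. As a reading aid, here is a minimal userspace sketch of the same validation; the types are simplified and the function name is an illustrative stand-in, not the kernel code:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

#define SECTOR_SHIFT                  9
#define ZRAM_LOGICAL_BLOCK_SIZE       4096
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK (ZRAM_LOGICAL_BLOCK_SIZE >> SECTOR_SHIFT)

/* Illustrative stand-in for the zram-style bounds check. */
static bool valid_io_request(sector_t bi_sector, unsigned int bi_size,
			     uint64_t disksize)
{
	uint64_t start, end, bound;

	/* unaligned start sector or unaligned byte count */
	if (bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1))
		return false;
	if (bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1))
		return false;

	start = bi_sector;
	end = start + (bi_size >> SECTOR_SHIFT);
	bound = disksize >> SECTOR_SHIFT;
	/* the request must lie entirely inside the device */
	return start < bound && end <= bound && start <= end;
}

int main(void)
{
	assert(valid_io_request(0, 4096, 1 << 20));
	assert(!valid_io_request(1, 4096, 1 << 20)); /* unaligned sector */
	assert(!valid_io_request(0, 512, 1 << 20));  /* unaligned size */
	return 0;
}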
+1 -1
drivers/target/target_core_iblock.c
···
 	bio->bi_bdev = ib_dev->ibd_bd;
 	bio->bi_private = cmd;
 	bio->bi_end_io = &iblock_bio_done;
-	bio->bi_sector = lba;
+	bio->bi_iter.bi_sector = lba;
 
 	return bio;
 }
+4 -4
fs/bio-integrity.c
···
 {
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-	BUG_ON(bio->bi_size == 0);
+	BUG_ON(bio->bi_iter.bi_size == 0);
 
-	return bi->tag_size * (bio->bi_size / bi->sector_size);
+	return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size);
 }
 EXPORT_SYMBOL(bio_integrity_tag_size);
 
···
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 	struct blk_integrity_exchg bix;
 	struct bio_vec *bv;
-	sector_t sector = bio->bi_sector;
+	sector_t sector = bio->bi_iter.bi_sector;
 	unsigned int i, sectors, total;
 	void *prot_buf = bio->bi_integrity->bip_buf;
 
···
 	bip->bip_owns_buf = 1;
 	bip->bip_buf = buf;
 	bip->bip_size = len;
-	bip->bip_sector = bio->bi_sector;
+	bip->bip_sector = bio->bi_iter.bi_sector;
 
 	/* Map it */
 	offset = offset_in_page(buf);
+29 -27
fs/bio.c
···
 	 * most users will be overriding ->bi_bdev with a new target,
 	 * so we don't set nor calculate new physical/hw segment counts here
 	 */
-	bio->bi_sector = bio_src->bi_sector;
+	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
 	bio->bi_bdev = bio_src->bi_bdev;
 	bio->bi_flags |= 1 << BIO_CLONED;
 	bio->bi_rw = bio_src->bi_rw;
 	bio->bi_vcnt = bio_src->bi_vcnt;
-	bio->bi_size = bio_src->bi_size;
-	bio->bi_idx = bio_src->bi_idx;
+	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
+	bio->bi_iter.bi_idx = bio_src->bi_iter.bi_idx;
 }
 EXPORT_SYMBOL(__bio_clone);
 
···
 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
 		return 0;
 
-	if (((bio->bi_size + len) >> 9) > max_sectors)
+	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
 		return 0;
 
 	/*
···
 				   simulate merging updated prev_bvec
 				   as new bvec. */
 				.bi_bdev = bio->bi_bdev,
-				.bi_sector = bio->bi_sector,
-				.bi_size = bio->bi_size - prev_bv_len,
+				.bi_sector = bio->bi_iter.bi_sector,
+				.bi_size = bio->bi_iter.bi_size -
+					   prev_bv_len,
 				.bi_rw = bio->bi_rw,
 			};
 
···
 	if (q->merge_bvec_fn) {
 		struct bvec_merge_data bvm = {
 			.bi_bdev = bio->bi_bdev,
-			.bi_sector = bio->bi_sector,
-			.bi_size = bio->bi_size,
+			.bi_sector = bio->bi_iter.bi_sector,
+			.bi_size = bio->bi_iter.bi_size,
 			.bi_rw = bio->bi_rw,
 		};
 
···
 	bio->bi_vcnt++;
 	bio->bi_phys_segments++;
  done:
-	bio->bi_size += len;
+	bio->bi_iter.bi_size += len;
 	return len;
 }
 
···
 	if (bio_integrity(bio))
 		bio_integrity_advance(bio, bytes);
 
-	bio->bi_sector += bytes >> 9;
-	bio->bi_size -= bytes;
+	bio->bi_iter.bi_sector += bytes >> 9;
+	bio->bi_iter.bi_size -= bytes;
 
 	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
 		return;
 
 	while (bytes) {
-		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+		if (unlikely(bio->bi_iter.bi_idx >= bio->bi_vcnt)) {
 			WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
-				  bio->bi_idx, bio->bi_vcnt);
+				  bio->bi_iter.bi_idx, bio->bi_vcnt);
 			break;
 		}
 
 		if (bytes >= bio_iovec(bio)->bv_len) {
 			bytes -= bio_iovec(bio)->bv_len;
-			bio->bi_idx++;
+			bio->bi_iter.bi_idx++;
 		} else {
 			bio_iovec(bio)->bv_len -= bytes;
 			bio_iovec(bio)->bv_offset += bytes;
···
 	if (IS_ERR(bio))
 		return bio;
 
-	if (bio->bi_size == len)
+	if (bio->bi_iter.bi_size == len)
 		return bio;
 
 	/*
···
 		return bp;
 
 	trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
-			  bi->bi_sector + first_sectors);
+			  bi->bi_iter.bi_sector + first_sectors);
 
 	BUG_ON(bio_segments(bi) > 1);
 	atomic_set(&bp->cnt, 3);
 	bp->error = 0;
 	bp->bio1 = *bi;
 	bp->bio2 = *bi;
-	bp->bio2.bi_sector += first_sectors;
-	bp->bio2.bi_size -= first_sectors << 9;
-	bp->bio1.bi_size = first_sectors << 9;
+	bp->bio2.bi_iter.bi_sector += first_sectors;
+	bp->bio2.bi_iter.bi_size -= first_sectors << 9;
+	bp->bio1.bi_iter.bi_size = first_sectors << 9;
 
 	if (bi->bi_vcnt != 0) {
 		bp->bv1 = *bio_iovec(bi);
···
 	int sofar = 0;
 
 	size <<= 9;
-	if (offset == 0 && size == bio->bi_size)
+	if (offset == 0 && size == bio->bi_iter.bi_size)
 		return;
 
 	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
 
 	bio_advance(bio, offset << 9);
 
-	bio->bi_size = size;
+	bio->bi_iter.bi_size = size;
 
 	/* avoid any complications with bi_idx being non-zero*/
-	if (bio->bi_idx) {
-		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
-			(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
-		bio->bi_vcnt -= bio->bi_idx;
-		bio->bi_idx = 0;
+	if (bio->bi_iter.bi_idx) {
+		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_iter.bi_idx,
+			(bio->bi_vcnt - bio->bi_iter.bi_idx) *
+			sizeof(struct bio_vec));
+		bio->bi_vcnt -= bio->bi_iter.bi_idx;
+		bio->bi_iter.bi_idx = 0;
 	}
 	/* Make sure vcnt and last bv are not too big */
 	bio_for_each_segment(bvec, bio, i) {
···
 	sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
 	sectors = 0;
 
-	if (index >= bio->bi_idx)
+	if (index >= bio->bi_iter.bi_idx)
 		index = bio->bi_vcnt - 1;
 
 	bio_for_each_segment_all(bv, bio, i) {
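The bio_advance() hunk above shows all three iterator fields moving in lockstep. Below is a small self-contained sketch of that bookkeeping, using simplified stand-in types (bio_vec_sketch and bvec_iter here are illustrative, not the kernel structs):

#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;

struct bio_vec_sketch { unsigned int bv_len, bv_offset; };
struct bvec_iter { sector_t bi_sector; unsigned int bi_size, bi_idx; };

/* Advance the iterator by `bytes`: the device sector moves forward,
 * the residual byte count shrinks, whole vectors are stepped over via
 * bi_idx, and a partially consumed vector is trimmed in place. */
static void advance(struct bvec_iter *iter, struct bio_vec_sketch *vecs,
		    unsigned int vcnt, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;
	iter->bi_size -= bytes;

	while (bytes && iter->bi_idx < vcnt) {
		struct bio_vec_sketch *bv = &vecs[iter->bi_idx];

		if (bytes >= bv->bv_len) {
			bytes -= bv->bv_len;
			iter->bi_idx++;
		} else {
			bv->bv_len -= bytes;
			bv->bv_offset += bytes;
			bytes = 0;
		}
	}
}

int main(void)
{
	struct bio_vec_sketch vecs[2] = { { 4096, 0 }, { 4096, 0 } };
	struct bvec_iter iter = { 0, 8192, 0 };

	advance(&iter, vecs, 2, 4096 + 512);
	assert(iter.bi_sector == 9);		/* 4608 bytes = 9 sectors */
	assert(iter.bi_size == 3584);
	assert(iter.bi_idx == 1);
	assert(vecs[1].bv_len == 3584 && vecs[1].bv_offset == 512);
	return 0;
}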
+4 -4
fs/btrfs/check-integrity.c
···
 		return -1;
 	}
 	bio->bi_bdev = block_ctx->dev->bdev;
-	bio->bi_sector = dev_bytenr >> 9;
+	bio->bi_iter.bi_sector = dev_bytenr >> 9;
 
 	for (j = i; j < num_pages; j++) {
 		ret = bio_add_page(bio, block_ctx->pagev[j],
···
 	int bio_is_patched;
 	char **mapped_datav;
 
-	dev_bytenr = 512 * bio->bi_sector;
+	dev_bytenr = 512 * bio->bi_iter.bi_sector;
 	bio_is_patched = 0;
 	if (dev_state->state->print_mask &
 	    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
		printk(KERN_INFO
 		       "submit_bio(rw=0x%x, bi_vcnt=%u,"
 		       " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
 		       rw, bio->bi_vcnt,
-		       (unsigned long long)bio->bi_sector, dev_bytenr,
-		       bio->bi_bdev);
+		       (unsigned long long)bio->bi_iter.bi_sector,
+		       dev_bytenr, bio->bi_bdev);
 
 	mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
 			       GFP_NOFS);
+9 -8
fs/btrfs/compression.c
···
 		goto out;
 
 	inode = cb->inode;
-	ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
+	ret = check_compressed_csum(inode, cb,
+				    (u64)bio->bi_iter.bi_sector << 9);
 	if (ret)
 		goto csum_failed;
 
···
 	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
 		page = compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
-		if (bio->bi_size)
+		if (bio->bi_iter.bi_size)
 			ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
 							   PAGE_CACHE_SIZE,
 							   bio, 0);
···
 
 	if (!em || last_offset < em->start ||
 	    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
-	    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
+	    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
 		free_extent_map(em);
 		unlock_extent(tree, last_offset, end);
 		unlock_page(page);
···
  * in it.  We don't actually do IO on those pages but allocate new ones
  * to hold the compressed pages on disk.
  *
- * bio->bi_sector points to the compressed extent on disk
+ * bio->bi_iter.bi_sector points to the compressed extent on disk
  * bio->bi_io_vec points to all of the inode pages
  * bio->bi_vcnt is a count of pages
  *
···
 	struct page *page;
 	struct block_device *bdev;
 	struct bio *comp_bio;
-	u64 cur_disk_byte = (u64)bio->bi_sector << 9;
+	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
 	u64 em_len;
 	u64 em_start;
 	struct extent_map *em;
···
 		page->mapping = inode->i_mapping;
 		page->index = em_start >> PAGE_CACHE_SHIFT;
 
-		if (comp_bio->bi_size)
+		if (comp_bio->bi_iter.bi_size)
 			ret = tree->ops->merge_bio_hook(READ, page, 0,
 							PAGE_CACHE_SIZE,
 							comp_bio, 0);
···
 							comp_bio, sums);
 			BUG_ON(ret); /* -ENOMEM */
 		}
-		sums += (comp_bio->bi_size + root->sectorsize - 1) /
-			root->sectorsize;
+		sums += (comp_bio->bi_iter.bi_size +
+			 root->sectorsize - 1) / root->sectorsize;
 
 		ret = btrfs_map_bio(root, READ, comp_bio,
 				    mirror_num, 0);
+7 -7
fs/btrfs/extent_io.c
···
 	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
 	if (!bio)
 		return -EIO;
-	bio->bi_size = 0;
+	bio->bi_iter.bi_size = 0;
 	map_length = length;
 
 	ret = btrfs_map_block(fs_info, WRITE, logical,
···
 	}
 	BUG_ON(mirror_num != bbio->mirror_num);
 	sector = bbio->stripes[mirror_num-1].physical >> 9;
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	dev = bbio->stripes[mirror_num-1].dev;
 	kfree(bbio);
 	if (!dev || !dev->bdev || !dev->writeable) {
···
 		return -EIO;
 	}
 	bio->bi_end_io = failed_bio->bi_end_io;
-	bio->bi_sector = failrec->logical >> 9;
+	bio->bi_iter.bi_sector = failrec->logical >> 9;
 	bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
-	bio->bi_size = 0;
+	bio->bi_iter.bi_size = 0;
 
 	btrfs_failed_bio = btrfs_io_bio(failed_bio);
 	if (btrfs_failed_bio->csum) {
···
 		struct inode *inode = page->mapping->host;
 
 		pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
-			 "mirror=%lu\n", (u64)bio->bi_sector, err,
+			 "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err,
 			 io_bio->mirror_num);
 		tree = &BTRFS_I(inode)->io_tree;
 
···
 
 	if (bio) {
 		bio->bi_bdev = bdev;
-		bio->bi_sector = first_sector;
+		bio->bi_iter.bi_sector = first_sector;
 		btrfs_bio = btrfs_io_bio(bio);
 		btrfs_bio->csum = NULL;
 		btrfs_bio->csum_allocated = NULL;
···
 	if (bio_ret && *bio_ret) {
 		bio = *bio_ret;
 		if (old_compressed)
-			contig = bio->bi_sector == sector;
+			contig = bio->bi_iter.bi_sector == sector;
 		else
 			contig = bio_end_sector(bio) == sector;
 
+10 -9
fs/btrfs/file-item.c
···
 	if (!path)
 		return -ENOMEM;
 
-	nblocks = bio->bi_size >> inode->i_sb->s_blocksize_bits;
+	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
 	if (!dst) {
 		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
 			btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size,
···
 		csum = (u8 *)dst;
 	}
 
-	if (bio->bi_size > PAGE_CACHE_SIZE * 8)
+	if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
 		path->reada = 2;
 
 	WARN_ON(bio->bi_vcnt <= 0);
···
 		path->skip_locking = 1;
 	}
 
-	disk_bytenr = (u64)bio->bi_sector << 9;
+	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
 	if (dio)
 		offset = logical_offset;
 	while (bio_index < bio->bi_vcnt) {
···
 			     struct btrfs_dio_private *dip, struct bio *bio,
 			     u64 offset)
 {
-	int len = (bio->bi_sector << 9) - dip->disk_bytenr;
+	int len = (bio->bi_iter.bi_sector << 9) - dip->disk_bytenr;
 	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
 	int ret;
 
···
 	u64 offset;
 
 	WARN_ON(bio->bi_vcnt <= 0);
-	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS);
+	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
+		       GFP_NOFS);
 	if (!sums)
 		return -ENOMEM;
 
-	sums->len = bio->bi_size;
+	sums->len = bio->bi_iter.bi_size;
 	INIT_LIST_HEAD(&sums->list);
 
 	if (contig)
···
 
 	ordered = btrfs_lookup_ordered_extent(inode, offset);
 	BUG_ON(!ordered); /* Logic error */
-	sums->bytenr = (u64)bio->bi_sector << 9;
+	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
 	index = 0;
 
 	while (bio_index < bio->bi_vcnt) {
···
 		btrfs_add_ordered_sum(inode, ordered, sums);
 		btrfs_put_ordered_extent(ordered);
 
-		bytes_left = bio->bi_size - total_bytes;
+		bytes_left = bio->bi_iter.bi_size - total_bytes;
 
 		sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
 			       GFP_NOFS);
···
 		sums->len = bytes_left;
 		ordered = btrfs_lookup_ordered_extent(inode, offset);
 		BUG_ON(!ordered); /* Logic error */
-		sums->bytenr = ((u64)bio->bi_sector << 9) +
+		sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
 			       total_bytes;
 		index = 0;
 	}
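The checksum paths above repeatedly convert between the iterator's two units: a 512-byte sector for the disk address and a byte count for the residual size. A tiny sketch of those two conversions with made-up values (disk_bytenr and nblocks mirror the expressions in the hunk; the rest is illustrative):

#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;

struct bvec_iter { sector_t bi_sector; unsigned int bi_size; };

int main(void)
{
	struct bvec_iter iter = { .bi_sector = 16, .bi_size = 16384 };
	unsigned int blocksize_bits = 12;	/* 4K filesystem blocks */

	/* disk_bytenr = (u64)bio->bi_iter.bi_sector << 9 */
	uint64_t disk_bytenr = (uint64_t)iter.bi_sector << 9;

	/* nblocks = bio->bi_iter.bi_size >> s_blocksize_bits */
	unsigned int nblocks = iter.bi_size >> blocksize_bits;

	assert(disk_bytenr == 8192);	/* sector 16 is byte 8192 */
	assert(nblocks == 4);		/* 16K covers four 4K blocks */
	return 0;
}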
+12 -10
fs/btrfs/inode.c
···
 			 unsigned long bio_flags)
 {
 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
-	u64 logical = (u64)bio->bi_sector << 9;
+	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
 	u64 length = 0;
 	u64 map_length;
 	int ret;
···
 	if (bio_flags & EXTENT_BIO_COMPRESSED)
 		return 0;
 
-	length = bio->bi_size;
+	length = bio->bi_iter.bi_size;
 	map_length = length;
 	ret = btrfs_map_block(root->fs_info, rw, logical,
 			      &map_length, NULL, 0);
···
 		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
 		      "sector %#Lx len %u err no %d\n",
 		      btrfs_ino(dip->inode), bio->bi_rw,
-		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
+		      (unsigned long long)bio->bi_iter.bi_sector,
+		      bio->bi_iter.bi_size, err);
 		dip->errors = 1;
 
 		/*
···
 	struct bio *bio;
 	struct bio *orig_bio = dip->orig_bio;
 	struct bio_vec *bvec = orig_bio->bi_io_vec;
-	u64 start_sector = orig_bio->bi_sector;
+	u64 start_sector = orig_bio->bi_iter.bi_sector;
 	u64 file_offset = dip->logical_offset;
 	u64 submit_len = 0;
 	u64 map_length;
···
 	int ret = 0;
 	int async_submit = 0;
 
-	map_length = orig_bio->bi_size;
+	map_length = orig_bio->bi_iter.bi_size;
 	ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
 			      &map_length, NULL, 0);
 	if (ret) {
···
 		return -EIO;
 	}
 
-	if (map_length >= orig_bio->bi_size) {
+	if (map_length >= orig_bio->bi_iter.bi_size) {
 		bio = orig_bio;
 		goto submit;
 	}
···
 			bio->bi_private = dip;
 			bio->bi_end_io = btrfs_end_dio_bio;
 
-			map_length = orig_bio->bi_size;
+			map_length = orig_bio->bi_iter.bi_size;
 			ret = btrfs_map_block(root->fs_info, rw,
 					      start_sector << 9,
 					      &map_length, NULL, 0);
···
 
 	if (!skip_sum && !write) {
 		csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
-		sum_len = dio_bio->bi_size >> inode->i_sb->s_blocksize_bits;
+		sum_len = dio_bio->bi_iter.bi_size >>
+			inode->i_sb->s_blocksize_bits;
 		sum_len *= csum_size;
 	} else {
 		sum_len = 0;
···
 	dip->private = dio_bio->bi_private;
 	dip->inode = inode;
 	dip->logical_offset = file_offset;
-	dip->bytes = dio_bio->bi_size;
-	dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
+	dip->bytes = dio_bio->bi_iter.bi_size;
+	dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
 	io_bio->bi_private = dip;
 	dip->errors = 0;
 	dip->orig_bio = io_bio;
+11 -11
fs/btrfs/raid56.c
···
 
 	/* see if we can add this page onto our existing bio */
 	if (last) {
-		last_end = (u64)last->bi_sector << 9;
-		last_end += last->bi_size;
+		last_end = (u64)last->bi_iter.bi_sector << 9;
+		last_end += last->bi_iter.bi_size;
 
 		/*
 		 * we can't merge these if they are from different
···
 	if (!bio)
 		return -ENOMEM;
 
-	bio->bi_size = 0;
+	bio->bi_iter.bi_size = 0;
 	bio->bi_bdev = stripe->dev->bdev;
-	bio->bi_sector = disk_start >> 9;
+	bio->bi_iter.bi_sector = disk_start >> 9;
 	set_bit(BIO_UPTODATE, &bio->bi_flags);
 
 	bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
···
 
 	spin_lock_irq(&rbio->bio_list_lock);
 	bio_list_for_each(bio, &rbio->bio_list) {
-		start = (u64)bio->bi_sector << 9;
+		start = (u64)bio->bi_iter.bi_sector << 9;
 		stripe_offset = start - rbio->raid_map[0];
 		page_index = stripe_offset >> PAGE_CACHE_SHIFT;
 
···
 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
 			   struct bio *bio)
 {
-	u64 physical = bio->bi_sector;
+	u64 physical = bio->bi_iter.bi_sector;
 	u64 stripe_start;
 	int i;
 	struct btrfs_bio_stripe *stripe;
···
 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
 				   struct bio *bio)
 {
-	u64 logical = bio->bi_sector;
+	u64 logical = bio->bi_iter.bi_sector;
 	u64 stripe_start;
 	int i;
 
···
 						       plug_list);
 	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
 						 plug_list);
-	u64 a_sector = ra->bio_list.head->bi_sector;
-	u64 b_sector = rb->bio_list.head->bi_sector;
+	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
+	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
 
 	if (a_sector < b_sector)
 		return -1;
···
 	if (IS_ERR(rbio))
 		return PTR_ERR(rbio);
 	bio_list_add(&rbio->bio_list, bio);
-	rbio->bio_list_bytes = bio->bi_size;
+	rbio->bio_list_bytes = bio->bi_iter.bi_size;
 
 	/*
 	 * don't plug on full rbios, just get them out the door
···
 
 	rbio->read_rebuild = 1;
 	bio_list_add(&rbio->bio_list, bio);
-	rbio->bio_list_bytes = bio->bi_size;
+	rbio->bio_list_bytes = bio->bi_iter.bi_size;
 
 	rbio->faila = find_logical_bio_stripe(rbio, bio);
 	if (rbio->faila == -1) {
+6 -6
fs/btrfs/scrub.c
···
 			continue;
 		}
 		bio->bi_bdev = page->dev->bdev;
-		bio->bi_sector = page->physical >> 9;
+		bio->bi_iter.bi_sector = page->physical >> 9;
 
 		bio_add_page(bio, page->page, PAGE_SIZE, 0);
 		if (btrfsic_submit_bio_wait(READ, bio))
···
 	if (!bio)
 		return -EIO;
 	bio->bi_bdev = page_bad->dev->bdev;
-	bio->bi_sector = page_bad->physical >> 9;
+	bio->bi_iter.bi_sector = page_bad->physical >> 9;
 
 	ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
 	if (PAGE_SIZE != ret) {
···
 		bio->bi_private = sbio;
 		bio->bi_end_io = scrub_wr_bio_end_io;
 		bio->bi_bdev = sbio->dev->bdev;
-		bio->bi_sector = sbio->physical >> 9;
+		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		sbio->err = 0;
 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 		   spage->physical_for_dev_replace ||
···
 		bio->bi_private = sbio;
 		bio->bi_end_io = scrub_bio_end_io;
 		bio->bi_bdev = sbio->dev->bdev;
-		bio->bi_sector = sbio->physical >> 9;
+		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		sbio->err = 0;
 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 		   spage->physical ||
···
 		spin_unlock(&sctx->stat_lock);
 		return -ENOMEM;
 	}
-	bio->bi_size = 0;
-	bio->bi_sector = physical_for_dev_replace >> 9;
+	bio->bi_iter.bi_size = 0;
+	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
 	bio->bi_bdev = dev->bdev;
 	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
 	if (ret != PAGE_CACHE_SIZE) {
+6 -6
fs/btrfs/volumes.c
···
 	if (!q->merge_bvec_fn)
 		return 1;
 
-	bvm.bi_size = bio->bi_size - prev->bv_len;
+	bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len;
 	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
 		return 0;
 	return 1;
···
 	bio->bi_private = bbio;
 	btrfs_io_bio(bio)->stripe_index = dev_nr;
 	bio->bi_end_io = btrfs_end_bio;
-	bio->bi_sector = physical >> 9;
+	bio->bi_iter.bi_sector = physical >> 9;
 #ifdef DEBUG
 	{
 		struct rcu_string *name;
···
 	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
 		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
 				 bvec->bv_offset) < bvec->bv_len) {
-			u64 len = bio->bi_size;
+			u64 len = bio->bi_iter.bi_size;
 
 			atomic_inc(&bbio->stripes_pending);
 			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
···
 		bio->bi_private = bbio->private;
 		bio->bi_end_io = bbio->end_io;
 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
-		bio->bi_sector = logical >> 9;
+		bio->bi_iter.bi_sector = logical >> 9;
 		kfree(bbio);
 		bio_endio(bio, -EIO);
 	}
···
 {
 	struct btrfs_device *dev;
 	struct bio *first_bio = bio;
-	u64 logical = (u64)bio->bi_sector << 9;
+	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
 	u64 length = 0;
 	u64 map_length;
 	u64 *raid_map = NULL;
···
 	int total_devs = 1;
 	struct btrfs_bio *bbio = NULL;
 
-	length = bio->bi_size;
+	length = bio->bi_iter.bi_size;
 	map_length = length;
 
 	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
+6 -6
fs/buffer.c
···
 	 * let it through, and the IO layer will turn it into
 	 * an EIO.
 	 */
-	if (unlikely(bio->bi_sector >= maxsector))
+	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
 		return;
 
-	maxsector -= bio->bi_sector;
-	bytes = bio->bi_size;
+	maxsector -= bio->bi_iter.bi_sector;
+	bytes = bio->bi_iter.bi_size;
 	if (likely((bytes >> 9) <= maxsector))
 		return;
 
···
 	bytes = maxsector << 9;
 
 	/* Truncate the bio.. */
-	bio->bi_size = bytes;
+	bio->bi_iter.bi_size = bytes;
 	bio->bi_io_vec[0].bv_len = bytes;
 
 	/* ..and clear the end of the buffer for reads */
···
 	 */
 	bio = bio_alloc(GFP_NOIO, 1);
 
-	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
 	bio->bi_io_vec[0].bv_page = bh->b_page;
 	bio->bi_io_vec[0].bv_len = bh->b_size;
 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
 
 	bio->bi_vcnt = 1;
-	bio->bi_size = bh->b_size;
+	bio->bi_iter.bi_size = bh->b_size;
 
 	bio->bi_end_io = end_bio_bh_io_sync;
 	bio->bi_private = bh;
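The guard above clips a bio that would otherwise run past the end of the device. A compact userspace sketch of the same clipping, with a simplified iterator type (guard_device_end is an illustrative name, not a kernel function):

#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;

struct bvec_iter { sector_t bi_sector; unsigned int bi_size; };

static void guard_device_end(struct bvec_iter *iter, sector_t maxsector)
{
	unsigned int bytes;

	if (iter->bi_sector >= maxsector)
		return;		/* let the I/O layer turn it into EIO */

	maxsector -= iter->bi_sector;
	bytes = iter->bi_size;
	if ((bytes >> 9) <= maxsector)
		return;		/* fits entirely on the device */

	/* Truncate the residual count to the space available. */
	iter->bi_size = maxsector << 9;
}

int main(void)
{
	struct bvec_iter iter = { .bi_sector = 1000, .bi_size = 16 << 9 };

	guard_device_end(&iter, 1008);	/* only 8 sectors remain */
	assert(iter.bi_size == 8 << 9);
	return 0;
}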
+2 -2
fs/direct-io.c
···
 	bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
 	bio->bi_bdev = bdev;
-	bio->bi_sector = first_sector;
+	bio->bi_iter.bi_sector = first_sector;
 	if (dio->is_async)
 		bio->bi_end_io = dio_bio_end_aio;
 	else
···
 	if (sdio->bio) {
 		loff_t cur_offset = sdio->cur_page_fs_offset;
 		loff_t bio_next_offset = sdio->logical_offset_in_bio +
-			sdio->bio->bi_size;
+			sdio->bio->bi_iter.bi_size;
 
 		/*
 		 * See whether this new request is contiguous with the old.
+2 -2
fs/ext4/page-io.c
···
 static void ext4_end_bio(struct bio *bio, int error)
 {
 	ext4_io_end_t *io_end = bio->bi_private;
-	sector_t bi_sector = bio->bi_sector;
+	sector_t bi_sector = bio->bi_iter.bi_sector;
 
 	BUG_ON(!io_end);
 	bio->bi_end_io = NULL;
···
 	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
 	if (!bio)
 		return -ENOMEM;
-	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
 	bio->bi_end_io = ext4_end_bio;
 	bio->bi_private = ext4_get_io_end(io->io_end);
+1 -1
fs/f2fs/data.c
···
 	bio = f2fs_bio_alloc(bdev, 1);
 
 	/* Initialize the bio */
-	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
 	bio->bi_end_io = read_end_io;
 
 	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+1 -1
fs/f2fs/segment.c
···
 
 	bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
 	sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
-	sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+	sbi->bio[type]->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
 	sbi->bio[type]->bi_private = priv;
 	/*
 	 * The end_io will be assigned at the sumbission phase.
+1 -1
fs/gfs2/lops.c
···
 		nrvecs = max(nrvecs/2, 1U);
 	}
 
-	bio->bi_sector = blkno * (sb->s_blocksize >> 9);
+	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
 	bio->bi_bdev = sb->s_bdev;
 	bio->bi_end_io = gfs2_end_log_write;
 	bio->bi_private = sdp;
+1 -1
fs/gfs2/ops_fstype.c
···
 	lock_page(page);
 
 	bio = bio_alloc(GFP_NOFS, 1);
-	bio->bi_sector = sector * (sb->s_blocksize >> 9);
+	bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
 	bio->bi_bdev = sb->s_bdev;
 	bio_add_page(bio, page, PAGE_SIZE, 0);
 
+1 -1
fs/hfsplus/wrapper.c
···
 	sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
 
 	bio = bio_alloc(GFP_NOIO, 1);
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_bdev = sb->s_bdev;
 
 	if (!(rw & WRITE) && data)
+6 -6
fs/jfs/jfs_logmgr.c
···
 
 	bio = bio_alloc(GFP_NOFS, 1);
 
-	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+	bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
 	bio->bi_bdev = log->bdev;
 	bio->bi_io_vec[0].bv_page = bp->l_page;
 	bio->bi_io_vec[0].bv_len = LOGPSIZE;
 	bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
 	bio->bi_vcnt = 1;
-	bio->bi_size = LOGPSIZE;
+	bio->bi_iter.bi_size = LOGPSIZE;
 
 	bio->bi_end_io = lbmIODone;
 	bio->bi_private = bp;
 	/*check if journaling to disk has been disabled*/
 	if (log->no_integrity) {
-		bio->bi_size = 0;
+		bio->bi_iter.bi_size = 0;
 		lbmIODone(bio, 0);
 	} else {
 		submit_bio(READ_SYNC, bio);
···
 	jfs_info("lbmStartIO\n");
 
 	bio = bio_alloc(GFP_NOFS, 1);
-	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+	bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
 	bio->bi_bdev = log->bdev;
 	bio->bi_io_vec[0].bv_page = bp->l_page;
 	bio->bi_io_vec[0].bv_len = LOGPSIZE;
 	bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
 	bio->bi_vcnt = 1;
-	bio->bi_size = LOGPSIZE;
+	bio->bi_iter.bi_size = LOGPSIZE;
 
 	bio->bi_end_io = lbmIODone;
 	bio->bi_private = bp;
 
 	/* check if journaling to disk has been disabled */
 	if (log->no_integrity) {
-		bio->bi_size = 0;
+		bio->bi_iter.bi_size = 0;
 		lbmIODone(bio, 0);
 	} else {
 		submit_bio(WRITE_SYNC, bio);
+5 -4
fs/jfs/jfs_metapage.c
···
 	 * count from hitting zero before we're through
 	 */
 	inc_io(page);
-	if (!bio->bi_size)
+	if (!bio->bi_iter.bi_size)
 		goto dump_bio;
 	submit_bio(WRITE, bio);
 	nr_underway++;
···
 
 	bio = bio_alloc(GFP_NOFS, 1);
 	bio->bi_bdev = inode->i_sb->s_bdev;
-	bio->bi_sector = pblock << (inode->i_blkbits - 9);
+	bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
 	bio->bi_end_io = metapage_write_end_io;
 	bio->bi_private = page;
 
···
 	if (bio) {
 		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
 			goto add_failed;
-		if (!bio->bi_size)
+		if (!bio->bi_iter.bi_size)
 			goto dump_bio;
 
 		submit_bio(WRITE, bio);
···
 
 			bio = bio_alloc(GFP_NOFS, 1);
 			bio->bi_bdev = inode->i_sb->s_bdev;
-			bio->bi_sector = pblock << (inode->i_blkbits - 9);
+			bio->bi_iter.bi_sector =
+				pblock << (inode->i_blkbits - 9);
 			bio->bi_end_io = metapage_read_end_io;
 			bio->bi_private = page;
 			len = xlen << inode->i_blkbits;
+10 -10
fs/logfs/dev_bdev.c
···
 	bio_vec.bv_len = PAGE_SIZE;
 	bio_vec.bv_offset = 0;
 	bio.bi_vcnt = 1;
-	bio.bi_size = PAGE_SIZE;
 	bio.bi_bdev = bdev;
-	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
+	bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
+	bio.bi_iter.bi_size = PAGE_SIZE;
 
 	return submit_bio_wait(rw, &bio);
 }
···
 		if (i >= max_pages) {
 			/* Block layer cannot split bios :( */
 			bio->bi_vcnt = i;
-			bio->bi_size = i * PAGE_SIZE;
+			bio->bi_iter.bi_size = i * PAGE_SIZE;
 			bio->bi_bdev = super->s_bdev;
-			bio->bi_sector = ofs >> 9;
+			bio->bi_iter.bi_sector = ofs >> 9;
 			bio->bi_private = sb;
 			bio->bi_end_io = writeseg_end_io;
 			atomic_inc(&super->s_pending_writes);
···
 		unlock_page(page);
 	}
 	bio->bi_vcnt = nr_pages;
-	bio->bi_size = nr_pages * PAGE_SIZE;
+	bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
 	bio->bi_bdev = super->s_bdev;
-	bio->bi_sector = ofs >> 9;
+	bio->bi_iter.bi_sector = ofs >> 9;
 	bio->bi_private = sb;
 	bio->bi_end_io = writeseg_end_io;
 	atomic_inc(&super->s_pending_writes);
···
 		if (i >= max_pages) {
 			/* Block layer cannot split bios :( */
 			bio->bi_vcnt = i;
-			bio->bi_size = i * PAGE_SIZE;
+			bio->bi_iter.bi_size = i * PAGE_SIZE;
 			bio->bi_bdev = super->s_bdev;
-			bio->bi_sector = ofs >> 9;
+			bio->bi_iter.bi_sector = ofs >> 9;
 			bio->bi_private = sb;
 			bio->bi_end_io = erase_end_io;
 			atomic_inc(&super->s_pending_writes);
···
 		bio->bi_io_vec[i].bv_offset = 0;
 	}
 	bio->bi_vcnt = nr_pages;
-	bio->bi_size = nr_pages * PAGE_SIZE;
+	bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
 	bio->bi_bdev = super->s_bdev;
-	bio->bi_sector = ofs >> 9;
+	bio->bi_iter.bi_sector = ofs >> 9;
 	bio->bi_private = sb;
 	bio->bi_end_io = erase_end_io;
 	atomic_inc(&super->s_pending_writes);
+1 -1
fs/mpage.c
···
 
 	if (bio) {
 		bio->bi_bdev = bdev;
-		bio->bi_sector = first_sector;
+		bio->bi_iter.bi_sector = first_sector;
 	}
 	return bio;
 }
+5 -4
fs/nfs/blocklayout/blocklayout.c
···
 	if (bio) {
 		get_parallel(bio->bi_private);
 		dprintk("%s submitting %s bio %u@%llu\n", __func__,
-			rw == READ ? "read" : "write",
-			bio->bi_size, (unsigned long long)bio->bi_sector);
+			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
+			(unsigned long long)bio->bi_iter.bi_sector);
 		submit_bio(rw, bio);
 	}
 	return NULL;
···
 	}
 
 	if (bio) {
-		bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+		bio->bi_iter.bi_sector = isect - be->be_f_offset +
+			be->be_v_offset;
 		bio->bi_bdev = be->be_mdev;
 		bio->bi_end_io = end_io;
 		bio->bi_private = par;
···
 	isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
 		(offset / SECTOR_SIZE);
 
-	bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+	bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
 	bio->bi_bdev = be->be_mdev;
 	bio->bi_end_io = bl_read_single_end_io;
 
+2 -1
fs/nilfs2/segbuf.c
···
 	}
 	if (likely(bio)) {
 		bio->bi_bdev = nilfs->ns_bdev;
-		bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
+		bio->bi_iter.bi_sector =
+			start << (nilfs->ns_blocksize_bits - 9);
 	}
 	return bio;
 }
+1 -1
fs/ocfs2/cluster/heartbeat.c
···
 	}
 
 	/* Must put everything in 512 byte sectors for the bio... */
-	bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9);
+	bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
 	bio->bi_bdev = reg->hr_bdev;
 	bio->bi_private = wc;
 	bio->bi_end_io = o2hb_bio_end_io;
+1 -1
fs/xfs/xfs_aops.c
···
 	struct bio *bio = bio_alloc(GFP_NOIO, nvecs);
 
 	ASSERT(bio->bi_private == NULL);
-	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
 	return bio;
 }
+2 -2
fs/xfs/xfs_buf.c
···
 
 	bio = bio_alloc(GFP_NOIO, nr_pages);
 	bio->bi_bdev = bp->b_target->bt_bdev;
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_end_io = xfs_buf_bio_end_io;
 	bio->bi_private = bp;
 
···
 		total_nr_pages--;
 	}
 
-	if (likely(bio->bi_size)) {
+	if (likely(bio->bi_iter.bi_size)) {
 		if (xfs_buf_is_vmapped(bp)) {
 			flush_kernel_vmap_range(bp->b_addr,
 						xfs_buf_vmap_len(bp));
+8 -8
include/linux/bio.h
···
  * on highmem page vectors
  */
 #define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
-#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
+#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_iter.bi_idx)
 #define bio_page(bio)		bio_iovec((bio))->bv_page
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
-#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
-#define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors((bio)))
+#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_iter.bi_idx)
+#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
+#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
 	if (bio->bi_vcnt)
 		return bio_iovec(bio)->bv_len;
 	else /* dataless requests such as discard */
-		return bio->bi_size;
+		return bio->bi_iter.bi_size;
 }
 
 static inline void *bio_data(struct bio *bio)
···
  */
 
 #define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
-#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)
+#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_iter.bi_idx)
 
 /* Default implementation of BIOVEC_PHYS_MERGEABLE */
 #define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
···
 	     i++)
 
 #define bio_for_each_segment(bvl, bio, i)				\
-	for (i = (bio)->bi_idx;						\
+	for (i = (bio)->bi_iter.bi_idx;					\
 	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
 	     i++)
 
···
 #define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)
 
 #define bio_kmap_irq(bio, flags) \
-	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
+	__bio_kmap_irq((bio), (bio)->bi_iter.bi_idx, (flags))
 #define bio_kunmap_irq(buf,flags)	__bio_kunmap_irq(buf, flags)
 
 /*
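Each macro above derives its answer from the embedded iterator. Rewritten as plain functions over a simplified stand-in type, the arithmetic reads as follows (bio_sketch is illustrative, not the kernel struct):

#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;

struct bvec_iter { sector_t bi_sector; unsigned int bi_size, bi_idx; };
struct bio_sketch { struct bvec_iter bi_iter; unsigned short bi_vcnt; };

/* bio_sectors(): residual byte count converted to 512-byte sectors */
static unsigned int bio_sectors(const struct bio_sketch *bio)
{
	return bio->bi_iter.bi_size >> 9;
}

/* bio_end_sector(): first sector past the end of the remaining I/O */
static sector_t bio_end_sector(const struct bio_sketch *bio)
{
	return bio->bi_iter.bi_sector + bio_sectors(bio);
}

/* bio_segments(): vectors not yet consumed by the iterator */
static unsigned int bio_segments(const struct bio_sketch *bio)
{
	return bio->bi_vcnt - bio->bi_iter.bi_idx;
}

int main(void)
{
	struct bio_sketch b = { { 100, 8192, 1 }, 4 };

	assert(bio_sectors(&b) == 16);
	assert(bio_end_sector(&b) == 116);
	assert(bio_segments(&b) == 3);
	return 0;
}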
+12 -7
include/linux/blk_types.h
···
 	unsigned int	bv_offset;
 };
 
+struct bvec_iter {
+	sector_t		bi_sector;	/* device address in 512 byte
+						   sectors */
+	unsigned int		bi_size;	/* residual I/O count */
+
+	unsigned int		bi_idx;		/* current index into bvl_vec */
+};
+
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
  * stacking drivers)
  */
 struct bio {
-	sector_t		bi_sector;	/* device address in 512 byte
-						   sectors */
 	struct bio		*bi_next;	/* request queue link */
 	struct block_device	*bi_bdev;
 	unsigned long		bi_flags;	/* status, command, etc */
···
 						 * top bits priority
 						 */
 
-	unsigned short		bi_vcnt;	/* how many bio_vec's */
-	unsigned short		bi_idx;		/* current index into bvl_vec */
+	struct bvec_iter	bi_iter;
 
 	/* Number of segments in this BIO after
 	 * physical address coalescing is performed.
 	 */
 	unsigned int		bi_phys_segments;
-
-	unsigned int		bi_size;	/* residual I/O count */
 
 	/*
 	 * To keep track of the max segment size, we account for the
···
 	struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
 
+	unsigned short		bi_vcnt;	/* how many bio_vec's */
+
 	/*
 	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
 	 */
 
-	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */
+	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */
 
 	atomic_t		bi_cnt;		/* pin count */
 
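Since every field access in this patch follows from this layout, a minimal userspace rendering of the new split may help when reading the per-driver hunks. The types here are simplified stand-ins (sector_t as a plain 64-bit value, bio_sketch instead of struct bio):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

struct bvec_iter {
	sector_t     bi_sector; /* device address in 512-byte sectors */
	unsigned int bi_size;   /* residual I/O count, in bytes */
	unsigned int bi_idx;    /* current index into the bio_vec array */
};

struct bio_sketch {
	struct bvec_iter bi_iter; /* all iterator state in one place */
	unsigned short   bi_vcnt; /* how many bio_vec's */
};

int main(void)
{
	struct bio_sketch bio = { .bi_iter = { .bi_sector = 2048,
					       .bi_size = 4096,
					       .bi_idx = 0 } };

	/* Code that read bio->bi_sector now reads bio->bi_iter.bi_sector. */
	printf("starts at sector %llu, %u bytes remaining\n",
	       (unsigned long long)bio.bi_iter.bi_sector,
	       bio.bi_iter.bi_size);
	return 0;
}

Keeping the three fields in one embedded struct means the position of an in-flight request can be read, copied, or restored as a single unit rather than field by field.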
+13 -13
include/trace/events/bcache.h
···
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->orig_major	= d->disk->major;
 		__entry->orig_minor	= d->disk->first_minor;
-		__entry->sector		= bio->bi_sector;
-		__entry->orig_sector	= bio->bi_sector - 16;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
···
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u",
···
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		__entry->cache_hit = hit;
 		__entry->bypass = bypass;
 	),
···
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		__entry->writeback = writeback;
 		__entry->bypass = bypass;
 	),
+13 -13
include/trace/events/block.h
···
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev ?
 					  bio->bi_bdev->bd_dev : 0;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
···
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
 		__entry->error		= error;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u [%d]",
···
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
···
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
···
 
 	TP_fast_assign(
 		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
-		__entry->sector		= bio ? bio->bi_sector : 0;
+		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
 		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
 		blk_fill_rwbs(__entry->rwbs,
 			      bio ? bio->bi_rw : 0, __entry->nr_sector);
···
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->new_sector	= new_sector;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
···
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
 		__entry->old_dev	= dev;
 		__entry->old_sector	= from;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
+2 -2
include/trace/events/f2fs.h
···
 		__entry->dev		= sb->s_dev;
 		__entry->btype		= btype;
 		__entry->sync		= sync;
-		__entry->sector		= bio->bi_sector;
-		__entry->size		= bio->bi_size;
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->size		= bio->bi_iter.bi_size;
 	),
 
 	TP_printk("dev = (%d,%d), type = %s, io = %s, sector = %lld, size = %u",
+1 -1
kernel/power/block_io.c
···
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_bdev = bdev;
 	bio->bi_end_io = end_swap_bio_read;
 
+8 -7
kernel/trace/blktrace.c
···
 	if (!error && !bio_flagged(bio, BIO_UPTODATE))
 		error = EIO;
 
-	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
-			error, 0, NULL);
+	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+			bio->bi_rw, what, error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
···
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(pdu);
 
-		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+		__blk_add_trace(bt, bio->bi_iter.bi_sector,
+				bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
+				!bio_flagged(bio, BIO_UPTODATE),
 				sizeof(rpdu), &rpdu);
 	}
 }
···
 	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
 	r.sector_from = cpu_to_be64(from);
 
-	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-			BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
-			sizeof(r), &r);
+	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+			bio->bi_rw, BLK_TA_REMAP,
+			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
 }
 
 /**
+5 -5
mm/page_io.c
···
 
 	bio = bio_alloc(gfp_flags, 1);
 	if (bio) {
-		bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
-		bio->bi_sector <<= PAGE_SHIFT - 9;
+		bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
+		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
 		bio->bi_io_vec[0].bv_page = page;
 		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
 		bio->bi_io_vec[0].bv_offset = 0;
 		bio->bi_vcnt = 1;
-		bio->bi_size = PAGE_SIZE;
+		bio->bi_iter.bi_size = PAGE_SIZE;
 		bio->bi_end_io = end_io;
 	}
 	return bio;
···
 		printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
 		       imajor(bio->bi_bdev->bd_inode),
 		       iminor(bio->bi_bdev->bd_inode),
-		       (unsigned long long)bio->bi_sector);
+		       (unsigned long long)bio->bi_iter.bi_sector);
 		ClearPageReclaim(page);
 	}
 	end_page_writeback(page);
···
 		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
 		       imajor(bio->bi_bdev->bd_inode),
 		       iminor(bio->bi_bdev->bd_inode),
-		       (unsigned long long)bio->bi_sector);
+		       (unsigned long long)bio->bi_iter.bi_sector);
 		goto out;
 	}
 