Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

block: replace bi_bdev with a gendisk pointer and partitions index

This way we don't need a block_device structure to submit I/O. The
block_device has different lifetime rules from the gendisk and
request_queue and is usually only available when the block device node
is open. Other callers need to explicitly create one (e.g. the lightnvm
passthrough code, or the new nvme multipathing code).

For the actual I/O path all that we need is the gendisk, which exists
once per block device. But given that the block layer also does
partition remapping we additionally need a partition index, which is
used for said remapping in generic_make_request.

Note that all the block drivers generally want request_queue or
sometimes the gendisk, so this removes a layer of indirection all
over the stack.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig, committed by Jens Axboe
74d46992 c2ee070f
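The include/linux/bio.h part of this commit is not reproduced in the excerpt below. As context for the hunks that follow, here is a minimal sketch of what the new fields and helpers presumably look like, inferred purely from how the converted drivers use them; the bd_partno member of struct block_device is likewise an assumption of this sketch, while disk_name() and disk_devt() are pre-existing genhd helpers:

	/* Sketch, inferred from usage below -- not copied from the commit. */
	struct bio {
		struct gendisk	*bi_disk;	/* replaces struct block_device *bi_bdev */
		u8		bi_partno;	/* partition index; 0 = whole disk */
		/* ... */
	};

	/* Point a bio at a block device: record its disk and partition number. */
	#define bio_set_dev(bio, bdev)				\
	do {							\
		(bio)->bi_disk = (bdev)->bd_disk;		\
		(bio)->bi_partno = (bdev)->bd_partno;		\
	} while (0)

	/* Copy the device of one bio to another (see the bcache and raid5-ppl hunks). */
	#define bio_copy_dev(dst, src)				\
	do {							\
		(dst)->bi_disk = (src)->bi_disk;		\
		(dst)->bi_partno = (src)->bi_partno;		\
	} while (0)

	/* dev_t for tracing and error messages, replacing bio->bi_bdev->bd_dev. */
	#define bio_dev(bio)		disk_devt((bio)->bi_disk)

	/* Device name for printk paths, replacing bdevname(bio->bi_bdev, buf). */
	#define bio_devname(bio, buf)	disk_name((bio)->bi_disk, (bio)->bi_partno, (buf))

Everything the hot path needs is then reachable as bio->bi_disk->queue, with the partition resolved exactly once, in generic_make_request().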

+358 -357 total
+1 -1
arch/powerpc/sysdev/axonram.c
···
 static blk_qc_t
 axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 {
-	struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
+	struct axon_ram_bank *bank = bio->bi_disk->private_data;
 	unsigned long phys_mem, phys_end;
 	void *user_mem;
 	struct bio_vec vec;
+8 -10
block/bio-integrity.c
···
 	iv = bip->bip_vec + bip->bip_vcnt;
 
 	if (bip->bip_vcnt &&
-	    bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
+	    bvec_gap_to_prev(bio->bi_disk->queue,
 			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
 		return 0;
 
···
 static blk_status_t bio_integrity_process(struct bio *bio,
 		struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn)
 {
-	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
 	struct blk_integrity_iter iter;
 	struct bvec_iter bviter;
 	struct bio_vec bv;
···
 	void *prot_buf = page_address(bip->bip_vec->bv_page) +
 		bip->bip_vec->bv_offset;
 
-	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
+	iter.disk_name = bio->bi_disk->disk_name;
 	iter.interval = 1 << bi->interval_exp;
 	iter.seed = proc_iter->bi_sector;
 	iter.prot_buf = prot_buf;
···
 bool bio_integrity_prep(struct bio *bio)
 {
 	struct bio_integrity_payload *bip;
-	struct blk_integrity *bi;
-	struct request_queue *q;
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+	struct request_queue *q = bio->bi_disk->queue;
 	void *buf;
 	unsigned long start, end;
 	unsigned int len, nr_pages;
···
 	unsigned int intervals;
 	blk_status_t status;
 
-	bi = bdev_get_integrity(bio->bi_bdev);
 	if (!bi)
 		return true;
 
-	q = bdev_get_queue(bio->bi_bdev);
 	if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
 		return true;
 
···
 	struct bio_integrity_payload *bip =
 		container_of(work, struct bio_integrity_payload, bip_work);
 	struct bio *bio = bip->bip_bio;
-	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
 	struct bvec_iter iter = bio->bi_iter;
 
 	/*
···
 void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
 {
 	struct bio_integrity_payload *bip = bio_integrity(bio);
-	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
 	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
 
 	bip->bip_iter.bi_sector += bytes_done >> 9;
···
 void bio_integrity_trim(struct bio *bio)
 {
 	struct bio_integrity_payload *bip = bio_integrity(bio);
-	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
 
 	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
 }
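For reference, the bdev-based accessor dropped above was already a thin wrapper in the blkdev.h of this era (quoted from memory, so treat it as a sketch); the conversion therefore removes one pointer dereference rather than any functionality:

	static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
	{
		return blk_get_integrity(bdev->bd_disk);
	}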
+5 -5
block/bio.c
···
 	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
 
 	/*
-	 * most users will be overriding ->bi_bdev with a new target,
+	 * most users will be overriding ->bi_disk with a new target,
 	 * so we don't set nor calculate new physical/hw segment counts here
 	 */
-	bio->bi_bdev = bio_src->bi_bdev;
+	bio->bi_disk = bio_src->bi_disk;
 	bio_set_flag(bio, BIO_CLONED);
 	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_write_hint = bio_src->bi_write_hint;
···
 	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
 	if (!bio)
 		return NULL;
-	bio->bi_bdev = bio_src->bi_bdev;
+	bio->bi_disk = bio_src->bi_disk;
 	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
···
 		goto again;
 	}
 
-	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
-		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio,
+	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+		trace_block_bio_complete(bio->bi_disk->queue, bio,
 					 blk_status_to_errno(bio->bi_status));
 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
 	}
+49 -51
block/blk-core.c
···
 		return BLK_QC_T_NONE;
 	}
 
-/*
- * If bio->bi_dev is a partition, remap the location
- */
-static inline void blk_partition_remap(struct bio *bio)
-{
-	struct block_device *bdev = bio->bi_bdev;
-
-	/*
-	 * Zone reset does not include bi_size so bio_sectors() is always 0.
-	 * Include a test for the reset op code and perform the remap if needed.
-	 */
-	if (bdev != bdev->bd_contains &&
-	    (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)) {
-		struct hd_struct *p = bdev->bd_part;
-
-		bio->bi_iter.bi_sector += p->start_sect;
-		bio->bi_bdev = bdev->bd_contains;
-
-		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
-				      bdev->bd_dev,
-				      bio->bi_iter.bi_sector - p->start_sect);
-	}
-}
-
 static void handle_bad_sector(struct bio *bio)
 {
 	char b[BDEVNAME_SIZE];
 
 	printk(KERN_INFO "attempt to access beyond end of device\n");
 	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
-			bdevname(bio->bi_bdev, b),
-			bio->bi_opf,
+			bio_devname(bio, b), bio->bi_opf,
 			(unsigned long long)bio_end_sector(bio),
-			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
+			(long long)get_capacity(bio->bi_disk));
 }
 
 #ifdef CONFIG_FAIL_MAKE_REQUEST
···
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
 /*
+ * Remap block n of partition p to block n+start(p) of the disk.
+ */
+static inline int blk_partition_remap(struct bio *bio)
+{
+	struct hd_struct *p;
+	int ret = 0;
+
+	/*
+	 * Zone reset does not include bi_size so bio_sectors() is always 0.
+	 * Include a test for the reset op code and perform the remap if needed.
+	 */
+	if (!bio->bi_partno ||
+	    (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET))
+		return 0;
+
+	rcu_read_lock();
+	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
+	if (likely(p && !should_fail_request(p, bio->bi_iter.bi_size))) {
+		bio->bi_iter.bi_sector += p->start_sect;
+		bio->bi_partno = 0;
+		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
+				      bio->bi_iter.bi_sector - p->start_sect);
+	} else {
+		printk("%s: fail for partition %d\n", __func__, bio->bi_partno);
+		ret = -EIO;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/*
  * Check whether this bio extends beyond the end of the device.
  */
 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
···
 		return 0;
 
 	/* Test device or partition size, when known. */
-	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
+	maxsector = get_capacity(bio->bi_disk);
 	if (maxsector) {
 		sector_t sector = bio->bi_iter.bi_sector;
 
···
 	int nr_sectors = bio_sectors(bio);
 	blk_status_t status = BLK_STS_IOERR;
 	char b[BDEVNAME_SIZE];
-	struct hd_struct *part;
 
 	might_sleep();
 
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
-	q = bdev_get_queue(bio->bi_bdev);
+	q = bio->bi_disk->queue;
 	if (unlikely(!q)) {
 		printk(KERN_ERR
 		       "generic_make_request: Trying to access "
 			"nonexistent block-device %s (%Lu)\n",
-			bdevname(bio->bi_bdev, b),
-			(long long) bio->bi_iter.bi_sector);
+			bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
 		goto end_io;
 	}
 
···
 	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
 		goto not_supported;
 
-	part = bio->bi_bdev->bd_part;
-	if (should_fail_request(part, bio->bi_iter.bi_size) ||
-	    should_fail_request(&part_to_disk(part)->part0,
-				bio->bi_iter.bi_size))
+	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
 		goto end_io;
 
-	/*
-	 * If this device has partitions, remap block n
-	 * of partition p to block n+start(p) of the disk.
-	 */
-	blk_partition_remap(bio);
+	if (blk_partition_remap(bio))
+		goto end_io;
 
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
···
 		goto not_supported;
 		break;
 	case REQ_OP_WRITE_SAME:
-		if (!bdev_write_same(bio->bi_bdev))
+		if (!q->limits.max_write_same_sectors)
 			goto not_supported;
 		break;
 	case REQ_OP_ZONE_REPORT:
 	case REQ_OP_ZONE_RESET:
-		if (!bdev_is_zoned(bio->bi_bdev))
+		if (!blk_queue_is_zoned(q))
 			goto not_supported;
 		break;
 	case REQ_OP_WRITE_ZEROES:
-		if (!bdev_write_zeroes_sectors(bio->bi_bdev))
+		if (!q->limits.max_write_zeroes_sectors)
 			goto not_supported;
 		break;
 	default:
···
 	bio_list_init(&bio_list_on_stack[0]);
 	current->bio_list = bio_list_on_stack;
 	do {
-		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+		struct request_queue *q = bio->bi_disk->queue;
 
 		if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
 			struct bio_list lower, same;
···
 			bio_list_init(&lower);
 			bio_list_init(&same);
 			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
-				if (q == bdev_get_queue(bio->bi_bdev))
+				if (q == bio->bi_disk->queue)
 					bio_list_add(&same, bio);
 				else
 					bio_list_add(&lower, bio);
···
 	unsigned int count;
 
 	if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
-		count = bdev_logical_block_size(bio->bi_bdev) >> 9;
+		count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
 	else
 		count = bio_sectors(bio);
 
···
 			current->comm, task_pid_nr(current),
 			op_is_write(bio_op(bio)) ? "WRITE" : "READ",
 			(unsigned long long)bio->bi_iter.bi_sector,
-			bdevname(bio->bi_bdev, b),
-			count);
+			bio_devname(bio, b), count);
 	}
 }
 
···
 	rq->__data_len = bio->bi_iter.bi_size;
 	rq->bio = rq->biotail = bio;
 
-	if (bio->bi_bdev)
-		rq->rq_disk = bio->bi_bdev->bd_disk;
+	if (bio->bi_disk)
+		rq->rq_disk = bio->bi_disk;
 
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+1 -1
block/blk-flush.c
···
 		return -ENXIO;
 
 	bio = bio_alloc(gfp_mask, 0);
-	bio->bi_bdev = bdev;
+	bio_set_dev(bio, bdev);
 	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	ret = submit_bio_wait(bio);
+4 -4
block/blk-lib.c
···
 
 		bio = next_bio(bio, 0, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_bdev = bdev;
+		bio_set_dev(bio, bdev);
 		bio_set_op_attrs(bio, op, 0);
 
 		bio->bi_iter.bi_size = req_sects << 9;
···
 	while (nr_sects) {
 		bio = next_bio(bio, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_bdev = bdev;
+		bio_set_dev(bio, bdev);
 		bio->bi_vcnt = 1;
 		bio->bi_io_vec->bv_page = page;
 		bio->bi_io_vec->bv_offset = 0;
···
 	while (nr_sects) {
 		bio = next_bio(bio, 0, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_bdev = bdev;
+		bio_set_dev(bio, bdev);
 		bio->bi_opf = REQ_OP_WRITE_ZEROES;
 		if (flags & BLKDEV_ZERO_NOUNMAP)
 			bio->bi_opf |= REQ_NOUNMAP;
···
 		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
 			       gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_bdev = bdev;
+		bio_set_dev(bio, bdev);
 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
 		while (nr_sects != 0) {
+1 -1
block/blk-merge.c
···
 		return false;
 
 	/* must be same device and not a special request */
-	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
+	if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
 		return false;
 
 	/* only merge integrity protected bio into ditto rq */
+2 -2
block/blk-zoned.c
···
 	if (!bio)
 		return -ENOMEM;
 
-	bio->bi_bdev = bdev;
+	bio_set_dev(bio, bdev);
 	bio->bi_iter.bi_sector = blk_zone_start(q, sector);
 	bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0);
 
···
 
 		bio = bio_alloc(gfp_mask, 0);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_bdev = bdev;
+		bio_set_dev(bio, bdev);
 		bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
 
 		ret = submit_bio_wait(bio);
+2 -3
drivers/block/brd.c
···
 
 static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 {
-	struct block_device *bdev = bio->bi_bdev;
-	struct brd_device *brd = bdev->bd_disk->private_data;
+	struct brd_device *brd = bio->bi_disk->private_data;
 	struct bio_vec bvec;
 	sector_t sector;
 	struct bvec_iter iter;
 
 	sector = bio->bi_iter.bi_sector;
-	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
+	if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
 		goto io_error;
 
 	bio_for_each_segment(bvec, bio, iter) {
+1 -1
drivers/block/drbd/drbd_actlog.c
···
 		op_flags |= REQ_SYNC;
 
 	bio = bio_alloc_drbd(GFP_NOIO);
-	bio->bi_bdev = bdev->md_bdev;
+	bio_set_dev(bio, bdev->md_bdev);
 	bio->bi_iter.bi_sector = sector;
 	err = -EIO;
 	if (bio_add_page(bio, device->md_io.page, size, 0) != size)
+1 -1
drivers/block/drbd/drbd_bitmap.c
···
 		bm_store_page_idx(page, page_nr);
 	} else
 		page = b->bm_pages[page_nr];
-	bio->bi_bdev = device->ldev->md_bdev;
+	bio_set_dev(bio, device->ldev->md_bdev);
 	bio->bi_iter.bi_sector = on_disk_sector;
 	/* bio_add_page of a single page to an empty bio will always succeed,
 	 * according to api.  Do we want to assert that? */
+2 -2
drivers/block/drbd/drbd_int.h
···
 					     int fault_type, struct bio *bio)
 {
 	__release(local);
-	if (!bio->bi_bdev) {
-		drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
+	if (!bio->bi_disk) {
+		drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n");
 		bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		return;
+2 -2
drivers/block/drbd/drbd_receiver.c
···
 
 	octx->device = device;
 	octx->ctx = ctx;
-	bio->bi_bdev = device->ldev->backing_bdev;
+	bio_set_dev(bio, device->ldev->backing_bdev);
 	bio->bi_private = octx;
 	bio->bi_end_io = one_flush_endio;
 	bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
···
 		}
 		/* > peer_req->i.sector, unless this is the first bio */
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_bdev = device->ldev->backing_bdev;
+		bio_set_dev(bio, device->ldev->backing_bdev);
 		bio_set_op_attrs(bio, op, op_flags);
 		bio->bi_private = peer_req;
 		bio->bi_end_io = drbd_peer_request_endio;
+1 -1
drivers/block/drbd/drbd_req.c
···
 	else
 		type = DRBD_FAULT_DT_RD;
 
-	bio->bi_bdev = device->ldev->backing_bdev;
+	bio_set_dev(bio, device->ldev->backing_bdev);
 
 	/* State may have changed since we grabbed our reference on the
 	 * ->ldev member. Double check, and short-circuit to endio.
+1 -1
drivers/block/drbd/drbd_worker.c
···
 	drbd_al_begin_io(device, &req->i);
 
 	drbd_req_make_private_bio(req, req->master_bio);
-	req->private_bio->bi_bdev = device->ldev->backing_bdev;
+	bio_set_dev(req->private_bio, device->ldev->backing_bdev);
 	generic_make_request(req->private_bio);
 
 	return 0;
+1 -1
drivers/block/floppy.c
···
 	cbdata.drive = drive;
 
 	bio_init(&bio, &bio_vec, 1);
-	bio.bi_bdev = bdev;
+	bio_set_dev(&bio, bdev);
 	bio_add_page(&bio, page, size, 0);
 
 	bio.bi_iter.bi_sector = 0;
+5 -6
drivers/block/pktcdvd.c
···
 		bio = pkt->r_bios[f];
 		bio_reset(bio);
 		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
-		bio->bi_bdev = pd->bdev;
+		bio_set_dev(bio, pd->bdev);
 		bio->bi_end_io = pkt_end_io_read;
 		bio->bi_private = pkt;
 
···
 	pkt->sector = new_sector;
 
 	bio_reset(pkt->bio);
-	pkt->bio->bi_bdev = pd->bdev;
+	bio_set_dev(pkt->bio, pd->bdev);
 	bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
 	pkt->bio->bi_iter.bi_sector = new_sector;
 	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
···
 
 	bio_reset(pkt->w_bio);
 	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
-	pkt->w_bio->bi_bdev = pd->bdev;
+	bio_set_dev(pkt->w_bio, pd->bdev);
 	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
 	pkt->w_bio->bi_private = pkt;
 
···
 
 		psd->pd = pd;
 		psd->bio = bio;
-		cloned_bio->bi_bdev = pd->bdev;
+		bio_set_dev(cloned_bio, pd->bdev);
 		cloned_bio->bi_private = psd;
 		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
 		pd->stats.secs_r += bio_sectors(bio);
···
 
 	pd = q->queuedata;
 	if (!pd) {
-		pr_err("%s incorrect request queue\n",
-		       bdevname(bio->bi_bdev, b));
+		pr_err("%s incorrect request queue\n", bio_devname(bio, b));
 		goto end_io;
 	}
 
+2 -2
drivers/block/xen-blkback/blkback.c
···
 			goto fail_put_bio;
 
 		biolist[nbio++] = bio;
-		bio->bi_bdev = preq.bdev;
+		bio_set_dev(bio, preq.bdev);
 		bio->bi_private = pending_req;
 		bio->bi_end_io = end_block_io_op;
 		bio->bi_iter.bi_sector = preq.sector_number;
···
 			goto fail_put_bio;
 
 		biolist[nbio++] = bio;
-		bio->bi_bdev = preq.bdev;
+		bio_set_dev(bio, preq.bdev);
 		bio->bi_private = pending_req;
 		bio->bi_end_io = end_block_io_op;
 		bio_set_op_attrs(bio, operation, operation_flags);
+1 -1
drivers/md/bcache/debug.c
···
 	v->keys.ops = b->keys.ops;
 
 	bio = bch_bbio_alloc(b->c);
-	bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
+	bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev);
 	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
 	bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
 	bio->bi_opf = REQ_OP_READ | REQ_META;
+1 -1
drivers/md/bcache/io.c
···
 	struct bbio *b = container_of(bio, struct bbio, bio);
 
 	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
-	bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
+	bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);
 
 	b->submit_time_us = local_clock_us();
 	closure_bio_submit(bio, bio->bi_private);
+3 -3
drivers/md/bcache/journal.c
···
 
 		bio_reset(bio);
 		bio->bi_iter.bi_sector = bucket + offset;
-		bio->bi_bdev = ca->bdev;
+		bio_set_dev(bio, ca->bdev);
 		bio->bi_iter.bi_size = len << 9;
 
 		bio->bi_end_io = journal_read_endio;
···
 	bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
 	bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
 						  ca->sb.d[ja->discard_idx]);
-	bio->bi_bdev = ca->bdev;
+	bio_set_dev(bio, ca->bdev);
 	bio->bi_iter.bi_size = bucket_bytes(ca);
 	bio->bi_end_io = journal_discard_endio;
 
···
 
 		bio_reset(bio);
 		bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
-		bio->bi_bdev = ca->bdev;
+		bio_set_dev(bio, ca->bdev);
 		bio->bi_iter.bi_size = sectors << 9;
 
 		bio->bi_end_io = journal_write_endio;
+8 -8
drivers/md/bcache/request.c
···
 static void bio_complete(struct search *s)
 {
 	if (s->orig_bio) {
-		struct request_queue *q = bdev_get_queue(s->orig_bio->bi_bdev);
+		struct request_queue *q = s->orig_bio->bi_disk->queue;
 		generic_end_io_acct(q, bio_data_dir(s->orig_bio),
 				    &s->d->disk->part0, s->start_time);
 
···
 	if (s->iop.bio) {
 		bio_reset(s->iop.bio);
 		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
-		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
+		bio_copy_dev(s->iop.bio, s->cache_miss);
 		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 		bch_bio_map(s->iop.bio, NULL);
 
···
 	    !(bio->bi_opf & REQ_META) &&
 	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
 		reada = min_t(sector_t, dc->readahead >> 9,
-			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
+			      get_capacity(bio->bi_disk) - bio_end_sector(bio));
 
 	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
···
 		goto out_submit;
 
 	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
-	cache_bio->bi_bdev = miss->bi_bdev;
+	bio_copy_dev(cache_bio, miss);
 	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 
 	cache_bio->bi_end_io = request_endio;
···
 			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
 							     dc->disk.bio_split);
 
-			flush->bi_bdev = bio->bi_bdev;
+			bio_copy_dev(flush, bio);
 			flush->bi_end_io = request_endio;
 			flush->bi_private = cl;
 			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
···
 					struct bio *bio)
 {
 	struct search *s;
-	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+	struct bcache_device *d = bio->bi_disk->private_data;
 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
 	int rw = bio_data_dir(bio);
 
 	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
 
-	bio->bi_bdev = dc->bdev;
+	bio_set_dev(bio, dc->bdev);
 	bio->bi_iter.bi_sector += dc->sb.data_offset;
 
 	if (cached_dev_get(dc)) {
···
 {
 	struct search *s;
 	struct closure *cl;
-	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+	struct bcache_device *d = bio->bi_disk->private_data;
 	int rw = bio_data_dir(bio);
 
 	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
+3 -3
drivers/md/bcache/super.c
···
 	closure_init(cl, parent);
 
 	bio_reset(bio);
-	bio->bi_bdev = dc->bdev;
+	bio_set_dev(bio, dc->bdev);
 	bio->bi_end_io = write_bdev_super_endio;
 	bio->bi_private = dc;
 
···
 		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
 
 		bio_reset(bio);
-		bio->bi_bdev = ca->bdev;
+		bio_set_dev(bio, ca->bdev);
 		bio->bi_end_io = write_super_endio;
 		bio->bi_private = ca;
 
···
 	closure_init_stack(cl);
 
 	bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
-	bio->bi_bdev = ca->bdev;
+	bio_set_dev(bio, ca->bdev);
 	bio->bi_iter.bi_size = bucket_bytes(ca);
 
 	bio->bi_end_io = prio_endio;
+2 -3
drivers/md/bcache/writeback.c
···
 	dirty_init(w);
 	bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
 	io->bio.bi_iter.bi_sector = KEY_START(&w->key);
-	io->bio.bi_bdev = io->dc->bdev;
+	bio_set_dev(&io->bio, io->dc->bdev);
 	io->bio.bi_end_io = dirty_endio;
 
 	closure_bio_submit(&io->bio, cl);
···
 		dirty_init(w);
 		bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
 		io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
-		io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
-					    &w->key, 0)->bdev;
+		bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
 		io->bio.bi_end_io = read_dirty_endio;
 
 		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
+6 -3
drivers/md/dm-bio-record.h
···
  */
 
 struct dm_bio_details {
-	struct block_device *bi_bdev;
+	struct gendisk *bi_disk;
+	u8 bi_partno;
 	unsigned long bi_flags;
 	struct bvec_iter bi_iter;
 };
 
 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
 {
-	bd->bi_bdev = bio->bi_bdev;
+	bd->bi_disk = bio->bi_disk;
+	bd->bi_partno = bio->bi_partno;
 	bd->bi_flags = bio->bi_flags;
 	bd->bi_iter = bio->bi_iter;
 }
 
 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
 {
-	bio->bi_bdev = bd->bi_bdev;
+	bio->bi_disk = bd->bi_disk;
+	bio->bi_partno = bd->bi_partno;
 	bio->bi_flags = bd->bi_flags;
 	bio->bi_iter = bd->bi_iter;
 }
+1 -1
drivers/md/dm-bufio.c
···
 
 	bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
 	b->bio.bi_iter.bi_sector = sector;
-	b->bio.bi_bdev = b->c->bdev;
+	bio_set_dev(&b->bio, b->c->bdev);
 	b->bio.bi_end_io = inline_endio;
 	/*
 	 * Use of .bi_private isn't a problem here because
+2 -2
drivers/md/dm-cache-target.c
···
  *--------------------------------------------------------------*/
 static void remap_to_origin(struct cache *cache, struct bio *bio)
 {
-	bio->bi_bdev = cache->origin_dev->bdev;
+	bio_set_dev(bio, cache->origin_dev->bdev);
 }
 
 static void remap_to_cache(struct cache *cache, struct bio *bio,
···
 	sector_t bi_sector = bio->bi_iter.bi_sector;
 	sector_t block = from_cblock(cblock);
 
-	bio->bi_bdev = cache->cache_dev->bdev;
+	bio_set_dev(bio, cache->cache_dev->bdev);
 	if (!block_size_is_power_of_two(cache))
 		bio->bi_iter.bi_sector =
 			(block * cache->sectors_per_block) +
+2 -2
drivers/md/dm-crypt.c
···
 
 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
-	clone->bi_bdev = cc->dev->bdev;
+	bio_set_dev(clone, cc->dev->bdev);
 	clone->bi_opf = io->base_bio->bi_opf;
 }
 
···
 	 */
 	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
 	    bio_op(bio) == REQ_OP_DISCARD)) {
-		bio->bi_bdev = cc->dev->bdev;
+		bio_set_dev(bio, cc->dev->bdev);
 		if (bio_sectors(bio))
 			bio->bi_iter.bi_sector = cc->start +
 				dm_target_offset(ti, bio->bi_iter.bi_sector);
+2 -2
drivers/md/dm-delay.c
···
 	struct delay_c *dc = ti->private;
 
 	if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
-		bio->bi_bdev = dc->dev_write->bdev;
+		bio_set_dev(bio, dc->dev_write->bdev);
 		if (bio_sectors(bio))
 			bio->bi_iter.bi_sector = dc->start_write +
 				dm_target_offset(ti, bio->bi_iter.bi_sector);
···
 		return delay_bio(dc, dc->write_delay, bio);
 	}
 
-	bio->bi_bdev = dc->dev_read->bdev;
+	bio_set_dev(bio, dc->dev_read->bdev);
 	bio->bi_iter.bi_sector = dc->start_read +
 		dm_target_offset(ti, bio->bi_iter.bi_sector);
 
+1 -1
drivers/md/dm-era-target.c
···
 
 static void remap_to_origin(struct era *era, struct bio *bio)
 {
-	bio->bi_bdev = era->origin_dev->bdev;
+	bio_set_dev(bio, era->origin_dev->bdev);
 }
 
 /*----------------------------------------------------------------
+1 -1
drivers/md/dm-flakey.c
···
 {
 	struct flakey_c *fc = ti->private;
 
-	bio->bi_bdev = fc->dev->bdev;
+	bio_set_dev(bio, fc->dev->bdev);
 	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
 		bio->bi_iter.bi_sector =
 			flakey_map_sector(ti, bio->bi_iter.bi_sector);
+7 -4
drivers/md/dm-integrity.c
···
 
 	struct completion *completion;
 
-	struct block_device *orig_bi_bdev;
+	struct gendisk *orig_bi_disk;
+	u8 orig_bi_partno;
 	bio_end_io_t *orig_bi_end_io;
 	struct bio_integrity_payload *orig_bi_integrity;
 	struct bvec_iter orig_bi_iter;
···
 	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
 
 	bio->bi_iter = dio->orig_bi_iter;
-	bio->bi_bdev = dio->orig_bi_bdev;
+	bio->bi_disk = dio->orig_bi_disk;
+	bio->bi_partno = dio->orig_bi_partno;
 	if (dio->orig_bi_integrity) {
 		bio->bi_integrity = dio->orig_bi_integrity;
 		bio->bi_opf |= REQ_INTEGRITY;
···
 
 	dio->orig_bi_iter = bio->bi_iter;
 
-	dio->orig_bi_bdev = bio->bi_bdev;
-	bio->bi_bdev = ic->dev->bdev;
+	dio->orig_bi_disk = bio->bi_disk;
+	dio->orig_bi_partno = bio->bi_partno;
+	bio_set_dev(bio, ic->dev->bdev);
 
 	dio->orig_bi_integrity = bio_integrity(bio);
 	bio->bi_integrity = NULL;
+1 -1
drivers/md/dm-io.c
···
 
 		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
 		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
-		bio->bi_bdev = where->bdev;
+		bio_set_dev(bio, where->bdev);
 		bio->bi_end_io = endio;
 		bio_set_op_attrs(bio, op, op_flags);
 		store_io_and_region_in_bio(bio, io, region);
+1 -1
drivers/md/dm-linear.c
···
 {
 	struct linear_c *lc = ti->private;
 
-	bio->bi_bdev = lc->dev->bdev;
+	bio_set_dev(bio, lc->dev->bdev);
 	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
 		bio->bi_iter.bi_sector =
 			linear_map_sector(ti, bio->bi_iter.bi_sector);
+4 -4
drivers/md/dm-log-writes.c
···
 	}
 	bio->bi_iter.bi_size = 0;
 	bio->bi_iter.bi_sector = sector;
-	bio->bi_bdev = lc->logdev->bdev;
+	bio_set_dev(bio, lc->logdev->bdev);
 	bio->bi_end_io = log_end_io;
 	bio->bi_private = lc;
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
···
 	}
 	bio->bi_iter.bi_size = 0;
 	bio->bi_iter.bi_sector = sector;
-	bio->bi_bdev = lc->logdev->bdev;
+	bio_set_dev(bio, lc->logdev->bdev);
 	bio->bi_end_io = log_end_io;
 	bio->bi_private = lc;
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
···
 	}
 	bio->bi_iter.bi_size = 0;
 	bio->bi_iter.bi_sector = sector;
-	bio->bi_bdev = lc->logdev->bdev;
+	bio_set_dev(bio, lc->logdev->bdev);
 	bio->bi_end_io = log_end_io;
 	bio->bi_private = lc;
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
···
 {
 	struct log_writes_c *lc = ti->private;
 
-	bio->bi_bdev = lc->dev->bdev;
+	bio_set_dev(bio, lc->dev->bdev);
 }
 
 static int log_writes_map(struct dm_target *ti, struct bio *bio)
+1 -1
drivers/md/dm-mpath.c
···
 	mpio->nr_bytes = nr_bytes;
 
 	bio->bi_status = 0;
-	bio->bi_bdev = pgpath->path.dev->bdev;
+	bio_set_dev(bio, pgpath->path.dev->bdev);
 	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 
 	if (pgpath->pg->ps.type->start_io)
+6 -6
drivers/md/dm-raid1.c
···
 
 struct dm_raid1_bio_record {
 	struct mirror *m;
-	/* if details->bi_bdev == NULL, details were not saved */
+	/* if details->bi_disk == NULL, details were not saved */
 	struct dm_bio_details details;
 	region_t write_region;
 };
···
 
 static void map_bio(struct mirror *m, struct bio *bio)
 {
-	bio->bi_bdev = m->dev->bdev;
+	bio_set_dev(bio, m->dev->bdev);
 	bio->bi_iter.bi_sector = map_sector(m, bio);
 }
 
···
 	struct dm_raid1_bio_record *bio_record =
 		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
 
-	bio_record->details.bi_bdev = NULL;
+	bio_record->details.bi_disk = NULL;
 
 	if (rw == WRITE) {
 		/* Save region for mirror_end_io() handler */
···
 		goto out;
 
 	if (unlikely(*error)) {
-		if (!bio_record->details.bi_bdev) {
+		if (!bio_record->details.bi_disk) {
 			/*
 			 * There wasn't enough memory to record necessary
 			 * information for a retry or there was no other
···
 			bd = &bio_record->details;
 
 			dm_bio_restore(bd, bio);
-			bio_record->details.bi_bdev = NULL;
+			bio_record->details.bi_disk = NULL;
 			bio->bi_status = 0;
 
 			queue_bio(ms, bio, rw);
···
 	}
 
 out:
-	bio_record->details.bi_bdev = NULL;
+	bio_record->details.bi_disk = NULL;
 
 	return DM_ENDIO_DONE;
 }
+8 -8
drivers/md/dm-snap.c
···
 static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
 			    struct bio *bio, chunk_t chunk)
 {
-	bio->bi_bdev = s->cow->bdev;
+	bio_set_dev(bio, s->cow->bdev);
 	bio->bi_iter.bi_sector =
 		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
 				(chunk - e->old_chunk)) +
···
 	init_tracked_chunk(bio);
 
 	if (bio->bi_opf & REQ_PREFLUSH) {
-		bio->bi_bdev = s->cow->bdev;
+		bio_set_dev(bio, s->cow->bdev);
 		return DM_MAPIO_REMAPPED;
 	}
 
···
 			goto out;
 		}
 	} else {
-		bio->bi_bdev = s->origin->bdev;
+		bio_set_dev(bio, s->origin->bdev);
 		track_chunk(s, bio, chunk);
 	}
 
···
 
 	if (bio->bi_opf & REQ_PREFLUSH) {
 		if (!dm_bio_get_target_bio_nr(bio))
-			bio->bi_bdev = s->origin->bdev;
+			bio_set_dev(bio, s->origin->bdev);
 		else
-			bio->bi_bdev = s->cow->bdev;
+			bio_set_dev(bio, s->cow->bdev);
 		return DM_MAPIO_REMAPPED;
 	}
 
···
 		    chunk >= s->first_merging_chunk &&
 		    chunk < (s->first_merging_chunk +
 			     s->num_merging_chunks)) {
-			bio->bi_bdev = s->origin->bdev;
+			bio_set_dev(bio, s->origin->bdev);
 			bio_list_add(&s->bios_queued_during_merge, bio);
 			r = DM_MAPIO_SUBMITTED;
 			goto out_unlock;
···
 	}
 
 redirect_to_origin:
-	bio->bi_bdev = s->origin->bdev;
+	bio_set_dev(bio, s->origin->bdev);
 
 	if (bio_data_dir(bio) == WRITE) {
 		up_write(&s->lock);
···
 	struct dm_origin *o = ti->private;
 	unsigned available_sectors;
 
-	bio->bi_bdev = o->dev->bdev;
+	bio_set_dev(bio, o->dev->bdev);
 
 	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
 		return DM_MAPIO_REMAPPED;
+4 -6
drivers/md/dm-stripe.c
···
 	stripe_map_range_sector(sc, bio_end_sector(bio),
 				target_stripe, &end);
 	if (begin < end) {
-		bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
+		bio_set_dev(bio, sc->stripe[target_stripe].dev->bdev);
 		bio->bi_iter.bi_sector = begin +
 			sc->stripe[target_stripe].physical_start;
 		bio->bi_iter.bi_size = to_bytes(end - begin);
···
 	if (bio->bi_opf & REQ_PREFLUSH) {
 		target_bio_nr = dm_bio_get_target_bio_nr(bio);
 		BUG_ON(target_bio_nr >= sc->stripes);
-		bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
+		bio_set_dev(bio, sc->stripe[target_bio_nr].dev->bdev);
 		return DM_MAPIO_REMAPPED;
 	}
 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
···
 		  &stripe, &bio->bi_iter.bi_sector);
 
 	bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
-	bio->bi_bdev = sc->stripe[stripe].dev->bdev;
+	bio_set_dev(bio, sc->stripe[stripe].dev->bdev);
 
 	return DM_MAPIO_REMAPPED;
 }
···
 		return DM_ENDIO_DONE;
 
 	memset(major_minor, 0, sizeof(major_minor));
-	sprintf(major_minor, "%d:%d",
-		MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
-		MINOR(disk_devt(bio->bi_bdev->bd_disk)));
+	sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));
 
 	/*
 	 * Test to see which stripe drive triggered the event
+1 -1
drivers/md/dm-switch.c
···
 	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
 	unsigned path_nr = switch_get_path_nr(sctx, offset);
 
-	bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
+	bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
 	bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
 
 	return DM_MAPIO_REMAPPED;
+3 -3
drivers/md/dm-thin.c
···
 	struct pool *pool = tc->pool;
 	sector_t bi_sector = bio->bi_iter.bi_sector;
 
-	bio->bi_bdev = tc->pool_dev->bdev;
+	bio_set_dev(bio, tc->pool_dev->bdev);
 	if (block_size_is_power_of_two(pool))
 		bio->bi_iter.bi_sector =
 			(block << pool->sectors_per_block_shift) |
···
 
 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 {
-	bio->bi_bdev = tc->origin_dev->bdev;
+	bio_set_dev(bio, tc->origin_dev->bdev);
 }
 
 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
···
 	 * As this is a singleton target, ti->begin is always zero.
 	 */
 	spin_lock_irqsave(&pool->lock, flags);
-	bio->bi_bdev = pt->data_dev->bdev;
+	bio_set_dev(bio, pt->data_dev->bdev);
 	r = DM_MAPIO_REMAPPED;
 	spin_unlock_irqrestore(&pool->lock, flags);
 
+1 -1
drivers/md/dm-verity-target.c
···
 	struct dm_verity *v = ti->private;
 	struct dm_verity_io *io;
 
-	bio->bi_bdev = v->data_dev->bdev;
+	bio_set_dev(bio, v->data_dev->bdev);
 	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
 
 	if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
+3 -3
drivers/md/dm-zoned-metadata.c
···
 	}
 
 	bio->bi_iter.bi_sector = dmz_blk2sect(block);
-	bio->bi_bdev = zmd->dev->bdev;
+	bio_set_dev(bio, zmd->dev->bdev);
 	bio->bi_private = mblk;
 	bio->bi_end_io = dmz_mblock_bio_end_io;
 	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
···
 	set_bit(DMZ_META_WRITING, &mblk->state);
 
 	bio->bi_iter.bi_sector = dmz_blk2sect(block);
-	bio->bi_bdev = zmd->dev->bdev;
+	bio_set_dev(bio, zmd->dev->bdev);
 	bio->bi_private = mblk;
 	bio->bi_end_io = dmz_mblock_bio_end_io;
 	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
···
 		return -ENOMEM;
 
 	bio->bi_iter.bi_sector = dmz_blk2sect(block);
-	bio->bi_bdev = zmd->dev->bdev;
+	bio_set_dev(bio, zmd->dev->bdev);
 	bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
 	bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
 	ret = submit_bio_wait(bio);
+2 -2
drivers/md/dm-zoned-target.c
···
 	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
 
 	/* Setup and submit the BIO */
-	bio->bi_bdev = dmz->dev->bdev;
+	bio_set_dev(bio, dmz->dev->bdev);
 	bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
 	atomic_inc(&bioctx->ref);
 	generic_make_request(bio);
···
 		(unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
 		(unsigned int)dmz_bio_blocks(bio));
 
-	bio->bi_bdev = dev->bdev;
+	bio_set_dev(bio, dev->bdev);
 
 	if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
 		return DM_MAPIO_REMAPPED;
+5 -5
drivers/md/dm.c
···
 
 	if (unlikely(error == BLK_STS_TARGET)) {
 		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-		    !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
+		    !bio->bi_disk->queue->limits.max_write_same_sectors)
 			disable_write_same(md);
 		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-		    !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
+		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
 			disable_write_zeroes(md);
 	}
 
···
 		break;
 	case DM_MAPIO_REMAPPED:
 		/* the bio has been remapped so dispatch it */
-		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
-				      tio->io->bio->bi_bdev->bd_dev, sector);
+		trace_block_bio_remap(clone->bi_disk->queue, clone,
+				      bio_dev(tio->io->bio), sector);
 		generic_make_request(clone);
 		break;
 	case DM_MAPIO_KILL:
···
 		goto bad;
 
 	bio_init(&md->flush_bio, NULL, 0);
-	md->flush_bio.bi_bdev = md->bdev;
+	bio_set_dev(&md->flush_bio, md->bdev);
 	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
 
 	dm_stats_init(&md->stats);
+2 -2
drivers/md/faulty.c
···
 	if (failit) {
 		struct bio *b = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
 
-		b->bi_bdev = conf->rdev->bdev;
+		bio_set_dev(b, conf->rdev->bdev);
 		b->bi_private = bio;
 		b->bi_end_io = faulty_fail;
 		bio = b;
 	} else
-		bio->bi_bdev = conf->rdev->bdev;
+		bio_set_dev(bio, conf->rdev->bdev);
 
 	generic_make_request(bio);
 	return true;
+3 -3
drivers/md/linear.c
···
 		bio = split;
 	}
 
-	bio->bi_bdev = tmp_dev->rdev->bdev;
+	bio_set_dev(bio, tmp_dev->rdev->bdev);
 	bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
 		start_sector + data_offset;
 
 	if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+		     !blk_queue_discard(bio->bi_disk->queue))) {
 		/* Just ignore it */
 		bio_endio(bio);
 	} else {
 		if (mddev->gendisk)
-			trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+			trace_block_bio_remap(bio->bi_disk->queue,
 					      bio, disk_devt(mddev->gendisk),
 					      bio_sector);
 		mddev_check_writesame(mddev, bio);
+6 -4
drivers/md/md.c
···
 		bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
 		bi->bi_end_io = md_end_flush;
 		bi->bi_private = rdev;
-		bi->bi_bdev = rdev->bdev;
+		bio_set_dev(bi, rdev->bdev);
 		bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 		atomic_inc(&mddev->flush_pending);
 		submit_bio(bi);
···
 
 	atomic_inc(&rdev->nr_pending);
 
-	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
+	bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
 	bio->bi_iter.bi_sector = sector;
 	bio_add_page(bio, page, size, 0);
 	bio->bi_private = rdev;
···
 	struct bio *bio = md_bio_alloc_sync(rdev->mddev);
 	int ret;
 
-	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
-		rdev->meta_bdev : rdev->bdev;
+	if (metadata_op && rdev->meta_bdev)
+		bio_set_dev(bio, rdev->meta_bdev);
+	else
+		bio_set_dev(bio, rdev->bdev);
 	bio_set_op_attrs(bio, op, op_flags);
 	if (metadata_op)
 		bio->bi_iter.bi_sector = sector + rdev->sb_start;
+7 -2
drivers/md/md.h
···
 	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
 }
 
+static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
+{
+	atomic_add(nr_sectors, &bio->bi_disk->sync_io);
+}
+
 struct md_personality
 {
 	char *name;
···
 static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
 {
 	if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-	    !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
+	    !bio->bi_disk->queue->limits.max_write_same_sectors)
 		mddev->queue->limits.max_write_same_sectors = 0;
 }
 
 static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
 {
 	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-	    !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
+	    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
 		mddev->queue->limits.max_write_zeroes_sectors = 0;
 }
 #endif /* _MD_MD_H */
+4 -4
drivers/md/multipath.c
···
 	__bio_clone_fast(&mp_bh->bio, bio);
 
 	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
-	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
+	bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
 	mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
···
 
 	if ((mp_bh->path = multipath_map (conf))<0) {
 		pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
-		       bdevname(bio->bi_bdev,b),
+		       bio_devname(bio, b),
 		       (unsigned long long)bio->bi_iter.bi_sector);
 		multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
 	} else {
 		pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
-		       bdevname(bio->bi_bdev,b),
+		       bio_devname(bio, b),
 		       (unsigned long long)bio->bi_iter.bi_sector);
 		*bio = *(mp_bh->master_bio);
 		bio->bi_iter.bi_sector +=
 			conf->multipaths[mp_bh->path].rdev->data_offset;
-		bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
+		bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev);
 		bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 		bio->bi_end_io = multipath_end_request;
 		bio->bi_private = mp_bh;
+3 -4
drivers/md/raid0.c
···
 
 	zone = find_zone(mddev->private, &sector);
 	tmp_dev = map_sector(mddev, zone, sector, &sector);
-	bio->bi_bdev = tmp_dev->bdev;
+	bio_set_dev(bio, tmp_dev->bdev);
 	bio->bi_iter.bi_sector = sector + zone->dev_start +
 		tmp_dev->data_offset;
 
 	if (mddev->gendisk)
-		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
-				      bio, disk_devt(mddev->gendisk),
-				      bio_sector);
+		trace_block_bio_remap(bio->bi_disk->queue, bio,
+				      disk_devt(mddev->gendisk), bio_sector);
 	mddev_check_writesame(mddev, bio);
 	mddev_check_write_zeroes(mddev, bio);
 	generic_make_request(bio);
+15 -19
drivers/md/raid1.c
···
 
 	while (bio) { /* submit pending writes */
 		struct bio *next = bio->bi_next;
-		struct md_rdev *rdev = (void*)bio->bi_bdev;
+		struct md_rdev *rdev = (void *)bio->bi_disk;
 		bio->bi_next = NULL;
-		bio->bi_bdev = rdev->bdev;
+		bio_set_dev(bio, rdev->bdev);
 		if (test_bit(Faulty, &rdev->flags)) {
 			bio_io_error(bio);
 		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+				    !blk_queue_discard(bio->bi_disk->queue)))
 			/* Just ignore it */
 			bio_endio(bio);
 		else
···
 
 	read_bio->bi_iter.bi_sector = r1_bio->sector +
 		mirror->rdev->data_offset;
-	read_bio->bi_bdev = mirror->rdev->bdev;
+	bio_set_dev(read_bio, mirror->rdev->bdev);
 	read_bio->bi_end_io = raid1_end_read_request;
 	bio_set_op_attrs(read_bio, op, do_sync);
 	if (test_bit(FailFast, &mirror->rdev->flags) &&
···
 	read_bio->bi_private = r1_bio;
 
 	if (mddev->gendisk)
-		trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
-				      read_bio, disk_devt(mddev->gendisk),
-				      r1_bio->sector);
+		trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
+				      disk_devt(mddev->gendisk), r1_bio->sector);
 
 	generic_make_request(read_bio);
 }
···
 
 		mbio->bi_iter.bi_sector = (r1_bio->sector +
 				   conf->mirrors[i].rdev->data_offset);
-		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+		bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
 		mbio->bi_end_io = raid1_end_write_request;
 		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
···
 		atomic_inc(&r1_bio->remaining);
 
 		if (mddev->gendisk)
-			trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+			trace_block_bio_remap(mbio->bi_disk->queue,
 					      mbio, disk_devt(mddev->gendisk),
 					      r1_bio->sector);
 		/* flush_pending_writes() needs access to the rdev so...*/
-		mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
+		mbio->bi_disk = (void *)conf->mirrors[i].rdev;
 
 		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
 		if (cb)
···
 	 * Don't fail devices as that won't really help.
 	 */
 	pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
-			    mdname(mddev),
-			    bdevname(bio->bi_bdev, b),
+			    mdname(mddev), bio_devname(bio, b),
 			    (unsigned long long)r1_bio->sector);
 	for (d = 0; d < conf->raid_disks * 2; d++) {
 		rdev = conf->mirrors[d].rdev;
···
 		b->bi_status = status;
 		b->bi_iter.bi_sector = r1_bio->sector +
 			conf->mirrors[i].rdev->data_offset;
-		b->bi_bdev = conf->mirrors[i].rdev->bdev;
+		bio_set_dev(b, conf->mirrors[i].rdev->bdev);
 		b->bi_end_io = end_sync_read;
 		rp->raid_bio = r1_bio;
 		b->bi_private = rp;
···
 
 		bio_trim(wbio, sector - r1_bio->sector, sectors);
 		wbio->bi_iter.bi_sector += rdev->data_offset;
-		wbio->bi_bdev = rdev->bdev;
+		bio_set_dev(wbio, rdev->bdev);
 
 		if (submit_bio_wait(wbio) < 0)
 			/* failure! */
···
 	struct mddev *mddev = conf->mddev;
 	struct bio *bio;
 	struct md_rdev *rdev;
-	dev_t bio_dev;
 	sector_t bio_sector;
 
 	clear_bit(R1BIO_ReadError, &r1_bio->state);
···
 	 */
 
 	bio = r1_bio->bios[r1_bio->read_disk];
-	bio_dev = bio->bi_bdev->bd_dev;
 	bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
 	bio_put(bio);
 	r1_bio->bios[r1_bio->read_disk] = NULL;
···
 		if (bio->bi_end_io) {
 			atomic_inc(&rdev->nr_pending);
 			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
-			bio->bi_bdev = rdev->bdev;
+			bio_set_dev(bio, rdev->bdev);
 			if (test_bit(FailFast, &rdev->flags))
 				bio->bi_opf |= MD_FAILFAST;
 		}
···
 			bio = r1_bio->bios[i];
 			if (bio->bi_end_io == end_sync_read) {
 				read_targets--;
-				md_sync_acct(bio->bi_bdev, nr_sectors);
+				md_sync_acct_bio(bio, nr_sectors);
 				if (read_targets == 1)
 					bio->bi_opf &= ~MD_FAILFAST;
 				generic_make_request(bio);
···
 	} else {
 		atomic_set(&r1_bio->remaining, 1);
 		bio = r1_bio->bios[r1_bio->read_disk];
-		md_sync_acct(bio->bi_bdev, nr_sectors);
+		md_sync_acct_bio(bio, nr_sectors);
 		if (read_targets == 1)
 			bio->bi_opf &= ~MD_FAILFAST;
 		generic_make_request(bio);
+24 -26
drivers/md/raid10.c
··· 901 901 902 902 while (bio) { /* submit pending writes */ 903 903 struct bio *next = bio->bi_next; 904 - struct md_rdev *rdev = (void*)bio->bi_bdev; 904 + struct md_rdev *rdev = (void*)bio->bi_disk; 905 905 bio->bi_next = NULL; 906 - bio->bi_bdev = rdev->bdev; 906 + bio_set_dev(bio, rdev->bdev); 907 907 if (test_bit(Faulty, &rdev->flags)) { 908 908 bio_io_error(bio); 909 909 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 910 - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 910 + !blk_queue_discard(bio->bi_disk->queue))) 911 911 /* Just ignore it */ 912 912 bio_endio(bio); 913 913 else ··· 1085 1085 1086 1086 while (bio) { /* submit pending writes */ 1087 1087 struct bio *next = bio->bi_next; 1088 - struct md_rdev *rdev = (void*)bio->bi_bdev; 1088 + struct md_rdev *rdev = (void*)bio->bi_disk; 1089 1089 bio->bi_next = NULL; 1090 - bio->bi_bdev = rdev->bdev; 1090 + bio_set_dev(bio, rdev->bdev); 1091 1091 if (test_bit(Faulty, &rdev->flags)) { 1092 1092 bio_io_error(bio); 1093 1093 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 1094 - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 1094 + !blk_queue_discard(bio->bi_disk->queue))) 1095 1095 /* Just ignore it */ 1096 1096 bio_endio(bio); 1097 1097 else ··· 1200 1200 1201 1201 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + 1202 1202 choose_data_offset(r10_bio, rdev); 1203 - read_bio->bi_bdev = rdev->bdev; 1203 + bio_set_dev(read_bio, rdev->bdev); 1204 1204 read_bio->bi_end_io = raid10_end_read_request; 1205 1205 bio_set_op_attrs(read_bio, op, do_sync); 1206 1206 if (test_bit(FailFast, &rdev->flags) && ··· 1209 1209 read_bio->bi_private = r10_bio; 1210 1210 1211 1211 if (mddev->gendisk) 1212 - trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), 1212 + trace_block_bio_remap(read_bio->bi_disk->queue, 1213 1213 read_bio, disk_devt(mddev->gendisk), 1214 1214 r10_bio->sector); 1215 1215 generic_make_request(read_bio); ··· 1249 1249 1250 1250 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + 1251 1251 choose_data_offset(r10_bio, rdev)); 1252 - mbio->bi_bdev = rdev->bdev; 1252 + bio_set_dev(mbio, rdev->bdev); 1253 1253 mbio->bi_end_io = raid10_end_write_request; 1254 1254 bio_set_op_attrs(mbio, op, do_sync | do_fua); 1255 1255 if (!replacement && test_bit(FailFast, ··· 1259 1259 mbio->bi_private = r10_bio; 1260 1260 1261 1261 if (conf->mddev->gendisk) 1262 - trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), 1262 + trace_block_bio_remap(mbio->bi_disk->queue, 1263 1263 mbio, disk_devt(conf->mddev->gendisk), 1264 1264 r10_bio->sector); 1265 1265 /* flush_pending_writes() needs access to the rdev so...*/ 1266 - mbio->bi_bdev = (void *)rdev; 1266 + mbio->bi_disk = (void *)rdev; 1267 1267 1268 1268 atomic_inc(&r10_bio->remaining); 1269 1269 ··· 2094 2094 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) 2095 2095 tbio->bi_opf |= MD_FAILFAST; 2096 2096 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; 2097 - tbio->bi_bdev = conf->mirrors[d].rdev->bdev; 2097 + bio_set_dev(tbio, conf->mirrors[d].rdev->bdev); 2098 2098 generic_make_request(tbio); 2099 2099 } 2100 2100 ··· 2552 2552 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); 2553 2553 wbio->bi_iter.bi_sector = wsector + 2554 2554 choose_data_offset(r10_bio, rdev); 2555 - wbio->bi_bdev = rdev->bdev; 2555 + bio_set_dev(wbio, rdev->bdev); 2556 2556 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); 2557 2557 2558 2558 if (submit_bio_wait(wbio) < 0) ··· 2575 2575 struct bio *bio; 2576 2576 struct r10conf *conf = mddev->private; 2577 2577 
struct md_rdev *rdev = r10_bio->devs[slot].rdev; 2578 - dev_t bio_dev; 2579 2578 sector_t bio_last_sector; 2580 2579 2581 2580 /* we got a read error. Maybe the drive is bad. Maybe just ··· 2586 2587 * frozen. 2587 2588 */ 2588 2589 bio = r10_bio->devs[slot].bio; 2589 - bio_dev = bio->bi_bdev->bd_dev; 2590 2590 bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors; 2591 2591 bio_put(bio); 2592 2592 r10_bio->devs[slot].bio = NULL; ··· 2948 2950 2949 2951 /* Again, very different code for resync and recovery. 2950 2952 * Both must result in an r10bio with a list of bios that 2951 - * have bi_end_io, bi_sector, bi_bdev set, 2953 + * have bi_end_io, bi_sector, bi_disk set, 2952 2954 * and bi_private set to the r10bio. 2953 2955 * For recovery, we may actually create several r10bios 2954 2956 * with 2 bios in each, that correspond to the bios in the main one. ··· 3093 3095 from_addr = r10_bio->devs[j].addr; 3094 3096 bio->bi_iter.bi_sector = from_addr + 3095 3097 rdev->data_offset; 3096 - bio->bi_bdev = rdev->bdev; 3098 + bio_set_dev(bio, rdev->bdev); 3097 3099 atomic_inc(&rdev->nr_pending); 3098 3100 /* and we write to 'i' (if not in_sync) */ 3099 3101 ··· 3115 3117 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3116 3118 bio->bi_iter.bi_sector = to_addr 3117 3119 + mrdev->data_offset; 3118 - bio->bi_bdev = mrdev->bdev; 3120 + bio_set_dev(bio, mrdev->bdev); 3119 3121 atomic_inc(&r10_bio->remaining); 3120 3122 } else 3121 3123 r10_bio->devs[1].bio->bi_end_io = NULL; ··· 3141 3143 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3142 3144 bio->bi_iter.bi_sector = to_addr + 3143 3145 mreplace->data_offset; 3144 - bio->bi_bdev = mreplace->bdev; 3146 + bio_set_dev(bio, mreplace->bdev); 3145 3147 atomic_inc(&r10_bio->remaining); 3146 3148 break; 3147 3149 } ··· 3287 3289 if (test_bit(FailFast, &rdev->flags)) 3288 3290 bio->bi_opf |= MD_FAILFAST; 3289 3291 bio->bi_iter.bi_sector = sector + rdev->data_offset; 3290 - bio->bi_bdev = rdev->bdev; 3292 + bio_set_dev(bio, rdev->bdev); 3291 3293 count++; 3292 3294 3293 3295 rdev = rcu_dereference(conf->mirrors[d].replacement); ··· 3309 3311 if (test_bit(FailFast, &rdev->flags)) 3310 3312 bio->bi_opf |= MD_FAILFAST; 3311 3313 bio->bi_iter.bi_sector = sector + rdev->data_offset; 3312 - bio->bi_bdev = rdev->bdev; 3314 + bio_set_dev(bio, rdev->bdev); 3313 3315 count++; 3314 3316 rcu_read_unlock(); 3315 3317 } ··· 3365 3367 r10_bio->sectors = nr_sectors; 3366 3368 3367 3369 if (bio->bi_end_io == end_sync_read) { 3368 - md_sync_acct(bio->bi_bdev, nr_sectors); 3370 + md_sync_acct_bio(bio, nr_sectors); 3369 3371 bio->bi_status = 0; 3370 3372 generic_make_request(bio); 3371 3373 } ··· 4381 4383 4382 4384 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); 4383 4385 4384 - read_bio->bi_bdev = rdev->bdev; 4386 + bio_set_dev(read_bio, rdev->bdev); 4385 4387 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr 4386 4388 + rdev->data_offset); 4387 4389 read_bio->bi_private = r10_bio; ··· 4415 4417 if (!rdev2 || test_bit(Faulty, &rdev2->flags)) 4416 4418 continue; 4417 4419 4418 - b->bi_bdev = rdev2->bdev; 4420 + bio_set_dev(b, rdev2->bdev); 4419 4421 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + 4420 4422 rdev2->new_data_offset; 4421 4423 b->bi_end_io = end_reshape_write; ··· 4447 4449 r10_bio->sectors = nr_sectors; 4448 4450 4449 4451 /* Now submit the read */ 4450 - md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); 4452 + md_sync_acct_bio(read_bio, r10_bio->sectors); 4451 4453 atomic_inc(&r10_bio->remaining); 4452 4454 
read_bio->bi_next = NULL; 4453 4455 generic_make_request(read_bio); ··· 4509 4511 } 4510 4512 atomic_inc(&rdev->nr_pending); 4511 4513 rcu_read_unlock(); 4512 - md_sync_acct(b->bi_bdev, r10_bio->sectors); 4514 + md_sync_acct_bio(b, r10_bio->sectors); 4513 4515 atomic_inc(&r10_bio->remaining); 4514 4516 b->bi_next = NULL; 4515 4517 generic_make_request(b);
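
Note on the raid10 hunks above: md temporarily parks the target md_rdev in the bio's device field while writes sit on the pending list, so the (void *) cast simply moves from bi_bdev to bi_disk. A condensed sketch of the round trip, using the names from the hunks (queueing details such as plugging and bitmap unplug are omitted):

	/* queueing side: stash the rdev in the not-yet-meaningful disk field */
	mbio->bi_disk = (void *)rdev;
	bio_list_add(&conf->pending_bio_list, mbio);

	/* flush side: recover the rdev, then point the bio at the real device */
	struct md_rdev *rdev = (void *)bio->bi_disk;
	bio_set_dev(bio, rdev->bdev);
	generic_make_request(bio);
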
+3 -3
drivers/md/raid5-cache.c
··· 728 728 struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs); 729 729 730 730 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 731 - bio->bi_bdev = log->rdev->bdev; 731 + bio_set_dev(bio, log->rdev->bdev); 732 732 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; 733 733 734 734 return bio; ··· 1291 1291 if (!do_flush) 1292 1292 return; 1293 1293 bio_reset(&log->flush_bio); 1294 - log->flush_bio.bi_bdev = log->rdev->bdev; 1294 + bio_set_dev(&log->flush_bio, log->rdev->bdev); 1295 1295 log->flush_bio.bi_end_io = r5l_log_flush_endio; 1296 1296 log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 1297 1297 submit_bio(&log->flush_bio); ··· 1669 1669 sector_t offset) 1670 1670 { 1671 1671 bio_reset(ctx->ra_bio); 1672 - ctx->ra_bio->bi_bdev = log->rdev->bdev; 1672 + bio_set_dev(ctx->ra_bio, log->rdev->bdev); 1673 1673 bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0); 1674 1674 ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset; 1675 1675
+3 -3
drivers/md/raid5-ppl.c
··· 415 415 pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n", 416 416 __func__, io->seq, bio->bi_iter.bi_size, 417 417 (unsigned long long)bio->bi_iter.bi_sector, 418 - bdevname(bio->bi_bdev, b)); 418 + bio_devname(bio, b)); 419 419 420 420 submit_bio(bio); 421 421 } ··· 453 453 454 454 bio->bi_end_io = ppl_log_endio; 455 455 bio->bi_opf = REQ_OP_WRITE | REQ_FUA; 456 - bio->bi_bdev = log->rdev->bdev; 456 + bio_set_dev(bio, log->rdev->bdev); 457 457 bio->bi_iter.bi_sector = log->rdev->ppl.sector; 458 458 bio_add_page(bio, io->header_page, PAGE_SIZE, 0); 459 459 ··· 468 468 bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, 469 469 ppl_conf->bs); 470 470 bio->bi_opf = prev->bi_opf; 471 - bio->bi_bdev = prev->bi_bdev; 471 + bio_copy_dev(bio, prev); 472 472 bio->bi_iter.bi_sector = bio_end_sector(prev); 473 473 bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); 474 474
+6 -6
drivers/md/raid5.c
··· 1096 1096 1097 1097 set_bit(STRIPE_IO_STARTED, &sh->state); 1098 1098 1099 - bi->bi_bdev = rdev->bdev; 1099 + bio_set_dev(bi, rdev->bdev); 1100 1100 bio_set_op_attrs(bi, op, op_flags); 1101 1101 bi->bi_end_io = op_is_write(op) 1102 1102 ? raid5_end_write_request ··· 1145 1145 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); 1146 1146 1147 1147 if (conf->mddev->gendisk) 1148 - trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), 1148 + trace_block_bio_remap(bi->bi_disk->queue, 1149 1149 bi, disk_devt(conf->mddev->gendisk), 1150 1150 sh->dev[i].sector); 1151 1151 if (should_defer && op_is_write(op)) ··· 1160 1160 1161 1161 set_bit(STRIPE_IO_STARTED, &sh->state); 1162 1162 1163 - rbi->bi_bdev = rrdev->bdev; 1163 + bio_set_dev(rbi, rrdev->bdev); 1164 1164 bio_set_op_attrs(rbi, op, op_flags); 1165 1165 BUG_ON(!op_is_write(op)); 1166 1166 rbi->bi_end_io = raid5_end_write_request; ··· 1193 1193 if (op == REQ_OP_DISCARD) 1194 1194 rbi->bi_vcnt = 0; 1195 1195 if (conf->mddev->gendisk) 1196 - trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), 1196 + trace_block_bio_remap(rbi->bi_disk->queue, 1197 1197 rbi, disk_devt(conf->mddev->gendisk), 1198 1198 sh->dev[i].sector); 1199 1199 if (should_defer && op_is_write(op)) ··· 5233 5233 atomic_inc(&rdev->nr_pending); 5234 5234 rcu_read_unlock(); 5235 5235 raid_bio->bi_next = (void*)rdev; 5236 - align_bi->bi_bdev = rdev->bdev; 5236 + bio_set_dev(align_bi, rdev->bdev); 5237 5237 bio_clear_flag(align_bi, BIO_SEG_VALID); 5238 5238 5239 5239 if (is_badblock(rdev, align_bi->bi_iter.bi_sector, ··· 5255 5255 spin_unlock_irq(&conf->device_lock); 5256 5256 5257 5257 if (mddev->gendisk) 5258 - trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), 5258 + trace_block_bio_remap(align_bi->bi_disk->queue, 5259 5259 align_bi, disk_devt(mddev->gendisk), 5260 5260 raid_bio->bi_iter.bi_sector); 5261 5261 generic_make_request(align_bi);
+2 -2
drivers/nvdimm/nd.h
··· 390 390 void __nd_iostat_start(struct bio *bio, unsigned long *start); 391 391 static inline bool nd_iostat_start(struct bio *bio, unsigned long *start) 392 392 { 393 - struct gendisk *disk = bio->bi_bdev->bd_disk; 393 + struct gendisk *disk = bio->bi_disk; 394 394 395 395 if (!blk_queue_io_stat(disk->queue)) 396 396 return false; ··· 402 402 } 403 403 static inline void nd_iostat_end(struct bio *bio, unsigned long start) 404 404 { 405 - struct gendisk *disk = bio->bi_bdev->bd_disk; 405 + struct gendisk *disk = bio->bi_disk; 406 406 407 407 generic_end_io_acct(disk->queue, bio_data_dir(bio), &disk->part0, 408 408 start);
+2 -9
drivers/nvme/host/core.c
··· 613 613 614 614 if (!disk) 615 615 goto submit; 616 - bio->bi_bdev = bdget_disk(disk, 0); 617 - if (!bio->bi_bdev) { 618 - ret = -ENODEV; 619 - goto out_unmap; 620 - } 616 + bio->bi_disk = disk; 621 617 622 618 if (meta_buffer && meta_len) { 623 619 struct bio_integrity_payload *bip; ··· 664 668 out_free_meta: 665 669 kfree(meta); 666 670 out_unmap: 667 - if (bio) { 668 - if (disk && bio->bi_bdev) 669 - bdput(bio->bi_bdev); 671 + if (bio) 670 672 blk_rq_unmap_user(bio); 671 - } 672 673 out: 673 674 blk_mq_free_request(req); 674 675 return ret;
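
With only a gendisk needed to submit the passthrough bio, the bdget_disk()/bdput() reference dance and its error path disappear; the lightnvm hunk below makes the same simplification. In outline:

	/* was: bio->bi_bdev = bdget_disk(disk, 0); ... bdput(bio->bi_bdev); */
	bio->bi_disk = disk;
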
+2 -13
drivers/nvme/host/lightnvm.c
··· 643 643 vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma); 644 644 } 645 645 646 - if (!disk) 647 - goto submit; 648 - 649 - bio->bi_bdev = bdget_disk(disk, 0); 650 - if (!bio->bi_bdev) { 651 - ret = -ENODEV; 652 - goto err_meta; 653 - } 646 + bio->bi_disk = disk; 654 647 } 655 648 656 - submit: 657 649 blk_execute_rq(q, NULL, rq, 0); 658 650 659 651 if (nvme_req(rq)->flags & NVME_REQ_CANCELLED) ··· 665 673 if (meta_buf && meta_len) 666 674 dma_pool_free(dev->dma_pool, metadata, metadata_dma); 667 675 err_map: 668 - if (bio) { 669 - if (disk && bio->bi_bdev) 670 - bdput(bio->bi_bdev); 676 + if (bio) 671 677 blk_rq_unmap_user(bio); 672 - } 673 678 err_ppa: 674 679 if (ppa_buf && ppa_len) 675 680 dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
+3 -3
drivers/nvme/target/io-cmd.c
··· 68 68 69 69 nvmet_inline_bio_init(req); 70 70 bio = &req->inline_bio; 71 - bio->bi_bdev = req->ns->bdev; 71 + bio_set_dev(bio, req->ns->bdev); 72 72 bio->bi_iter.bi_sector = sector; 73 73 bio->bi_private = req; 74 74 bio->bi_end_io = nvmet_bio_done; ··· 80 80 struct bio *prev = bio; 81 81 82 82 bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); 83 - bio->bi_bdev = req->ns->bdev; 83 + bio_set_dev(bio, req->ns->bdev); 84 84 bio->bi_iter.bi_sector = sector; 85 85 bio_set_op_attrs(bio, op, op_flags); 86 86 ··· 104 104 nvmet_inline_bio_init(req); 105 105 bio = &req->inline_bio; 106 106 107 - bio->bi_bdev = req->ns->bdev; 107 + bio_set_dev(bio, req->ns->bdev); 108 108 bio->bi_private = req; 109 109 bio->bi_end_io = nvmet_bio_done; 110 110 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+2 -2
drivers/s390/block/dcssblk.c
··· 856 856 blk_queue_split(q, &bio); 857 857 858 858 bytes_done = 0; 859 - dev_info = bio->bi_bdev->bd_disk->private_data; 859 + dev_info = bio->bi_disk->private_data; 860 860 if (dev_info == NULL) 861 861 goto fail; 862 862 if ((bio->bi_iter.bi_sector & 7) != 0 || 863 863 (bio->bi_iter.bi_size & 4095) != 0) 864 864 /* Request is not page-aligned. */ 865 865 goto fail; 866 - if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { 866 + if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) { 867 867 /* Request beyond end of DCSS segment. */ 868 868 goto fail; 869 869 }
+1 -1
drivers/s390/block/xpram.c
··· 183 183 */ 184 184 static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio) 185 185 { 186 - xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; 186 + xpram_device_t *xdev = bio->bi_disk->private_data; 187 187 struct bio_vec bvec; 188 188 struct bvec_iter iter; 189 189 unsigned int index;
+2 -2
drivers/target/target_core_iblock.c
··· 338 338 return NULL; 339 339 } 340 340 341 - bio->bi_bdev = ib_dev->ibd_bd; 341 + bio_set_dev(bio, ib_dev->ibd_bd); 342 342 bio->bi_private = cmd; 343 343 bio->bi_end_io = &iblock_bio_done; 344 344 bio->bi_iter.bi_sector = lba; ··· 395 395 396 396 bio = bio_alloc(GFP_KERNEL, 0); 397 397 bio->bi_end_io = iblock_end_io_flush; 398 - bio->bi_bdev = ib_dev->ibd_bd; 398 + bio_set_dev(bio, ib_dev->ibd_bd); 399 399 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 400 400 if (!immed) 401 401 bio->bi_private = cmd;
+2 -2
fs/block_dev.c
··· 223 223 } 224 224 225 225 bio_init(&bio, vecs, nr_pages); 226 - bio.bi_bdev = bdev; 226 + bio_set_dev(&bio, bdev); 227 227 bio.bi_iter.bi_sector = pos >> 9; 228 228 bio.bi_write_hint = iocb->ki_hint; 229 229 bio.bi_private = current; ··· 362 362 363 363 blk_start_plug(&plug); 364 364 for (;;) { 365 - bio->bi_bdev = bdev; 365 + bio_set_dev(bio, bdev); 366 366 bio->bi_iter.bi_sector = pos >> 9; 367 367 bio->bi_write_hint = iocb->ki_hint; 368 368 bio->bi_private = dio;
+6 -6
fs/btrfs/check-integrity.c
··· 1635 1635 unsigned int j; 1636 1636 1637 1637 bio = btrfs_io_bio_alloc(num_pages - i); 1638 - bio->bi_bdev = block_ctx->dev->bdev; 1638 + bio_set_dev(bio, block_ctx->dev->bdev); 1639 1639 bio->bi_iter.bi_sector = dev_bytenr >> 9; 1640 1640 bio_set_op_attrs(bio, REQ_OP_READ, 0); 1641 1641 ··· 2803 2803 mutex_lock(&btrfsic_mutex); 2804 2804 /* since btrfsic_submit_bio() is also called before 2805 2805 * btrfsic_mount(), this might return NULL */ 2806 - dev_state = btrfsic_dev_state_lookup(bio->bi_bdev->bd_dev); 2806 + dev_state = btrfsic_dev_state_lookup(bio_dev(bio)); 2807 2807 if (NULL != dev_state && 2808 2808 (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) { 2809 2809 unsigned int i = 0; ··· 2819 2819 bio_is_patched = 0; 2820 2820 if (dev_state->state->print_mask & 2821 2821 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2822 - pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", 2822 + pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_disk=%p)\n", 2823 2823 bio_op(bio), bio->bi_opf, segs, 2824 2824 (unsigned long long)bio->bi_iter.bi_sector, 2825 - dev_bytenr, bio->bi_bdev); 2825 + dev_bytenr, bio->bi_disk); 2826 2826 2827 2827 mapped_datav = kmalloc_array(segs, 2828 2828 sizeof(*mapped_datav), GFP_NOFS); ··· 2851 2851 } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) { 2852 2852 if (dev_state->state->print_mask & 2853 2853 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2854 - pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n", 2855 - bio_op(bio), bio->bi_opf, bio->bi_bdev); 2854 + pr_info("submit_bio(rw=%d,0x%x FLUSH, disk=%p)\n", 2855 + bio_op(bio), bio->bi_opf, bio->bi_disk); 2856 2856 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { 2857 2857 if ((dev_state->state->print_mask & 2858 2858 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
+1 -1
fs/btrfs/disk-io.c
··· 3499 3499 3500 3500 bio_reset(bio); 3501 3501 bio->bi_end_io = btrfs_end_empty_barrier; 3502 - bio->bi_bdev = device->bdev; 3502 + bio_set_dev(bio, device->bdev); 3503 3503 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; 3504 3504 init_completion(&device->flush_wait); 3505 3505 bio->bi_private = &device->flush_wait;
+3 -3
fs/btrfs/extent_io.c
··· 2033 2033 bio_put(bio); 2034 2034 return -EIO; 2035 2035 } 2036 - bio->bi_bdev = dev->bdev; 2036 + bio_set_dev(bio, dev->bdev); 2037 2037 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; 2038 2038 bio_add_page(bio, page, length, pg_offset); 2039 2039 ··· 2335 2335 bio = btrfs_io_bio_alloc(1); 2336 2336 bio->bi_end_io = endio_func; 2337 2337 bio->bi_iter.bi_sector = failrec->logical >> 9; 2338 - bio->bi_bdev = fs_info->fs_devices->latest_bdev; 2338 + bio_set_dev(bio, fs_info->fs_devices->latest_bdev); 2339 2339 bio->bi_iter.bi_size = 0; 2340 2340 bio->bi_private = data; 2341 2341 ··· 2675 2675 struct bio *bio; 2676 2676 2677 2677 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, btrfs_bioset); 2678 - bio->bi_bdev = bdev; 2678 + bio_set_dev(bio, bdev); 2679 2679 bio->bi_iter.bi_sector = first_byte >> 9; 2680 2680 btrfs_io_bio_init(btrfs_io_bio(bio)); 2681 2681 return bio;
+5 -3
fs/btrfs/raid56.c
··· 1090 1090 */ 1091 1091 if (last_end == disk_start && stripe->dev->bdev && 1092 1092 !last->bi_status && 1093 - last->bi_bdev == stripe->dev->bdev) { 1093 + last->bi_disk == stripe->dev->bdev->bd_disk && 1094 + last->bi_partno == stripe->dev->bdev->bd_partno) { 1094 1095 ret = bio_add_page(last, page, PAGE_SIZE, 0); 1095 1096 if (ret == PAGE_SIZE) 1096 1097 return 0; ··· 1101 1100 /* put a new bio on the list */ 1102 1101 bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1); 1103 1102 bio->bi_iter.bi_size = 0; 1104 - bio->bi_bdev = stripe->dev->bdev; 1103 + bio_set_dev(bio, stripe->dev->bdev); 1105 1104 bio->bi_iter.bi_sector = disk_start >> 9; 1106 1105 1107 1106 bio_add_page(bio, page, PAGE_SIZE, 0); ··· 1348 1347 stripe_start = stripe->physical; 1349 1348 if (physical >= stripe_start && 1350 1349 physical < stripe_start + rbio->stripe_len && 1351 - bio->bi_bdev == stripe->dev->bdev) { 1350 + bio->bi_disk == stripe->dev->bdev->bd_disk && 1351 + bio->bi_partno == stripe->dev->bdev->bd_partno) { 1352 1352 return i; 1353 1353 } 1354 1354 }
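
With bi_bdev gone, "does this bio target that block device" becomes a two-field comparison, open-coded in the raid56 hunks above and again in the f2fs hunk further down. A hypothetical helper capturing the idiom (not part of this commit):

	static inline bool bio_same_bdev(struct bio *bio, struct block_device *bdev)
	{
		return bio->bi_disk == bdev->bd_disk &&
		       bio->bi_partno == bdev->bd_partno;
	}
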
+6 -6
fs/btrfs/scrub.c
··· 1738 1738 1739 1739 WARN_ON(!page->page); 1740 1740 bio = btrfs_io_bio_alloc(1); 1741 - bio->bi_bdev = page->dev->bdev; 1741 + bio_set_dev(bio, page->dev->bdev); 1742 1742 1743 1743 bio_add_page(bio, page->page, PAGE_SIZE, 0); 1744 1744 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) { ··· 1826 1826 } 1827 1827 1828 1828 bio = btrfs_io_bio_alloc(1); 1829 - bio->bi_bdev = page_bad->dev->bdev; 1829 + bio_set_dev(bio, page_bad->dev->bdev); 1830 1830 bio->bi_iter.bi_sector = page_bad->physical >> 9; 1831 1831 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 1832 1832 ··· 1921 1921 1922 1922 bio->bi_private = sbio; 1923 1923 bio->bi_end_io = scrub_wr_bio_end_io; 1924 - bio->bi_bdev = sbio->dev->bdev; 1924 + bio_set_dev(bio, sbio->dev->bdev); 1925 1925 bio->bi_iter.bi_sector = sbio->physical >> 9; 1926 1926 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 1927 1927 sbio->status = 0; ··· 1964 1964 1965 1965 sbio = sctx->wr_curr_bio; 1966 1966 sctx->wr_curr_bio = NULL; 1967 - WARN_ON(!sbio->bio->bi_bdev); 1967 + WARN_ON(!sbio->bio->bi_disk); 1968 1968 scrub_pending_bio_inc(sctx); 1969 1969 /* process all writes in a single worker thread. Then the block layer 1970 1970 * orders the requests before sending them to the driver which ··· 2321 2321 2322 2322 bio->bi_private = sbio; 2323 2323 bio->bi_end_io = scrub_bio_end_io; 2324 - bio->bi_bdev = sbio->dev->bdev; 2324 + bio_set_dev(bio, sbio->dev->bdev); 2325 2325 bio->bi_iter.bi_sector = sbio->physical >> 9; 2326 2326 bio_set_op_attrs(bio, REQ_OP_READ, 0); 2327 2327 sbio->status = 0; ··· 4627 4627 bio = btrfs_io_bio_alloc(1); 4628 4628 bio->bi_iter.bi_size = 0; 4629 4629 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; 4630 - bio->bi_bdev = dev->bdev; 4630 + bio_set_dev(bio, dev->bdev); 4631 4631 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; 4632 4632 ret = bio_add_page(bio, page, PAGE_SIZE, 0); 4633 4633 if (ret != PAGE_SIZE) {
+1 -1
fs/btrfs/volumes.c
··· 6188 6188 rcu_read_unlock(); 6189 6189 } 6190 6190 #endif 6191 - bio->bi_bdev = dev->bdev; 6191 + bio_set_dev(bio, dev->bdev); 6192 6192 6193 6193 btrfs_bio_counter_inc_noblocked(fs_info); 6194 6194
+2 -2
fs/buffer.c
··· 3057 3057 struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; 3058 3058 unsigned truncated_bytes; 3059 3059 3060 - maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 3060 + maxsector = get_capacity(bio->bi_disk); 3061 3061 if (!maxsector) 3062 3062 return; 3063 3063 ··· 3116 3116 } 3117 3117 3118 3118 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 3119 - bio->bi_bdev = bh->b_bdev; 3119 + bio_set_dev(bio, bh->b_bdev); 3120 3120 bio->bi_write_hint = write_hint; 3121 3121 3122 3122 bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+1 -1
fs/crypto/bio.c
··· 115 115 err = -ENOMEM; 116 116 goto errout; 117 117 } 118 - bio->bi_bdev = inode->i_sb->s_bdev; 118 + bio_set_dev(bio, inode->i_sb->s_bdev); 119 119 bio->bi_iter.bi_sector = 120 120 pblk << (inode->i_sb->s_blocksize_bits - 9); 121 121 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+4 -4
fs/direct-io.c
··· 111 111 int op; 112 112 int op_flags; 113 113 blk_qc_t bio_cookie; 114 - struct block_device *bio_bdev; 114 + struct gendisk *bio_disk; 115 115 struct inode *inode; 116 116 loff_t i_size; /* i_size when submitted */ 117 117 dio_iodone_t *end_io; /* IO completion function */ ··· 377 377 */ 378 378 bio = bio_alloc(GFP_KERNEL, nr_vecs); 379 379 380 - bio->bi_bdev = bdev; 380 + bio_set_dev(bio, bdev); 381 381 bio->bi_iter.bi_sector = first_sector; 382 382 bio_set_op_attrs(bio, dio->op, dio->op_flags); 383 383 if (dio->is_async) ··· 412 412 if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) 413 413 bio_set_pages_dirty(bio); 414 414 415 - dio->bio_bdev = bio->bi_bdev; 415 + dio->bio_disk = bio->bi_disk; 416 416 417 417 if (sdio->submit_io) { 418 418 sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); ··· 458 458 dio->waiter = current; 459 459 spin_unlock_irqrestore(&dio->bio_lock, flags); 460 460 if (!(dio->iocb->ki_flags & IOCB_HIPRI) || 461 - !blk_mq_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie)) 461 + !blk_mq_poll(dio->bio_disk->queue, dio->bio_cookie)) 462 462 io_schedule(); 463 463 /* wake up sets us TASK_RUNNING */ 464 464 spin_lock_irqsave(&dio->bio_lock, flags);
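
The dio waiter may poll after the bio has already completed and been freed, so the submission path caches the gendisk, which stays valid for the poll, instead of the block_device. Condensed from the hunks above:

	dio->bio_disk = bio->bi_disk;	/* cached before submission */
	dio->bio_cookie = submit_bio(bio);
	...
	if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
	    !blk_mq_poll(dio->bio_disk->queue, dio->bio_cookie))
		io_schedule();
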
+1 -1
fs/exofs/ore.c
··· 869 869 goto out; 870 870 } 871 871 872 - bio->bi_bdev = NULL; 872 + bio->bi_disk = NULL; 873 873 bio->bi_next = NULL; 874 874 per_dev->offset = master_dev->offset; 875 875 per_dev->length = master_dev->length;
+2 -2
fs/ext4/page-io.c
··· 300 300 char b[BDEVNAME_SIZE]; 301 301 302 302 if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n", 303 - bdevname(bio->bi_bdev, b), 303 + bio_devname(bio, b), 304 304 (long long) bio->bi_iter.bi_sector, 305 305 (unsigned) bio_sectors(bio), 306 306 bio->bi_status)) { ··· 375 375 return -ENOMEM; 376 376 wbc_init_bio(io->io_wbc, bio); 377 377 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 378 - bio->bi_bdev = bh->b_bdev; 378 + bio_set_dev(bio, bh->b_bdev); 379 379 bio->bi_end_io = ext4_end_bio; 380 380 bio->bi_private = ext4_get_io_end(io->io_end); 381 381 io->io_bio = bio;
+1 -1
fs/ext4/readpage.c
··· 254 254 fscrypt_release_ctx(ctx); 255 255 goto set_error_page; 256 256 } 257 - bio->bi_bdev = bdev; 257 + bio_set_dev(bio, bdev); 258 258 bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); 259 259 bio->bi_end_io = mpage_end_io; 260 260 bio->bi_private = ctx;
+3 -2
fs/f2fs/data.c
··· 142 142 } 143 143 } 144 144 if (bio) { 145 - bio->bi_bdev = bdev; 145 + bio_set_dev(bio, bdev); 146 146 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr); 147 147 } 148 148 return bdev; ··· 161 161 static bool __same_bdev(struct f2fs_sb_info *sbi, 162 162 block_t blk_addr, struct bio *bio) 163 163 { 164 - return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev; 164 + struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL); 165 + return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno; 165 166 } 166 167 167 168 /*
+1 -1
fs/f2fs/segment.c
··· 447 447 int ret; 448 448 449 449 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; 450 - bio->bi_bdev = bdev; 450 + bio_set_dev(bio, bdev); 451 451 ret = submit_bio_wait(bio); 452 452 bio_put(bio); 453 453
+1 -1
fs/gfs2/lops.c
··· 265 265 266 266 bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); 267 267 bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9); 268 - bio->bi_bdev = sb->s_bdev; 268 + bio_set_dev(bio, sb->s_bdev); 269 269 bio->bi_end_io = gfs2_end_log_write; 270 270 bio->bi_private = sdp; 271 271
+1 -1
fs/gfs2/meta_io.c
··· 221 221 222 222 bio = bio_alloc(GFP_NOIO, num); 223 223 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 224 - bio->bi_bdev = bh->b_bdev; 224 + bio_set_dev(bio, bh->b_bdev); 225 225 while (num > 0) { 226 226 bh = *bhs; 227 227 if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
+1 -1
fs/gfs2/ops_fstype.c
··· 242 242 243 243 bio = bio_alloc(GFP_NOFS, 1); 244 244 bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); 245 - bio->bi_bdev = sb->s_bdev; 245 + bio_set_dev(bio, sb->s_bdev); 246 246 bio_add_page(bio, page, PAGE_SIZE, 0); 247 247 248 248 bio->bi_end_io = end_bio_io_page;
+1 -1
fs/hfsplus/wrapper.c
··· 65 65 66 66 bio = bio_alloc(GFP_NOIO, 1); 67 67 bio->bi_iter.bi_sector = sector; 68 - bio->bi_bdev = sb->s_bdev; 68 + bio_set_dev(bio, sb->s_bdev); 69 69 bio_set_op_attrs(bio, op, op_flags); 70 70 71 71 if (op != WRITE && data)
+2 -2
fs/iomap.c
··· 805 805 struct bio *bio; 806 806 807 807 bio = bio_alloc(GFP_KERNEL, 1); 808 - bio->bi_bdev = iomap->bdev; 808 + bio_set_dev(bio, iomap->bdev); 809 809 bio->bi_iter.bi_sector = 810 810 iomap->blkno + ((pos - iomap->offset) >> 9); 811 811 bio->bi_private = dio; ··· 884 884 return 0; 885 885 886 886 bio = bio_alloc(GFP_KERNEL, nr_pages); 887 - bio->bi_bdev = iomap->bdev; 887 + bio_set_dev(bio, iomap->bdev); 888 888 bio->bi_iter.bi_sector = 889 889 iomap->blkno + ((pos - iomap->offset) >> 9); 890 890 bio->bi_write_hint = dio->iocb->ki_hint;
+2 -2
fs/jfs/jfs_logmgr.c
··· 1995 1995 bio = bio_alloc(GFP_NOFS, 1); 1996 1996 1997 1997 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); 1998 - bio->bi_bdev = log->bdev; 1998 + bio_set_dev(bio, log->bdev); 1999 1999 2000 2000 bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); 2001 2001 BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); ··· 2139 2139 2140 2140 bio = bio_alloc(GFP_NOFS, 1); 2141 2141 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); 2142 - bio->bi_bdev = log->bdev; 2142 + bio_set_dev(bio, log->bdev); 2143 2143 2144 2144 bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); 2145 2145 BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
+2 -2
fs/jfs/jfs_metapage.c
··· 430 430 len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage); 431 431 432 432 bio = bio_alloc(GFP_NOFS, 1); 433 - bio->bi_bdev = inode->i_sb->s_bdev; 433 + bio_set_dev(bio, inode->i_sb->s_bdev); 434 434 bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); 435 435 bio->bi_end_io = metapage_write_end_io; 436 436 bio->bi_private = page; ··· 510 510 submit_bio(bio); 511 511 512 512 bio = bio_alloc(GFP_NOFS, 1); 513 - bio->bi_bdev = inode->i_sb->s_bdev; 513 + bio_set_dev(bio, inode->i_sb->s_bdev); 514 514 bio->bi_iter.bi_sector = 515 515 pblock << (inode->i_blkbits - 9); 516 516 bio->bi_end_io = metapage_read_end_io;
+1 -1
fs/mpage.c
··· 83 83 } 84 84 85 85 if (bio) { 86 - bio->bi_bdev = bdev; 86 + bio_set_dev(bio, bdev); 87 87 bio->bi_iter.bi_sector = first_sector; 88 88 } 89 89 return bio;
+1 -1
fs/nfs/blocklayout/blocklayout.c
··· 130 130 131 131 if (bio) { 132 132 bio->bi_iter.bi_sector = disk_sector; 133 - bio->bi_bdev = bdev; 133 + bio_set_dev(bio, bdev); 134 134 bio->bi_end_io = end_io; 135 135 bio->bi_private = par; 136 136 }
+1 -1
fs/nilfs2/segbuf.c
··· 400 400 bio = bio_alloc(GFP_NOIO, nr_vecs); 401 401 } 402 402 if (likely(bio)) { 403 - bio->bi_bdev = nilfs->ns_bdev; 403 + bio_set_dev(bio, nilfs->ns_bdev); 404 404 bio->bi_iter.bi_sector = 405 405 start << (nilfs->ns_blocksize_bits - 9); 406 406 }
+1 -1
fs/ocfs2/cluster/heartbeat.c
··· 554 554 555 555 /* Must put everything in 512 byte sectors for the bio... */ 556 556 bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); 557 - bio->bi_bdev = reg->hr_bdev; 557 + bio_set_dev(bio, reg->hr_bdev); 558 558 bio->bi_private = wc; 559 559 bio->bi_end_io = o2hb_bio_end_io; 560 560 bio_set_op_attrs(bio, op, op_flags);
+1 -1
fs/xfs/xfs_aops.c
··· 517 517 struct buffer_head *bh) 518 518 { 519 519 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 520 - bio->bi_bdev = bh->b_bdev; 520 + bio_set_dev(bio, bh->b_bdev); 521 521 } 522 522 523 523 static struct xfs_ioend *
+1 -1
fs/xfs/xfs_buf.c
··· 1281 1281 nr_pages = min(total_nr_pages, BIO_MAX_PAGES); 1282 1282 1283 1283 bio = bio_alloc(GFP_NOIO, nr_pages); 1284 - bio->bi_bdev = bp->b_target->bt_bdev; 1284 + bio_set_dev(bio, bp->b_target->bt_bdev); 1285 1285 bio->bi_iter.bi_sector = sector; 1286 1286 bio->bi_end_io = xfs_buf_bio_end_io; 1287 1287 bio->bi_private = bp;
+18
include/linux/bio.h
··· 494 494 extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); 495 495 extern unsigned int bvec_nr_vecs(unsigned short idx); 496 496 497 + #define bio_set_dev(bio, bdev) \ 498 + do { \ 499 + (bio)->bi_disk = (bdev)->bd_disk; \ 500 + (bio)->bi_partno = (bdev)->bd_partno; \ 501 + } while (0) 502 + 503 + #define bio_copy_dev(dst, src) \ 504 + do { \ 505 + (dst)->bi_disk = (src)->bi_disk; \ 506 + (dst)->bi_partno = (src)->bi_partno; \ 507 + } while (0) 508 + 509 + #define bio_dev(bio) \ 510 + disk_devt((bio)->bi_disk) 511 + 512 + #define bio_devname(bio, buf) \ 513 + __bdevname(bio_dev(bio), (buf)) 514 + 497 515 #ifdef CONFIG_BLK_CGROUP 498 516 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); 499 517 int bio_associate_current(struct bio *bio);
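
These four macros are now the per-bio device interface: bio_set_dev() points a bio at a block device (disk plus partition index), bio_copy_dev() clones that association, and bio_dev()/bio_devname() recover the owning disk's dev_t and name. A minimal, hypothetical submission helper showing the intended use (names invented for illustration):

	static void example_write_page(struct block_device *bdev,
				       struct page *page, sector_t sector)
	{
		struct bio *bio = bio_alloc(GFP_NOIO, 1);

		bio_set_dev(bio, bdev);		/* fills bi_disk and bi_partno */
		bio->bi_iter.bi_sector = sector;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		bio_add_page(bio, page, PAGE_SIZE, 0);
		submit_bio(bio);
	}
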
+2 -1
include/linux/blk_types.h
··· 48 48 */ 49 49 struct bio { 50 50 struct bio *bi_next; /* request queue link */ 51 - struct block_device *bi_bdev; 51 + struct gendisk *bi_disk; 52 + u8 bi_partno; 52 53 blk_status_t bi_status; 53 54 unsigned int bi_opf; /* bottom bits req flags, 54 55 * top bits REQ_OP. Use
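
The struct change behind the conversion: bi_bdev is replaced by the gendisk pointer plus a one-byte partition index. The two lookup patterns this enables recur throughout the hunks above and below:

	/* was: struct request_queue *q = bdev_get_queue(bio->bi_bdev); */
	struct request_queue *q = bio->bi_disk->queue;

	/* was: dev_t dev = bio->bi_bdev->bd_dev; */
	dev_t dev = bio_dev(bio);	/* note: disk_devt(), i.e. the whole-disk dev_t */
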
+3 -3
include/trace/events/bcache.h
··· 21 21 ), 22 22 23 23 TP_fast_assign( 24 - __entry->dev = bio->bi_bdev->bd_dev; 24 + __entry->dev = bio_dev(bio); 25 25 __entry->orig_major = d->disk->major; 26 26 __entry->orig_minor = d->disk->first_minor; 27 27 __entry->sector = bio->bi_iter.bi_sector; ··· 98 98 ), 99 99 100 100 TP_fast_assign( 101 - __entry->dev = bio->bi_bdev->bd_dev; 101 + __entry->dev = bio_dev(bio); 102 102 __entry->sector = bio->bi_iter.bi_sector; 103 103 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 104 104 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); ··· 133 133 ), 134 134 135 135 TP_fast_assign( 136 - __entry->dev = bio->bi_bdev->bd_dev; 136 + __entry->dev = bio_dev(bio); 137 137 __entry->sector = bio->bi_iter.bi_sector; 138 138 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 139 139 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+8 -8
include/trace/events/block.h
··· 236 236 ), 237 237 238 238 TP_fast_assign( 239 - __entry->dev = bio->bi_bdev ? 240 - bio->bi_bdev->bd_dev : 0; 239 + __entry->dev = bio_dev(bio); 241 240 __entry->sector = bio->bi_iter.bi_sector; 242 241 __entry->nr_sector = bio_sectors(bio); 243 242 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); ··· 273 274 ), 274 275 275 276 TP_fast_assign( 276 - __entry->dev = bio->bi_bdev->bd_dev; 277 + __entry->dev = bio_dev(bio); 277 278 __entry->sector = bio->bi_iter.bi_sector; 278 279 __entry->nr_sector = bio_sectors(bio); 279 280 __entry->error = error; ··· 301 302 ), 302 303 303 304 TP_fast_assign( 304 - __entry->dev = bio->bi_bdev->bd_dev; 305 + __entry->dev = bio_dev(bio); 305 306 __entry->sector = bio->bi_iter.bi_sector; 306 307 __entry->nr_sector = bio_sectors(bio); 307 308 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); ··· 368 369 ), 369 370 370 371 TP_fast_assign( 371 - __entry->dev = bio->bi_bdev->bd_dev; 372 + __entry->dev = bio_dev(bio); 372 373 __entry->sector = bio->bi_iter.bi_sector; 373 374 __entry->nr_sector = bio_sectors(bio); 374 375 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); ··· 396 397 ), 397 398 398 399 TP_fast_assign( 399 - __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; 400 + __entry->dev = bio ? bio_dev(bio) : 0; 400 401 __entry->sector = bio ? bio->bi_iter.bi_sector : 0; 401 402 __entry->nr_sector = bio ? bio_sectors(bio) : 0; 402 403 blk_fill_rwbs(__entry->rwbs, ··· 532 532 ), 533 533 534 534 TP_fast_assign( 535 - __entry->dev = bio->bi_bdev->bd_dev; 535 + __entry->dev = bio_dev(bio); 536 536 __entry->sector = bio->bi_iter.bi_sector; 537 537 __entry->new_sector = new_sector; 538 538 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); ··· 573 573 ), 574 574 575 575 TP_fast_assign( 576 - __entry->dev = bio->bi_bdev->bd_dev; 576 + __entry->dev = bio_dev(bio); 577 577 __entry->sector = bio->bi_iter.bi_sector; 578 578 __entry->nr_sector = bio_sectors(bio); 579 579 __entry->old_dev = dev;
+1 -1
include/trace/events/f2fs.h
··· 829 829 830 830 TP_fast_assign( 831 831 __entry->dev = sb->s_dev; 832 - __entry->target = bio->bi_bdev->bd_dev; 832 + __entry->target = bio_dev(bio); 833 833 __entry->op = bio_op(bio); 834 834 __entry->op_flags = bio->bi_opf; 835 835 __entry->type = type;
+2 -3
kernel/power/swap.c
··· 242 242 243 243 if (bio->bi_status) { 244 244 printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", 245 - imajor(bio->bi_bdev->bd_inode), 246 - iminor(bio->bi_bdev->bd_inode), 245 + MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), 247 246 (unsigned long long)bio->bi_iter.bi_sector); 248 247 } 249 248 ··· 269 270 270 271 bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1); 271 272 bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); 272 - bio->bi_bdev = hib_resume_bdev; 273 + bio_set_dev(bio, hib_resume_bdev); 273 274 bio_set_op_attrs(bio, op, op_flags); 274 275 275 276 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+1 -1
kernel/trace/blktrace.c
··· 963 963 return; 964 964 965 965 r.device_from = cpu_to_be32(dev); 966 - r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev); 966 + r.device_to = cpu_to_be32(bio_dev(bio)); 967 967 r.sector_from = cpu_to_be64(from); 968 968 969 969 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+9 -8
mm/page_io.c
··· 31 31 32 32 bio = bio_alloc(gfp_flags, 1); 33 33 if (bio) { 34 - bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); 34 + struct block_device *bdev; 35 + 36 + bio->bi_iter.bi_sector = map_swap_page(page, &bdev); 37 + bio_set_dev(bio, bdev); 35 38 bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; 36 39 bio->bi_end_io = end_io; 37 40 ··· 60 57 */ 61 58 set_page_dirty(page); 62 59 pr_alert("Write-error on swap-device (%u:%u:%llu)\n", 63 - imajor(bio->bi_bdev->bd_inode), 64 - iminor(bio->bi_bdev->bd_inode), 60 + MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), 65 61 (unsigned long long)bio->bi_iter.bi_sector); 66 62 ClearPageReclaim(page); 67 63 } ··· 125 123 SetPageError(page); 126 124 ClearPageUptodate(page); 127 125 pr_alert("Read-error on swap-device (%u:%u:%llu)\n", 128 - imajor(bio->bi_bdev->bd_inode), 129 - iminor(bio->bi_bdev->bd_inode), 126 + MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), 130 127 (unsigned long long)bio->bi_iter.bi_sector); 131 128 goto out; 132 129 } ··· 339 338 int ret = 0; 340 339 struct swap_info_struct *sis = page_swap_info(page); 341 340 blk_qc_t qc; 342 - struct block_device *bdev; 341 + struct gendisk *disk; 343 342 344 343 VM_BUG_ON_PAGE(!PageSwapCache(page), page); 345 344 VM_BUG_ON_PAGE(!PageLocked(page), page); ··· 378 377 ret = -ENOMEM; 379 378 goto out; 380 379 } 381 - bdev = bio->bi_bdev; 380 + disk = bio->bi_disk; 382 381 bio->bi_private = current; 383 382 bio_set_op_attrs(bio, REQ_OP_READ, 0); 384 383 count_vm_event(PSWPIN); ··· 389 388 if (!READ_ONCE(bio->bi_private)) 390 389 break; 391 390 392 - if (!blk_mq_poll(bdev_get_queue(bdev), qc)) 391 + if (!blk_mq_poll(disk->queue, qc)) 393 392 break; 394 393 } 395 394 __set_current_state(TASK_RUNNING);