
block: store a block_device pointer in struct bio

Replace the gendisk pointer in struct bio with a pointer to the newly
improved struct block_device. From that the gendisk can be trivially
accessed with an extra indirection, and it also allows direct lookup of
all information related to partition remapping.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig and committed by Jens Axboe
309dca30 cf9a978f
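The conversion is mechanical almost everywhere: every bio->bi_disk becomes bio->bi_bdev->bd_disk, and bio->bi_partno becomes bio->bi_bdev->bd_partno. A minimal sketch of the resulting accessor idioms — the helper names below are hypothetical, for illustration only:

#include <linux/blk_types.h>	/* struct bio, struct block_device */
#include <linux/genhd.h>	/* struct gendisk */

/* Hypothetical helpers showing the post-commit access paths. */
static inline struct gendisk *example_bio_disk(struct bio *bio)
{
	return bio->bi_bdev->bd_disk;		/* was: bio->bi_disk */
}

static inline u8 example_bio_partno(struct bio *bio)
{
	return bio->bi_bdev->bd_partno;		/* was: bio->bi_partno */
}

static inline struct request_queue *example_bio_queue(struct bio *bio)
{
	return bio->bi_bdev->bd_disk->queue;	/* was: bio->bi_disk->queue */
}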

Total: +154 -184
+1 -1
arch/m68k/emu/nfblock.c
···
 
 static blk_qc_t nfhd_submit_bio(struct bio *bio)
 {
-	struct nfhd_device *dev = bio->bi_disk->private_data;
+	struct nfhd_device *dev = bio->bi_bdev->bd_disk->private_data;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 	int dir, len, shift;
+1 -1
arch/xtensa/platforms/iss/simdisk.c
···
 
 static blk_qc_t simdisk_submit_bio(struct bio *bio)
 {
-	struct simdisk *dev = bio->bi_disk->private_data;
+	struct simdisk *dev = bio->bi_bdev->bd_disk->private_data;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 	sector_t sector = bio->bi_iter.bi_sector;
+9 -9
block/bio-integrity.c
···
 	iv = bip->bip_vec + bip->bip_vcnt;
 
 	if (bip->bip_vcnt &&
-	    bvec_gap_to_prev(bio->bi_disk->queue,
+	    bvec_gap_to_prev(bio->bi_bdev->bd_disk->queue,
 			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
 		return 0;
 
···
 static blk_status_t bio_integrity_process(struct bio *bio,
 		struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn)
 {
-	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
 	struct blk_integrity_iter iter;
 	struct bvec_iter bviter;
 	struct bio_vec bv;
···
 	void *prot_buf = page_address(bip->bip_vec->bv_page) +
 		bip->bip_vec->bv_offset;
 
-	iter.disk_name = bio->bi_disk->disk_name;
+	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
 	iter.interval = 1 << bi->interval_exp;
 	iter.seed = proc_iter->bi_sector;
 	iter.prot_buf = prot_buf;
···
 bool bio_integrity_prep(struct bio *bio)
 {
 	struct bio_integrity_payload *bip;
-	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
-	struct request_queue *q = bio->bi_disk->queue;
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 	void *buf;
 	unsigned long start, end;
 	unsigned int len, nr_pages;
···
 	struct bio_integrity_payload *bip =
 		container_of(work, struct bio_integrity_payload, bip_work);
 	struct bio *bio = bip->bip_bio;
-	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
 
 	/*
 	 * At the moment verify is called bio's iterator was advanced
···
  */
 bool __bio_integrity_endio(struct bio *bio)
 {
-	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
 	struct bio_integrity_payload *bip = bio_integrity(bio);
 
 	if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
···
 void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
 {
 	struct bio_integrity_payload *bip = bio_integrity(bio);
-	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
 	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
 
 	bip->bip_iter.bi_sector += bytes_done >> 9;
···
 void bio_integrity_trim(struct bio *bio)
 {
 	struct bio_integrity_payload *bip = bio_integrity(bio);
-	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
 
 	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
 }
+11 -20
block/bio.c
···
  */
 void guard_bio_eod(struct bio *bio)
 {
-	sector_t maxsector;
-	struct block_device *part;
-
-	rcu_read_lock();
-	part = __disk_get_part(bio->bi_disk, bio->bi_partno);
-	if (part)
-		maxsector = bdev_nr_sectors(part);
-	else
-		maxsector = get_capacity(bio->bi_disk);
-	rcu_read_unlock();
+	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
 
 	if (!maxsector)
 		return;
···
 	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
 
 	/*
-	 * most users will be overriding ->bi_disk with a new target,
+	 * most users will be overriding ->bi_bdev with a new target,
 	 * so we don't set nor calculate new physical/hw segment counts here
 	 */
-	bio->bi_disk = bio_src->bi_disk;
-	bio->bi_partno = bio_src->bi_partno;
+	bio->bi_bdev = bio_src->bi_bdev;
 	bio_set_flag(bio, BIO_CLONED);
 	if (bio_flagged(bio_src, BIO_THROTTLED))
 		bio_set_flag(bio, BIO_THROTTLED);
···
 
 const char *bio_devname(struct bio *bio, char *buf)
 {
-	return disk_name(bio->bi_disk, bio->bi_partno, buf);
+	return bdevname(bio->bi_bdev, buf);
 }
 EXPORT_SYMBOL(bio_devname);
 
···
 {
 	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
-	struct request_queue *q = bio->bi_disk->queue;
+	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
 	struct page **pages = (struct page **)bv;
···
  */
 int submit_bio_wait(struct bio *bio)
 {
-	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
+	DECLARE_COMPLETION_ONSTACK_MAP(done,
+			bio->bi_bdev->bd_disk->lockdep_map);
 	unsigned long hang_check;
 
 	bio->bi_private = &done;
···
 	if (!bio_integrity_endio(bio))
 		return;
 
-	if (bio->bi_disk)
-		rq_qos_done_bio(bio->bi_disk->queue, bio);
+	if (bio->bi_bdev)
+		rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
 
 	/*
 	 * Need to have a real endio function for chained bios, otherwise
···
 		goto again;
 	}
 
-	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
-		trace_block_bio_complete(bio->bi_disk->queue, bio);
+	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+		trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
 	}
 
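The guard_bio_eod() rewrite above is where the new pointer pays off most directly: the capacity that bounds a bio is simply the size of the block_device it targets, partition or whole disk, with no RCU-protected partition lookup. A sketch of the idea (hypothetical wrapper, not from the patch):

/* Hypothetical illustration: bdev_nr_sectors() already returns the
 * partition-aware capacity, or 0 if the size is not yet known. */
static sector_t example_bio_max_sector(struct bio *bio)
{
	return bdev_nr_sectors(bio->bi_bdev);
}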
+4 -3
block/blk-cgroup.c
···
 	struct blkcg_gq *blkg, *ret_blkg = NULL;
 
 	rcu_read_lock();
-	blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_disk->queue);
+	blkg = blkg_lookup_create(css_to_blkcg(css),
+				  bio->bi_bdev->bd_disk->queue);
 	while (blkg) {
 		if (blkg_tryget(blkg)) {
 			ret_blkg = blkg;
···
 	if (css && css->parent) {
 		bio->bi_blkg = blkg_tryget_closest(bio, css);
 	} else {
-		blkg_get(bio->bi_disk->queue->root_blkg);
-		bio->bi_blkg = bio->bi_disk->queue->root_blkg;
+		blkg_get(bio->bi_bdev->bd_disk->queue->root_blkg);
+		bio->bi_blkg = bio->bi_bdev->bd_disk->queue->root_blkg;
 	}
 }
 EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
+17 -20
block/blk-core.c
···
 
 static inline int bio_queue_enter(struct bio *bio)
 {
-	struct request_queue *q = bio->bi_disk->queue;
+	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 	bool nowait = bio->bi_opf & REQ_NOWAIT;
 	int ret;
 
···
 
 static noinline int should_fail_bio(struct bio *bio)
 {
-	if (should_fail_request(bio->bi_disk->part0, bio->bi_iter.bi_size))
+	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
 		return -EIO;
 	return 0;
 }
···
  */
 static inline int blk_partition_remap(struct bio *bio)
 {
-	struct block_device *p;
+	struct block_device *p = bio->bi_bdev;
 	int ret = -EIO;
 
-	rcu_read_lock();
-	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
-	if (unlikely(!p))
-		goto out;
 	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
 		goto out;
 	if (unlikely(bio_check_ro(bio, p)))
···
 			bio->bi_iter.bi_sector -
 				p->bd_start_sect);
 	}
-	bio->bi_partno = 0;
+	bio->bi_bdev = bdev_whole(p);
 	ret = 0;
 out:
-	rcu_read_unlock();
 	return ret;
 }
 
···
 
 static noinline_for_stack bool submit_bio_checks(struct bio *bio)
 {
-	struct request_queue *q = bio->bi_disk->queue;
+	struct block_device *bdev = bio->bi_bdev;
+	struct request_queue *q = bdev->bd_disk->queue;
 	blk_status_t status = BLK_STS_IOERR;
 	struct blk_plug *plug;
 
···
 	if (should_fail_bio(bio))
 		goto end_io;
 
-	if (bio->bi_partno) {
+	if (bio->bi_bdev->bd_partno) {
 		if (unlikely(blk_partition_remap(bio)))
 			goto end_io;
 	} else {
-		if (unlikely(bio_check_ro(bio, bio->bi_disk->part0)))
+		if (unlikely(bio_check_ro(bio, bdev_whole(bdev))))
 			goto end_io;
-		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
+		if (unlikely(bio_check_eod(bio, get_capacity(bdev->bd_disk))))
 			goto end_io;
 	}
 
···
 
 static blk_qc_t __submit_bio(struct bio *bio)
 {
-	struct gendisk *disk = bio->bi_disk;
+	struct gendisk *disk = bio->bi_bdev->bd_disk;
 	blk_qc_t ret = BLK_QC_T_NONE;
 
 	if (blk_crypto_bio_prep(&bio)) {
···
 	current->bio_list = bio_list_on_stack;
 
 	do {
-		struct request_queue *q = bio->bi_disk->queue;
+		struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 		struct bio_list lower, same;
 
 		if (unlikely(bio_queue_enter(bio) != 0))
···
 		bio_list_init(&lower);
 		bio_list_init(&same);
 		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
-			if (q == bio->bi_disk->queue)
+			if (q == bio->bi_bdev->bd_disk->queue)
 				bio_list_add(&same, bio);
 			else
 				bio_list_add(&lower, bio);
···
 	current->bio_list = bio_list;
 
 	do {
-		struct gendisk *disk = bio->bi_disk;
+		struct gendisk *disk = bio->bi_bdev->bd_disk;
 
 		if (unlikely(bio_queue_enter(bio) != 0))
 			continue;
···
 		return BLK_QC_T_NONE;
 	}
 
-	if (!bio->bi_disk->fops->submit_bio)
+	if (!bio->bi_bdev->bd_disk->fops->submit_bio)
 		return __submit_bio_noacct_mq(bio);
 	return __submit_bio_noacct(bio);
 }
···
  *
  * submit_bio() is used to submit I/O requests to block devices. It is passed a
  * fully set up &struct bio that describes the I/O that needs to be done. The
- * bio will be send to the device described by the bi_disk and bi_partno fields.
+ * bio will be send to the device described by the bi_bdev field.
  *
  * The success/failure status of the request, along with notification of
  * completion, is delivered asynchronously through the ->bi_end_io() callback
···
 	unsigned int count;
 
 	if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
-		count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
+		count = queue_logical_block_size(
+				bio->bi_bdev->bd_disk->queue) >> 9;
 	else
 		count = bio_sectors(bio);
 
+1 -1
block/blk-crypto-fallback.c
···
 	bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);
 	if (!bio)
 		return NULL;
-	bio->bi_disk = bio_src->bi_disk;
+	bio->bi_bdev = bio_src->bi_bdev;
 	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_ioprio = bio_src->bi_ioprio;
 	bio->bi_write_hint = bio_src->bi_write_hint;
+1 -1
block/blk-crypto.c
···
 	 * Success if device supports the encryption context, or if we succeeded
 	 * in falling back to the crypto API.
 	 */
-	if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
+	if (blk_ksm_crypto_cfg_supported(bio->bi_bdev->bd_disk->queue->ksm,
 					 &bc_key->crypto_cfg))
 		return true;
 
+8 -9
block/blk-merge.c
···
  * Split a bio into two bios, chain the two bios, submit the second half and
  * store a pointer to the first half in *@bio. If the second bio is still too
  * big it will be split by a recursive call to this function. Since this
- * function may allocate a new bio from @bio->bi_disk->queue->bio_split, it is
- * the responsibility of the caller to ensure that
- * @bio->bi_disk->queue->bio_split is only released after processing of the
- * split bio has finished.
+ * function may allocate a new bio from q->bio_split, it is the responsibility
+ * of the caller to ensure that q->bio_split is only released after processing
+ * of the split bio has finished.
  */
 void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
 {
-	struct request_queue *q = (*bio)->bi_disk->queue;
+	struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
 	struct bio *split = NULL;
 
 	switch (bio_op(*bio)) {
···
  *
  * Split a bio into two bios, chains the two bios, submit the second half and
  * store a pointer to the first half in *@bio. Since this function may allocate
- * a new bio from @bio->bi_disk->queue->bio_split, it is the responsibility of
- * the caller to ensure that @bio->bi_disk->queue->bio_split is only released
- * after processing of the split bio has finished.
+ * a new bio from q->bio_split, it is the responsibility of the caller to ensure
+ * that q->bio_split is only released after processing of the split bio has
+ * finished.
  */
 void blk_queue_split(struct bio **bio)
 {
···
 		return false;
 
 	/* must be same device */
-	if (rq->rq_disk != bio->bi_disk)
+	if (rq->rq_disk != bio->bi_bdev->bd_disk)
 		return false;
 
 	/* only merge integrity protected bio into ditto rq */
+1 -1
block/blk-mq.c
···
  */
 blk_qc_t blk_mq_submit_bio(struct bio *bio)
 {
-	struct request_queue *q = bio->bi_disk->queue;
+	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = op_is_flush(bio->bi_opf);
 	struct blk_mq_alloc_data data = {
+1 -1
block/blk-throttle.c
···
 
 bool blk_throtl_bio(struct bio *bio)
 {
-	struct request_queue *q = bio->bi_disk->queue;
+	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 	struct blkcg_gq *blkg = bio->bi_blkg;
 	struct throtl_qnode *qn = NULL;
 	struct throtl_grp *tg = blkg_to_tg(blkg);
-2
block/blk.h
···
 	__elevator_exit(q, e);
 }
 
-struct block_device *__disk_get_part(struct gendisk *disk, int partno);
-
 ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
 		char *buf);
 ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
+1 -1
block/bounce.c
···
 	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
 	if (!bio)
 		return NULL;
-	bio->bi_disk = bio_src->bi_disk;
+	bio->bi_bdev = bio_src->bi_bdev;
 	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_ioprio = bio_src->bi_ioprio;
 	bio->bi_write_hint = bio_src->bi_write_hint;
+1 -1
block/genhd.c
···
 		inflight[1] = 0;
 }
 
-struct block_device *__disk_get_part(struct gendisk *disk, int partno)
+static struct block_device *__disk_get_part(struct gendisk *disk, int partno)
 {
 	struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
 
+1 -1
drivers/block/brd.c
···
 
 static blk_qc_t brd_submit_bio(struct bio *bio)
 {
-	struct brd_device *brd = bio->bi_disk->private_data;
+	struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
 	sector_t sector = bio->bi_iter.bi_sector;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
+2 -2
drivers/block/drbd/drbd_int.h
···
 					     int fault_type, struct bio *bio)
 {
 	__release(local);
-	if (!bio->bi_disk) {
-		drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n");
+	if (!bio->bi_bdev) {
+		drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n");
 		bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		return;
+1 -1
drivers/block/drbd/drbd_req.c
···
 
 blk_qc_t drbd_submit_bio(struct bio *bio)
 {
-	struct drbd_device *device = bio->bi_disk->private_data;
+	struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
 	unsigned long start_jif;
 
 	blk_queue_split(&bio);
+1 -1
drivers/block/null_blk/main.c
···
 {
 	sector_t sector = bio->bi_iter.bi_sector;
 	sector_t nr_sectors = bio_sectors(bio);
-	struct nullb *nullb = bio->bi_disk->private_data;
+	struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
 	struct nullb_queue *nq = nullb_to_queue(nullb);
 	struct nullb_cmd *cmd;
 
+2 -2
drivers/block/pktcdvd.c
···
 
 	blk_queue_split(&bio);
 
-	pd = bio->bi_disk->queue->queuedata;
+	pd = bio->bi_bdev->bd_disk->queue->queuedata;
 	if (!pd) {
 		pr_err("%s incorrect request queue\n", bio_devname(bio, b));
 		goto end_io;
···
 			split = bio;
 		}
 
-		pkt_make_request_write(bio->bi_disk->queue, split);
+		pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
 	} while (split != bio);
 
 	return BLK_QC_T_NONE;
+1 -1
drivers/block/ps3vram.c
···
 
 static blk_qc_t ps3vram_submit_bio(struct bio *bio)
 {
-	struct ps3_system_bus_device *dev = bio->bi_disk->private_data;
+	struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data;
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	int busy;
 
+1 -1
drivers/block/rsxx/dev.c
···
 
 static blk_qc_t rsxx_submit_bio(struct bio *bio)
 {
-	struct rsxx_cardinfo *card = bio->bi_disk->private_data;
+	struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
 	struct rsxx_bio_meta *bio_meta;
 	blk_status_t st = BLK_STS_IOERR;
 
+1 -1
drivers/block/umem.c
···
 
 static blk_qc_t mm_submit_bio(struct bio *bio)
 {
-	struct cardinfo *card = bio->bi_disk->private_data;
+	struct cardinfo *card = bio->bi_bdev->bd_disk->private_data;
 
 	pr_debug("mm_make_request %llu %u\n",
 		 (unsigned long long)bio->bi_iter.bi_sector,
+1 -1
drivers/block/zram/zram_drv.c
···
  */
 static blk_qc_t zram_submit_bio(struct bio *bio)
 {
-	struct zram *zram = bio->bi_disk->private_data;
+	struct zram *zram = bio->bi_bdev->bd_disk->private_data;
 
 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
 					bio->bi_iter.bi_size)) {
+1 -1
drivers/lightnvm/pblk-init.c
···
 
 static blk_qc_t pblk_submit_bio(struct bio *bio)
 {
-	struct pblk *pblk = bio->bi_disk->queue->queuedata;
+	struct pblk *pblk = bio->bi_bdev->bd_disk->queue->queuedata;
 
 	if (bio_op(bio) == REQ_OP_DISCARD) {
 		pblk_discard(pblk, bio);
+1 -1
drivers/md/bcache/debug.c
···
 	check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
 	if (!check)
 		return;
-	check->bi_disk = bio->bi_disk;
+	check->bi_bdev = bio->bi_bdev;
 	check->bi_opf = REQ_OP_READ;
 	check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 	check->bi_iter.bi_size = bio->bi_iter.bi_size;
+4 -3
drivers/md/bcache/request.c
···
 	    !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
 	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
 		reada = min_t(sector_t, dc->readahead >> 9,
-			      get_capacity(bio->bi_disk) - bio_end_sector(bio));
+			      get_capacity(bio->bi_bdev->bd_disk) -
+			      bio_end_sector(bio));
 
 	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
···
 blk_qc_t cached_dev_submit_bio(struct bio *bio)
 {
 	struct search *s;
-	struct bcache_device *d = bio->bi_disk->private_data;
+	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
 	int rw = bio_data_dir(bio);
 
···
 {
 	struct search *s;
 	struct closure *cl;
-	struct bcache_device *d = bio->bi_disk->private_data;
+	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
 
 	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
 		bio->bi_status = BLK_STS_IOERR;
+3 -6
drivers/md/dm-bio-record.h
···
  */
 
 struct dm_bio_details {
-	struct gendisk *bi_disk;
-	u8 bi_partno;
+	struct block_device *bi_bdev;
 	int __bi_remaining;
 	unsigned long bi_flags;
 	struct bvec_iter bi_iter;
···
 
 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
 {
-	bd->bi_disk = bio->bi_disk;
-	bd->bi_partno = bio->bi_partno;
+	bd->bi_bdev = bio->bi_bdev;
 	bd->bi_flags = bio->bi_flags;
 	bd->bi_iter = bio->bi_iter;
 	bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
···
 
 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
 {
-	bio->bi_disk = bd->bi_disk;
-	bio->bi_partno = bd->bi_partno;
+	bio->bi_bdev = bd->bi_bdev;
 	bio->bi_flags = bd->bi_flags;
 	bio->bi_iter = bd->bi_iter;
 	atomic_set(&bio->__bi_remaining, bd->__bi_remaining);
+5 -5
drivers/md/dm-raid1.c
···
 
 struct dm_raid1_bio_record {
 	struct mirror *m;
-	/* if details->bi_disk == NULL, details were not saved */
+	/* if details->bi_bdev == NULL, details were not saved */
 	struct dm_bio_details details;
 	region_t write_region;
 };
···
 	struct dm_raid1_bio_record *bio_record =
 		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
 
-	bio_record->details.bi_disk = NULL;
+	bio_record->details.bi_bdev = NULL;
 
 	if (rw == WRITE) {
 		/* Save region for mirror_end_io() handler */
···
 		goto out;
 
 	if (unlikely(*error)) {
-		if (!bio_record->details.bi_disk) {
+		if (!bio_record->details.bi_bdev) {
 			/*
 			 * There wasn't enough memory to record necessary
 			 * information for a retry or there was no other
···
 		bd = &bio_record->details;
 
 		dm_bio_restore(bd, bio);
-		bio_record->details.bi_disk = NULL;
+		bio_record->details.bi_bdev = NULL;
 		bio->bi_status = 0;
 
 		queue_bio(ms, bio, rw);
···
 	}
 
 out:
-	bio_record->details.bi_disk = NULL;
+	bio_record->details.bi_bdev = NULL;
 
 	return DM_ENDIO_DONE;
 }
+7 -7
drivers/md/dm.c
···
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
 	struct bio *orig_bio = io->orig_bio;
+	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 
 	if (unlikely(error == BLK_STS_TARGET)) {
 		if (bio_op(bio) == REQ_OP_DISCARD &&
-		    !bio->bi_disk->queue->limits.max_discard_sectors)
+		    !q->limits.max_discard_sectors)
 			disable_discard(md);
 		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-			 !bio->bi_disk->queue->limits.max_write_same_sectors)
+			 !q->limits.max_write_same_sectors)
 			disable_write_same(md);
 		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+			 !q->limits.max_write_zeroes_sectors)
 			disable_write_zeroes(md);
 	}
···
 	 */
 	if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
 		sector_t written_sector = bio->bi_iter.bi_sector;
-		struct request_queue *q = orig_bio->bi_disk->queue;
+		struct request_queue *q = orig_bio->bi_bdev->bd_disk->queue;
 		u64 mask = (u64)blk_queue_zone_sectors(q) - 1;
 
 		orig_bio->bi_iter.bi_sector += written_sector & mask;
···
 	 */
 	bio_init(&flush_bio, NULL, 0);
 	flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
-	flush_bio.bi_disk = ci->io->md->disk;
-	bio_associate_blkg(&flush_bio);
+	bio_set_dev(&flush_bio, ci->io->md->disk->part0);
 
 	ci->bio = &flush_bio;
 	ci->sector_count = 0;
···
 
 static blk_qc_t dm_submit_bio(struct bio *bio)
 {
-	struct mapped_device *md = bio->bi_disk->private_data;
+	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
 	blk_qc_t ret = BLK_QC_T_NONE;
 	int srcu_idx;
 	struct dm_table *map;
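One consolidation hides in the dm.c flush path above: assigning the device with bio_set_dev() on the disk's part0 performs the blkg association itself, so the explicit bio_associate_blkg() call disappears. Roughly (a hypothetical excerpt mirroring the pattern, not taken verbatim from the patch):

/* Sketch of initializing an on-stack flush bio after this commit;
 * bio_set_dev() re-associates the blkg as a side effect. */
static void example_init_flush_bio(struct bio *flush_bio,
				   struct gendisk *disk)
{
	bio_init(flush_bio, NULL, 0);
	flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
	bio_set_dev(flush_bio, disk->part0);
}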
+1 -1
drivers/md/md-linear.c
···
 			start_sector + data_offset;
 
 		if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-			     !blk_queue_discard(bio->bi_disk->queue))) {
+			     !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) {
 			/* Just ignore it */
 			bio_endio(bio);
 		} else {
+1 -1
drivers/md/md.c
···
 static blk_qc_t md_submit_bio(struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
-	struct mddev *mddev = bio->bi_disk->private_data;
+	struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
 
 	if (mddev == NULL || mddev->pers == NULL) {
 		bio_io_error(bio);
+3 -3
drivers/md/md.h
···
 
 static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
 {
-	atomic_add(nr_sectors, &bio->bi_disk->sync_io);
+	md_sync_acct(bio->bi_bdev, nr_sectors);
 }
 
 struct md_personality
···
 static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
 {
 	if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-	    !bio->bi_disk->queue->limits.max_write_same_sectors)
+	    !bio->bi_bdev->bd_disk->queue->limits.max_write_same_sectors)
 		mddev->queue->limits.max_write_same_sectors = 0;
 }
 
 static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
 {
 	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-	    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+	    !bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
 		mddev->queue->limits.max_write_zeroes_sectors = 0;
 }
 
+3 -3
drivers/md/raid1.c
···
 
 	while (bio) { /* submit pending writes */
 		struct bio *next = bio->bi_next;
-		struct md_rdev *rdev = (void *)bio->bi_disk;
+		struct md_rdev *rdev = (void *)bio->bi_bdev;
 		bio->bi_next = NULL;
 		bio_set_dev(bio, rdev->bdev);
 		if (test_bit(Faulty, &rdev->flags)) {
 			bio_io_error(bio);
 		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-				    !blk_queue_discard(bio->bi_disk->queue)))
+				    !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
 			/* Just ignore it */
 			bio_endio(bio);
 		else
···
 		trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
 				      r1_bio->sector);
 	/* flush_pending_writes() needs access to the rdev so...*/
-	mbio->bi_disk = (void *)conf->mirrors[i].rdev;
+	mbio->bi_bdev = (void *)conf->mirrors[i].rdev;
 
 	cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
 	if (cb)
+6 -6
drivers/md/raid10.c
···
 
 	while (bio) { /* submit pending writes */
 		struct bio *next = bio->bi_next;
-		struct md_rdev *rdev = (void*)bio->bi_disk;
+		struct md_rdev *rdev = (void*)bio->bi_bdev;
 		bio->bi_next = NULL;
 		bio_set_dev(bio, rdev->bdev);
 		if (test_bit(Faulty, &rdev->flags)) {
 			bio_io_error(bio);
 		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-				    !blk_queue_discard(bio->bi_disk->queue)))
+				    !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
 			/* Just ignore it */
 			bio_endio(bio);
 		else
···
 
 	while (bio) { /* submit pending writes */
 		struct bio *next = bio->bi_next;
-		struct md_rdev *rdev = (void*)bio->bi_disk;
+		struct md_rdev *rdev = (void*)bio->bi_bdev;
 		bio->bi_next = NULL;
 		bio_set_dev(bio, rdev->bdev);
 		if (test_bit(Faulty, &rdev->flags)) {
 			bio_io_error(bio);
 		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-				    !blk_queue_discard(bio->bi_disk->queue)))
+				    !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
 			/* Just ignore it */
 			bio_endio(bio);
 		else
···
 		trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
 				      r10_bio->sector);
 	/* flush_pending_writes() needs access to the rdev so...*/
-	mbio->bi_disk = (void *)rdev;
+	mbio->bi_bdev = (void *)rdev;
 
 	atomic_inc(&r10_bio->remaining);
 
···
 
 	/* Again, very different code for resync and recovery.
 	 * Both must result in an r10bio with a list of bios that
-	 * have bi_end_io, bi_sector, bi_disk set,
+	 * have bi_end_io, bi_sector, bi_bdev set,
 	 * and bi_private set to the r10bio.
 	 * For recovery, we may actually create several r10bios
 	 * with 2 bios in each, that correspond to the bios in the main one.
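Note the idiom that raid1 and raid10 preserve across the conversion: while writes sit on a plug list, the rdev pointer is stashed in the bio's device field (cast through void *), and the unplug path casts it back and installs a real device before submission. A sketch of the round trip, with hypothetical function names:

/* The stashed value is never dereferenced as a block_device while the
 * bio waits on the plug list. */
static void example_stash_rdev(struct bio *bio, struct md_rdev *rdev)
{
	bio->bi_bdev = (void *)rdev;
}

static void example_submit_pending(struct bio *bio)
{
	struct md_rdev *rdev = (void *)bio->bi_bdev;

	bio_set_dev(bio, rdev->bdev);	/* restore a real device */
	submit_bio_noacct(bio);
}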
+1 -1
drivers/md/raid5.c
···
 	unsigned int chunk_sectors;
 	unsigned int bio_sectors = bio_sectors(bio);
 
-	WARN_ON_ONCE(bio->bi_partno);
+	WARN_ON_ONCE(bio->bi_bdev->bd_partno);
 
 	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
 	return chunk_sectors >=
+2 -2
drivers/nvdimm/blk.c
···
 static blk_qc_t nd_blk_submit_bio(struct bio *bio)
 {
 	struct bio_integrity_payload *bip;
-	struct nd_namespace_blk *nsblk = bio->bi_disk->private_data;
+	struct nd_namespace_blk *nsblk = bio->bi_bdev->bd_disk->private_data;
 	struct bvec_iter iter;
 	unsigned long start;
 	struct bio_vec bvec;
···
 
 	bip = bio_integrity(bio);
 	rw = bio_data_dir(bio);
-	do_acct = blk_queue_io_stat(bio->bi_disk->queue);
+	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
 	if (do_acct)
 		start = bio_start_io_acct(bio);
 	bio_for_each_segment(bvec, bio, iter) {
+2 -2
drivers/nvdimm/btt.c
···
 static blk_qc_t btt_submit_bio(struct bio *bio)
 {
 	struct bio_integrity_payload *bip = bio_integrity(bio);
-	struct btt *btt = bio->bi_disk->private_data;
+	struct btt *btt = bio->bi_bdev->bd_disk->private_data;
 	struct bvec_iter iter;
 	unsigned long start;
 	struct bio_vec bvec;
···
 	if (!bio_integrity_prep(bio))
 		return BLK_QC_T_NONE;
 
-	do_acct = blk_queue_io_stat(bio->bi_disk->queue);
+	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
 	if (do_acct)
 		start = bio_start_io_acct(bio);
 	bio_for_each_segment(bvec, bio, iter) {
+2 -2
drivers/nvdimm/pmem.c
···
 	unsigned long start;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
-	struct pmem_device *pmem = bio->bi_disk->private_data;
+	struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
 	struct nd_region *nd_region = to_region(pmem);
 
 	if (bio->bi_opf & REQ_PREFLUSH)
 		ret = nvdimm_flush(nd_region, bio);
 
-	do_acct = blk_queue_io_stat(bio->bi_disk->queue);
+	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
 	if (do_acct)
 		start = bio_start_io_acct(bio);
 	bio_for_each_segment(bvec, bio, iter) {
+3 -3
drivers/nvme/host/core.c
···
 {
 	bool write = nvme_is_write(cmd);
 	struct nvme_ns *ns = q->queuedata;
-	struct gendisk *disk = ns ? ns->disk : NULL;
+	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
 	struct request *req;
 	struct bio *bio = NULL;
 	void *meta = NULL;
···
 	if (ret)
 		goto out;
 	bio = req->bio;
-	bio->bi_disk = disk;
-	if (disk && meta_buffer && meta_len) {
+	bio->bi_bdev = bdev;
+	if (bdev && meta_buffer && meta_len) {
 		meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
 				meta_seed, write);
 		if (IS_ERR(meta)) {
+1 -2
drivers/nvme/host/lightnvm.c
···
 {
 	bool write = nvme_is_write((struct nvme_command *)vcmd);
 	struct nvm_dev *dev = ns->ndev;
-	struct gendisk *disk = ns->disk;
 	struct request *rq;
 	struct bio *bio = NULL;
 	__le64 *ppa_list = NULL;
···
 			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
 		}
 
-		bio->bi_disk = disk;
+		bio->bi_bdev = ns->disk->part0;
 	}
 
 	blk_execute_rq(q, NULL, rq, 0);
+3 -3
drivers/nvme/host/multipath.c
···
 
 blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
 {
-	struct nvme_ns_head *head = bio->bi_disk->private_data;
+	struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
 	struct device *dev = disk_to_dev(head->disk);
 	struct nvme_ns *ns;
 	blk_qc_t ret = BLK_QC_T_NONE;
···
 	srcu_idx = srcu_read_lock(&head->srcu);
 	ns = nvme_find_path(head);
 	if (likely(ns)) {
-		bio->bi_disk = ns->disk;
+		bio->bi_bdev = ns->disk->part0;
 		bio->bi_opf |= REQ_NVME_MPATH;
 		trace_block_bio_remap(bio, disk_devt(ns->head->disk),
 				      bio->bi_iter.bi_sector);
···
 		 * Reset disk to the mpath node and resubmit to select a new
 		 * path.
 		 */
-		bio->bi_disk = head->disk;
+		bio->bi_bdev = head->disk->part0;
 		submit_bio_noacct(bio);
 	}
 }
+1 -1
drivers/nvme/host/rdma.c
···
 	if (unlikely(nr))
 		goto mr_put;
 
-	nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_disk), c,
+	nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
 				req->mr->sig_attrs, ns->pi_type);
 	nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
 
+1 -1
drivers/s390/block/dcssblk.c
···
 	blk_queue_split(&bio);
 
 	bytes_done = 0;
-	dev_info = bio->bi_disk->private_data;
+	dev_info = bio->bi_bdev->bd_disk->private_data;
 	if (dev_info == NULL)
 		goto fail;
 	if ((bio->bi_iter.bi_sector & 7) != 0 ||
+1 -1
drivers/s390/block/xpram.c
···
  */
 static blk_qc_t xpram_submit_bio(struct bio *bio)
 {
-	xpram_device_t *xdev = bio->bi_disk->private_data;
+	xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 	unsigned int index;
+5 -5
fs/btrfs/check-integrity.c
···
 	mutex_lock(&btrfsic_mutex);
 	/* since btrfsic_submit_bio() is also called before
 	 * btrfsic_mount(), this might return NULL */
-	dev_state = btrfsic_dev_state_lookup(bio_dev(bio) + bio->bi_partno);
+	dev_state = btrfsic_dev_state_lookup(bio->bi_bdev->bd_dev);
 	if (NULL != dev_state &&
 	    (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
 		unsigned int i = 0;
···
 		bio_is_patched = 0;
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
-			pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_disk=%p)\n",
+			pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
 			       bio_op(bio), bio->bi_opf, segs,
-			       bio->bi_iter.bi_sector, dev_bytenr, bio->bi_disk);
+			       bio->bi_iter.bi_sector, dev_bytenr, bio->bi_bdev);
 
 		mapped_datav = kmalloc_array(segs,
 					     sizeof(*mapped_datav), GFP_NOFS);
···
 	} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
-			pr_info("submit_bio(rw=%d,0x%x FLUSH, disk=%p)\n",
-			       bio_op(bio), bio->bi_opf, bio->bi_disk);
+			pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
+			       bio_op(bio), bio->bi_opf, bio->bi_bdev);
 		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
 			if ((dev_state->state->print_mask &
 			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
+2 -5
fs/btrfs/raid56.c
···
 	 * devices or if they are not contiguous
 	 */
 	if (last_end == disk_start && !last->bi_status &&
-	    last->bi_disk == stripe->dev->bdev->bd_disk &&
-	    last->bi_partno == stripe->dev->bdev->bd_partno) {
+	    last->bi_bdev == stripe->dev->bdev) {
 		ret = bio_add_page(last, page, PAGE_SIZE, 0);
 		if (ret == PAGE_SIZE)
 			return 0;
···
 	for (i = 0; i < rbio->bbio->num_stripes; i++) {
 		stripe = &rbio->bbio->stripes[i];
 		if (in_range(physical, stripe->physical, rbio->stripe_len) &&
-		    stripe->dev->bdev &&
-		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
-		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
+		    stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) {
 			return i;
 		}
 	}
+1 -1
fs/btrfs/scrub.c
···
 
 	sbio = sctx->wr_curr_bio;
 	sctx->wr_curr_bio = NULL;
-	WARN_ON(!sbio->bio->bi_disk);
+	WARN_ON(!sbio->bio->bi_bdev);
 	scrub_pending_bio_inc(sctx);
 	/* process all writes in a single worker thread. Then the block layer
 	 * orders the requests before sending them to the driver which
+1 -1
fs/direct-io.c
···
 	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
 		bio_set_pages_dirty(bio);
 
-	dio->bio_disk = bio->bi_disk;
+	dio->bio_disk = bio->bi_bdev->bd_disk;
 
 	if (sdio->submit_io) {
 		sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
+1 -11
fs/f2fs/data.c
···
 	return 0;
 }
 
-/*
- * Return true, if pre_bio's bdev is same as its target device.
- */
-static bool __same_bdev(struct f2fs_sb_info *sbi,
-				block_t blk_addr, struct bio *bio)
-{
-	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
-	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
-}
-
 static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
 {
 	struct f2fs_sb_info *sbi = fio->sbi;
···
 		return false;
 	if (last_blkaddr + 1 != cur_blkaddr)
 		return false;
-	return __same_bdev(sbi, cur_blkaddr, bio);
+	return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
 }
 
 static bool io_type_is_mergeable(struct f2fs_bio_info *io,
+8 -10
include/linux/bio.h
···
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 extern const char *bio_devname(struct bio *bio, char *buffer);
 
-#define bio_set_dev(bio, bdev)			\
-do {						\
-	if ((bio)->bi_disk != (bdev)->bd_disk)	\
-		bio_clear_flag(bio, BIO_THROTTLED);\
-	(bio)->bi_disk = (bdev)->bd_disk;	\
-	(bio)->bi_partno = (bdev)->bd_partno;	\
-	bio_associate_blkg(bio);		\
+#define bio_set_dev(bio, bdev)				\
+do {							\
+	if ((bio)->bi_bdev != (bdev))			\
+		bio_clear_flag(bio, BIO_THROTTLED);	\
+	(bio)->bi_bdev = (bdev);			\
+	bio_associate_blkg(bio);			\
 } while (0)
 
 #define bio_copy_dev(dst, src)			\
 do {						\
-	(dst)->bi_disk = (src)->bi_disk;	\
-	(dst)->bi_partno = (src)->bi_partno;	\
+	(dst)->bi_bdev = (src)->bi_bdev;	\
 	bio_clone_blkg_association(dst, src);	\
 } while (0)
 
 #define bio_dev(bio) \
-	disk_devt((bio)->bi_disk)
+	disk_devt((bio)->bi_bdev->bd_disk)
 
 #ifdef CONFIG_BLK_CGROUP
 void bio_associate_blkg(struct bio *bio);
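With bio_set_dev() now storing the block_device directly, driver call sites are unchanged; only the stored state differs. Typical stacking-driver use looks like this (hypothetical helper, assuming the usual remap-and-resubmit pattern):

static void example_remap_and_resubmit(struct bio *bio,
				       struct block_device *lower,
				       sector_t start_sect)
{
	bio_set_dev(bio, lower);		/* sets bio->bi_bdev */
	bio->bi_iter.bi_sector += start_sect;	/* offset into the target */
	submit_bio_noacct(bio);
}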
+2 -2
include/linux/blk-mq.h
···
 	rq->bio = rq->biotail = bio;
 	rq->ioprio = bio_prio(bio);
 
-	if (bio->bi_disk)
-		rq->rq_disk = bio->bi_disk;
+	if (bio->bi_bdev)
+		rq->rq_disk = bio->bi_bdev->bd_disk;
 }
 
 blk_qc_t blk_mq_submit_bio(struct bio *bio);
+1 -2
include/linux/blk_types.h
···
  */
 struct bio {
 	struct bio		*bi_next;	/* request queue link */
-	struct gendisk		*bi_disk;
+	struct block_device	*bi_bdev;
 	unsigned int		bi_opf;		/* bottom bits req flags,
 						 * top bits REQ_OP. Use
 						 * accessors.
···
 	unsigned short		bi_ioprio;
 	unsigned short		bi_write_hint;
 	blk_status_t		bi_status;
-	u8			bi_partno;
 	atomic_t		__bi_remaining;
 
 	struct bvec_iter	bi_iter;
+3 -2
include/linux/blkdev.h
···
  */
 static inline unsigned long bio_start_io_acct(struct bio *bio)
 {
-	return disk_start_io_acct(bio->bi_disk, bio_sectors(bio), bio_op(bio));
+	return disk_start_io_acct(bio->bi_bdev->bd_disk, bio_sectors(bio),
+				  bio_op(bio));
 }
 
 /**
···
  */
 static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
 {
-	return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time);
+	return disk_end_io_acct(bio->bi_bdev->bd_disk, bio_op(bio), start_time);
 }
 
 int bdev_read_only(struct block_device *bdev);
+9 -7
kernel/trace/blktrace.c
···
 
 static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
 {
-	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_BOUNCE, 0);
+	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0);
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
···
 
 static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
 {
-	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_BACKMERGE, 0);
+	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
+			  0);
 }
 
 static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
 {
-	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_FRONTMERGE, 0);
+	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
+			  0);
 }
 
 static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
 {
-	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_QUEUE, 0);
+	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
 }
 
 static void blk_add_trace_getrq(void *ignore, struct bio *bio)
 {
-	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_GETRQ, 0);
+	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
 }
 
 static void blk_add_trace_plug(void *ignore, struct request_queue *q)
···
 
 static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
 {
-	struct request_queue *q = bio->bi_disk->queue;
+	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 	struct blk_trace *bt;
 
 	rcu_read_lock();
···
 static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
 				    sector_t from)
 {
-	struct request_queue *q = bio->bi_disk->queue;
+	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 	struct blk_trace *bt;
 	struct blk_io_trace_remap r;
 
+1 -1
mm/page_io.c
···
 		ret = -ENOMEM;
 		goto out;
 	}
-	disk = bio->bi_disk;
+	disk = bio->bi_bdev->bd_disk;
 	/*
 	 * Keep this task valid during swap readpage because the oom killer may
 	 * attempt to access it in the page fault retry time check.