Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

block: move ->make_request_fn to struct block_device_operations

The make_request_fn is a little weird in that it sits directly in
struct request_queue instead of an operation vector. Replace it with
a block_device_operations method called submit_bio (which describes much
better what it does). Also remove the request_queue argument to it, as
the queue can be derived pretty trivially from the bio.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig; committed by Jens Axboe.
c62b37d9 e439ab71

+153 -140
+1 -1
Documentation/block/biodoc.rst
··· 1036 1036 provides drivers with a sector number relative to whole device, rather than 1037 1037 having to take partition number into account in order to arrive at the true 1038 1038 sector number. The routine blk_partition_remap() is invoked by 1039 - generic_make_request even before invoking the queue specific make_request_fn, 1039 + generic_make_request even before invoking the queue specific ->submit_bio, 1040 1040 so the i/o scheduler also gets to operate on whole disk sector numbers. This 1041 1041 should typically not require changes to block drivers, it just never gets 1042 1042 to invoke its own partition sector offset calculations since all bios
+1 -1
Documentation/block/writeback_cache_control.rst
··· 47 47 may both be set on a single bio. 48 48 49 49 50 - Implementation details for make_request_fn based block drivers 50 + Implementation details for bio based block drivers 51 51 -------------------------------------------------------------- 52 52 53 53 These drivers will always see the REQ_PREFLUSH and REQ_FUA bits as they sit
+3 -2
arch/m68k/emu/nfblock.c
··· 59 59 struct gendisk *disk; 60 60 }; 61 61 62 - static blk_qc_t nfhd_make_request(struct request_queue *queue, struct bio *bio) 62 + static blk_qc_t nfhd_submit_bio(struct bio *bio) 63 63 { 64 64 struct nfhd_device *dev = bio->bi_disk->private_data; 65 65 struct bio_vec bvec; ··· 93 93 94 94 static const struct block_device_operations nfhd_ops = { 95 95 .owner = THIS_MODULE, 96 + .submit_bio = nfhd_submit_bio, 96 97 .getgeo = nfhd_getgeo, 97 98 }; 98 99 ··· 119 118 dev->bsize = bsize; 120 119 dev->bshift = ffs(bsize) - 10; 121 120 122 - dev->queue = blk_alloc_queue(nfhd_make_request, NUMA_NO_NODE); 121 + dev->queue = blk_alloc_queue(NUMA_NO_NODE); 123 122 if (dev->queue == NULL) 124 123 goto free_dev; 125 124
+3 -2
arch/xtensa/platforms/iss/simdisk.c
··· 101 101 spin_unlock(&dev->lock); 102 102 } 103 103 104 - static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio) 104 + static blk_qc_t simdisk_submit_bio(struct bio *bio) 105 105 { 106 106 struct simdisk *dev = bio->bi_disk->private_data; 107 107 struct bio_vec bvec; ··· 144 144 145 145 static const struct block_device_operations simdisk_ops = { 146 146 .owner = THIS_MODULE, 147 + .submit_bio = simdisk_submit_bio, 147 148 .open = simdisk_open, 148 149 .release = simdisk_release, 149 150 }; ··· 268 267 spin_lock_init(&dev->lock); 269 268 dev->users = 0; 270 269 271 - dev->queue = blk_alloc_queue(simdisk_make_request, NUMA_NO_NODE); 270 + dev->queue = blk_alloc_queue(NUMA_NO_NODE); 272 271 if (dev->queue == NULL) { 273 272 pr_err("blk_alloc_queue failed\n"); 274 273 goto out_alloc_queue;
+1 -1
block/blk-cgroup.c
··· 1012 1012 * blkcg_init_queue - initialize blkcg part of request queue 1013 1013 * @q: request_queue to initialize 1014 1014 * 1015 - * Called from __blk_alloc_queue(). Responsible for initializing blkcg 1015 + * Called from blk_alloc_queue(). Responsible for initializing blkcg 1016 1016 * part of new request_queue @q. 1017 1017 * 1018 1018 * RETURNS:
+19 -34
block/blk-core.c
··· 283 283 * A block device may call blk_sync_queue to ensure that any 284 284 * such activity is cancelled, thus allowing it to release resources 285 285 * that the callbacks might use. The caller must already have made sure 286 - * that its ->make_request_fn will not re-add plugging prior to calling 286 + * that its ->submit_bio will not re-add plugging prior to calling 287 287 * this function. 288 288 * 289 289 * This function does not cancel any asynchronous activity arising ··· 510 510 { 511 511 } 512 512 513 - struct request_queue *__blk_alloc_queue(int node_id) 513 + struct request_queue *blk_alloc_queue(int node_id) 514 514 { 515 515 struct request_queue *q; 516 516 int ret; ··· 575 575 576 576 blk_queue_dma_alignment(q, 511); 577 577 blk_set_default_limits(&q->limits); 578 + q->nr_requests = BLKDEV_MAX_RQ; 578 579 579 580 return q; 580 581 ··· 592 591 fail_q: 593 592 kmem_cache_free(blk_requestq_cachep, q); 594 593 return NULL; 595 - } 596 - 597 - struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id) 598 - { 599 - struct request_queue *q; 600 - 601 - if (WARN_ON_ONCE(!make_request)) 602 - return NULL; 603 - 604 - q = __blk_alloc_queue(node_id); 605 - if (!q) 606 - return NULL; 607 - q->make_request_fn = make_request; 608 - q->nr_requests = BLKDEV_MAX_RQ; 609 - return q; 610 594 } 611 595 EXPORT_SYMBOL(blk_alloc_queue); 612 596 ··· 1074 1088 1075 1089 static blk_qc_t do_make_request(struct bio *bio) 1076 1090 { 1077 - struct request_queue *q = bio->bi_disk->queue; 1091 + struct gendisk *disk = bio->bi_disk; 1078 1092 blk_qc_t ret = BLK_QC_T_NONE; 1079 1093 1080 1094 if (blk_crypto_bio_prep(&bio)) { 1081 - if (!q->make_request_fn) 1082 - return blk_mq_make_request(q, bio); 1083 - ret = q->make_request_fn(q, bio); 1095 + if (!disk->fops->submit_bio) 1096 + return blk_mq_submit_bio(bio); 1097 + ret = disk->fops->submit_bio(bio); 1084 1098 } 1085 - blk_queue_exit(q); 1099 + blk_queue_exit(disk->queue); 1086 1100 return ret; 1087 1101 
} 1088 1102 ··· 1099 1113 { 1100 1114 /* 1101 1115 * bio_list_on_stack[0] contains bios submitted by the current 1102 - * make_request_fn. 1103 - * bio_list_on_stack[1] contains bios that were submitted before 1104 - * the current make_request_fn, but that haven't been processed 1105 - * yet. 1116 + * ->submit_bio. 1117 + * bio_list_on_stack[1] contains bios that were submitted before the 1118 + * current ->submit_bio_bio, but that haven't been processed yet. 1106 1119 */ 1107 1120 struct bio_list bio_list_on_stack[2]; 1108 1121 blk_qc_t ret = BLK_QC_T_NONE; ··· 1110 1125 goto out; 1111 1126 1112 1127 /* 1113 - * We only want one ->make_request_fn to be active at a time, else 1128 + * We only want one ->submit_bio to be active at a time, else 1114 1129 * stack usage with stacked devices could be a problem. So use 1115 1130 * current->bio_list to keep a list of requests submited by a 1116 - * make_request_fn function. current->bio_list is also used as a 1131 + * ->submit_bio method. current->bio_list is also used as a 1117 1132 * flag to say if generic_make_request is currently active in this 1118 1133 * task or not. If it is NULL, then no make_request is active. If 1119 1134 * it is non-NULL, then a make_request is active, and new requests ··· 1131 1146 * We pretend that we have just taken it off a longer list, so 1132 1147 * we assign bio_list to a pointer to the bio_list_on_stack, 1133 1148 * thus initialising the bio_list of new bios to be 1134 - * added. ->make_request() may indeed add some more bios 1149 + * added. ->submit_bio() may indeed add some more bios 1135 1150 * through a recursive call to generic_make_request. If it 1136 1151 * did, we find a non-NULL value in bio_list and re-enter the loop 1137 1152 * from the top. In this case we really did just take the bio 1138 1153 * of the top of the list (no pretending) and so remove it from 1139 - * bio_list, and call into ->make_request() again. 1154 + * bio_list, and call into ->submit_bio() again. 
1140 1155 */ 1141 1156 BUG_ON(bio->bi_next); 1142 1157 bio_list_init(&bio_list_on_stack[0]); ··· 1186 1201 */ 1187 1202 blk_qc_t direct_make_request(struct bio *bio) 1188 1203 { 1189 - struct request_queue *q = bio->bi_disk->queue; 1204 + struct gendisk *disk = bio->bi_disk; 1190 1205 1191 - if (WARN_ON_ONCE(q->make_request_fn)) { 1206 + if (WARN_ON_ONCE(!disk->queue->mq_ops)) { 1192 1207 bio_io_error(bio); 1193 1208 return BLK_QC_T_NONE; 1194 1209 } ··· 1197 1212 if (unlikely(bio_queue_enter(bio))) 1198 1213 return BLK_QC_T_NONE; 1199 1214 if (!blk_crypto_bio_prep(&bio)) { 1200 - blk_queue_exit(q); 1215 + blk_queue_exit(disk->queue); 1201 1216 return BLK_QC_T_NONE; 1202 1217 } 1203 - return blk_mq_make_request(q, bio); 1218 + return blk_mq_submit_bio(bio); 1204 1219 } 1205 1220 EXPORT_SYMBOL_GPL(direct_make_request); 1206 1221
+5 -5
block/blk-mq.c
··· 2136 2136 } 2137 2137 2138 2138 /** 2139 - * blk_mq_make_request - Create and send a request to block device. 2140 - * @q: Request queue pointer. 2139 + * blk_mq_submit_bio - Create and send a request to block device. 2141 2140 * @bio: Bio pointer. 2142 2141 * 2143 2142 * Builds up a request structure from @q and @bio and send to the device. The ··· 2150 2151 * 2151 2152 * Returns: Request queue cookie. 2152 2153 */ 2153 - blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) 2154 + blk_qc_t blk_mq_submit_bio(struct bio *bio) 2154 2155 { 2156 + struct request_queue *q = bio->bi_disk->queue; 2155 2157 const int is_sync = op_is_sync(bio->bi_opf); 2156 2158 const int is_flush_fua = op_is_flush(bio->bi_opf); 2157 2159 struct blk_mq_alloc_data data = { ··· 2277 2277 blk_queue_exit(q); 2278 2278 return BLK_QC_T_NONE; 2279 2279 } 2280 - EXPORT_SYMBOL_GPL(blk_mq_make_request); /* only for request based dm */ 2280 + EXPORT_SYMBOL_GPL(blk_mq_submit_bio); /* only for request based dm */ 2281 2281 2282 2282 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 2283 2283 unsigned int hctx_idx) ··· 3017 3017 { 3018 3018 struct request_queue *uninit_q, *q; 3019 3019 3020 - uninit_q = __blk_alloc_queue(set->numa_node); 3020 + uninit_q = blk_alloc_queue(set->numa_node); 3021 3021 if (!uninit_q) 3022 3022 return ERR_PTR(-ENOMEM); 3023 3023 uninit_q->queuedata = queuedata;
-2
block/blk.h
··· 419 419 #endif 420 420 } 421 421 422 - struct request_queue *__blk_alloc_queue(int node_id); 423 - 424 422 int bio_add_hw_page(struct request_queue *q, struct bio *bio, 425 423 struct page *page, unsigned int len, unsigned int offset, 426 424 unsigned int max_sectors, bool *same_page);
+3 -2
drivers/block/brd.c
··· 282 282 return err; 283 283 } 284 284 285 - static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) 285 + static blk_qc_t brd_submit_bio(struct bio *bio) 286 286 { 287 287 struct brd_device *brd = bio->bi_disk->private_data; 288 288 struct bio_vec bvec; ··· 330 330 331 331 static const struct block_device_operations brd_fops = { 332 332 .owner = THIS_MODULE, 333 + .submit_bio = brd_submit_bio, 333 334 .rw_page = brd_rw_page, 334 335 }; 335 336 ··· 382 381 spin_lock_init(&brd->brd_lock); 383 382 INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC); 384 383 385 - brd->brd_queue = blk_alloc_queue(brd_make_request, NUMA_NO_NODE); 384 + brd->brd_queue = blk_alloc_queue(NUMA_NO_NODE); 386 385 if (!brd->brd_queue) 387 386 goto out_free_dev; 388 387
+1 -1
drivers/block/drbd/drbd_int.h
··· 1451 1451 /* drbd_req */ 1452 1452 extern void do_submit(struct work_struct *ws); 1453 1453 extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long); 1454 - extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio); 1454 + extern blk_qc_t drbd_submit_bio(struct bio *bio); 1455 1455 extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req); 1456 1456 extern int is_valid_ar_handle(struct drbd_request *, sector_t); 1457 1457
+5 -4
drivers/block/drbd/drbd_main.c
··· 132 132 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5); 133 133 134 134 static const struct block_device_operations drbd_ops = { 135 - .owner = THIS_MODULE, 136 - .open = drbd_open, 137 - .release = drbd_release, 135 + .owner = THIS_MODULE, 136 + .submit_bio = drbd_submit_bio, 137 + .open = drbd_open, 138 + .release = drbd_release, 138 139 }; 139 140 140 141 struct bio *bio_alloc_drbd(gfp_t gfp_mask) ··· 2802 2801 2803 2802 drbd_init_set_defaults(device); 2804 2803 2805 - q = blk_alloc_queue(drbd_make_request, NUMA_NO_NODE); 2804 + q = blk_alloc_queue(NUMA_NO_NODE); 2806 2805 if (!q) 2807 2806 goto out_no_q; 2808 2807 device->rq_queue = q;
+1 -1
drivers/block/drbd/drbd_req.c
··· 1593 1593 } 1594 1594 } 1595 1595 1596 - blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio) 1596 + blk_qc_t drbd_submit_bio(struct bio *bio) 1597 1597 { 1598 1598 struct drbd_device *device = bio->bi_disk->private_data; 1599 1599 unsigned long start_jif;
+13 -4
drivers/block/null_blk_main.c
··· 1388 1388 return &nullb->queues[index]; 1389 1389 } 1390 1390 1391 - static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio) 1391 + static blk_qc_t null_submit_bio(struct bio *bio) 1392 1392 { 1393 1393 sector_t sector = bio->bi_iter.bi_sector; 1394 1394 sector_t nr_sectors = bio_sectors(bio); ··· 1575 1575 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q); 1576 1576 } 1577 1577 1578 - static const struct block_device_operations null_ops = { 1578 + static const struct block_device_operations null_bio_ops = { 1579 + .owner = THIS_MODULE, 1580 + .submit_bio = null_submit_bio, 1581 + .report_zones = null_report_zones, 1582 + }; 1583 + 1584 + static const struct block_device_operations null_rq_ops = { 1579 1585 .owner = THIS_MODULE, 1580 1586 .report_zones = null_report_zones, 1581 1587 }; ··· 1653 1647 disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; 1654 1648 disk->major = null_major; 1655 1649 disk->first_minor = nullb->index; 1656 - disk->fops = &null_ops; 1650 + if (queue_is_mq(nullb->q)) 1651 + disk->fops = &null_rq_ops; 1652 + else 1653 + disk->fops = &null_bio_ops; 1657 1654 disk->private_data = nullb; 1658 1655 disk->queue = nullb->q; 1659 1656 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); ··· 1801 1792 goto out_cleanup_tags; 1802 1793 } 1803 1794 } else if (dev->queue_mode == NULL_Q_BIO) { 1804 - nullb->q = blk_alloc_queue(null_queue_bio, dev->home_node); 1795 + nullb->q = blk_alloc_queue(dev->home_node); 1805 1796 if (!nullb->q) { 1806 1797 rv = -ENOMEM; 1807 1798 goto out_cleanup_queues;
+6 -5
drivers/block/pktcdvd.c
··· 36 36 * block device, assembling the pieces to full packets and queuing them to the 37 37 * packet I/O scheduler. 38 38 * 39 - * At the top layer there is a custom make_request_fn function that forwards 39 + * At the top layer there is a custom ->submit_bio function that forwards 40 40 * read requests directly to the iosched queue and puts write requests in the 41 41 * unaligned write queue. A kernel thread performs the necessary read 42 42 * gathering to convert the unaligned writes to aligned writes and then feeds ··· 2428 2428 } 2429 2429 } 2430 2430 2431 - static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio) 2431 + static blk_qc_t pkt_submit_bio(struct bio *bio) 2432 2432 { 2433 2433 struct pktcdvd_device *pd; 2434 2434 char b[BDEVNAME_SIZE]; ··· 2436 2436 2437 2437 blk_queue_split(&bio); 2438 2438 2439 - pd = q->queuedata; 2439 + pd = bio->bi_disk->queue->queuedata; 2440 2440 if (!pd) { 2441 2441 pr_err("%s incorrect request queue\n", bio_devname(bio, b)); 2442 2442 goto end_io; ··· 2480 2480 split = bio; 2481 2481 } 2482 2482 2483 - pkt_make_request_write(q, split); 2483 + pkt_make_request_write(bio->bi_disk->queue, split); 2484 2484 } while (split != bio); 2485 2485 2486 2486 return BLK_QC_T_NONE; ··· 2685 2685 2686 2686 static const struct block_device_operations pktcdvd_ops = { 2687 2687 .owner = THIS_MODULE, 2688 + .submit_bio = pkt_submit_bio, 2688 2689 .open = pkt_open, 2689 2690 .release = pkt_close, 2690 2691 .ioctl = pkt_ioctl, ··· 2750 2749 disk->flags = GENHD_FL_REMOVABLE; 2751 2750 strcpy(disk->disk_name, pd->name); 2752 2751 disk->private_data = pd; 2753 - disk->queue = blk_alloc_queue(pkt_make_request, NUMA_NO_NODE); 2752 + disk->queue = blk_alloc_queue(NUMA_NO_NODE); 2754 2753 if (!disk->queue) 2755 2754 goto out_mem2; 2756 2755
+7 -8
drivers/block/ps3vram.c
··· 90 90 91 91 static int ps3vram_major; 92 92 93 - 94 - static const struct block_device_operations ps3vram_fops = { 95 - .owner = THIS_MODULE, 96 - }; 97 - 98 - 99 93 #define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */ 100 94 #define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */ 101 95 #define DMA_NOTIFIER_SIZE 0x40 ··· 579 585 return next; 580 586 } 581 587 582 - static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio) 588 + static blk_qc_t ps3vram_submit_bio(struct bio *bio) 583 589 { 584 590 struct ps3_system_bus_device *dev = bio->bi_disk->private_data; 585 591 struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); ··· 603 609 604 610 return BLK_QC_T_NONE; 605 611 } 612 + 613 + static const struct block_device_operations ps3vram_fops = { 614 + .owner = THIS_MODULE, 615 + .submit_bio = ps3vram_submit_bio, 616 + }; 606 617 607 618 static int ps3vram_probe(struct ps3_system_bus_device *dev) 608 619 { ··· 736 737 737 738 ps3vram_proc_init(dev); 738 739 739 - queue = blk_alloc_queue(ps3vram_make_request, NUMA_NO_NODE); 740 + queue = blk_alloc_queue(NUMA_NO_NODE); 740 741 if (!queue) { 741 742 dev_err(&dev->core, "blk_alloc_queue failed\n"); 742 743 error = -ENOMEM;
+5 -2
drivers/block/rsxx/dev.c
··· 50 50 51 51 static struct kmem_cache *bio_meta_pool; 52 52 53 + static blk_qc_t rsxx_submit_bio(struct bio *bio); 54 + 53 55 /*----------------- Block Device Operations -----------------*/ 54 56 static int rsxx_blkdev_ioctl(struct block_device *bdev, 55 57 fmode_t mode, ··· 94 92 95 93 static const struct block_device_operations rsxx_fops = { 96 94 .owner = THIS_MODULE, 95 + .submit_bio = rsxx_submit_bio, 97 96 .getgeo = rsxx_getgeo, 98 97 .ioctl = rsxx_blkdev_ioctl, 99 98 }; ··· 120 117 } 121 118 } 122 119 123 - static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio) 120 + static blk_qc_t rsxx_submit_bio(struct bio *bio) 124 121 { 125 122 struct rsxx_cardinfo *card = bio->bi_disk->private_data; 126 123 struct rsxx_bio_meta *bio_meta; ··· 236 233 return -ENOMEM; 237 234 } 238 235 239 - card->queue = blk_alloc_queue(rsxx_make_request, NUMA_NO_NODE); 236 + card->queue = blk_alloc_queue(NUMA_NO_NODE); 240 237 if (!card->queue) { 241 238 dev_err(CARD_TO_DEV(card), "Failed queue alloc\n"); 242 239 unregister_blkdev(card->major, DRIVER_NAME);
+3 -2
drivers/block/umem.c
··· 519 519 return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb)); 520 520 } 521 521 522 - static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio) 522 + static blk_qc_t mm_submit_bio(struct bio *bio) 523 523 { 524 524 struct cardinfo *card = bio->bi_disk->private_data; 525 525 ··· 779 779 780 780 static const struct block_device_operations mm_fops = { 781 781 .owner = THIS_MODULE, 782 + .submit_bio = mm_submit_bio, 782 783 .getgeo = mm_getgeo, 783 784 .revalidate_disk = mm_revalidate, 784 785 }; ··· 887 886 card->biotail = &card->bio; 888 887 spin_lock_init(&card->lock); 889 888 890 - card->queue = blk_alloc_queue(mm_make_request, NUMA_NO_NODE); 889 + card->queue = blk_alloc_queue(NUMA_NO_NODE); 891 890 if (!card->queue) 892 891 goto failed_alloc; 893 892
+6 -5
drivers/block/zram/zram_drv.c
··· 793 793 } 794 794 795 795 /* 796 - * Block layer want one ->make_request_fn to be active at a time 797 - * so if we use chained IO with parent IO in same context, 798 - * it's a deadlock. To avoid, it, it uses worker thread context. 796 + * Block layer want one ->submit_bio to be active at a time, so if we use 797 + * chained IO with parent IO in same context, it's a deadlock. To avoid that, 798 + * use a worker thread context. 799 799 */ 800 800 static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, 801 801 unsigned long entry, struct bio *bio) ··· 1584 1584 /* 1585 1585 * Handler function for all zram I/O requests. 1586 1586 */ 1587 - static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio) 1587 + static blk_qc_t zram_submit_bio(struct bio *bio) 1588 1588 { 1589 1589 struct zram *zram = bio->bi_disk->private_data; 1590 1590 ··· 1813 1813 1814 1814 static const struct block_device_operations zram_devops = { 1815 1815 .open = zram_open, 1816 + .submit_bio = zram_submit_bio, 1816 1817 .swap_slot_free_notify = zram_slot_free_notify, 1817 1818 .rw_page = zram_rw_page, 1818 1819 .owner = THIS_MODULE ··· 1892 1891 #ifdef CONFIG_ZRAM_WRITEBACK 1893 1892 spin_lock_init(&zram->wb_limit_lock); 1894 1893 #endif 1895 - queue = blk_alloc_queue(zram_make_request, NUMA_NO_NODE); 1894 + queue = blk_alloc_queue(NUMA_NO_NODE); 1896 1895 if (!queue) { 1897 1896 pr_err("Error allocating disk queue for device %d\n", 1898 1897 device_id);
+2 -6
drivers/lightnvm/core.c
··· 236 236 return tgt_dev; 237 237 } 238 238 239 - static const struct block_device_operations nvm_fops = { 240 - .owner = THIS_MODULE, 241 - }; 242 - 243 239 static struct nvm_tgt_type *__nvm_find_target_type(const char *name) 244 240 { 245 241 struct nvm_tgt_type *tt; ··· 376 380 goto err_dev; 377 381 } 378 382 379 - tqueue = blk_alloc_queue(tt->make_rq, dev->q->node); 383 + tqueue = blk_alloc_queue(dev->q->node); 380 384 if (!tqueue) { 381 385 ret = -ENOMEM; 382 386 goto err_disk; ··· 386 390 tdisk->flags = GENHD_FL_EXT_DEVT; 387 391 tdisk->major = 0; 388 392 tdisk->first_minor = 0; 389 - tdisk->fops = &nvm_fops; 393 + tdisk->fops = tt->bops; 390 394 tdisk->queue = tqueue; 391 395 392 396 targetdata = tt->init(tgt_dev, tdisk, create->flags);
+9 -3
drivers/lightnvm/pblk-init.c
··· 47 47 48 48 struct bio_set pblk_bio_set; 49 49 50 - static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio) 50 + static blk_qc_t pblk_submit_bio(struct bio *bio) 51 51 { 52 - struct pblk *pblk = q->queuedata; 52 + struct pblk *pblk = bio->bi_disk->queue->queuedata; 53 53 54 54 if (bio_op(bio) == REQ_OP_DISCARD) { 55 55 pblk_discard(pblk, bio); ··· 78 78 79 79 return BLK_QC_T_NONE; 80 80 } 81 + 82 + static const struct block_device_operations pblk_bops = { 83 + .owner = THIS_MODULE, 84 + .submit_bio = pblk_submit_bio, 85 + }; 86 + 81 87 82 88 static size_t pblk_trans_map_size(struct pblk *pblk) 83 89 { ··· 1286 1280 .name = "pblk", 1287 1281 .version = {1, 0, 0}, 1288 1282 1289 - .make_rq = pblk_make_rq, 1283 + .bops = &pblk_bops, 1290 1284 .capacity = pblk_capacity, 1291 1285 1292 1286 .init = pblk_init,
+2 -2
drivers/md/bcache/request.c
··· 1158 1158 1159 1159 /* Cached devices - read & write stuff */ 1160 1160 1161 - blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio) 1161 + blk_qc_t cached_dev_submit_bio(struct bio *bio) 1162 1162 { 1163 1163 struct search *s; 1164 1164 struct bcache_device *d = bio->bi_disk->private_data; ··· 1291 1291 continue_at(cl, search_free, NULL); 1292 1292 } 1293 1293 1294 - blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio) 1294 + blk_qc_t flash_dev_submit_bio(struct bio *bio) 1295 1295 { 1296 1296 struct search *s; 1297 1297 struct closure *cl;
+2 -2
drivers/md/bcache/request.h
··· 37 37 void bch_data_insert(struct closure *cl); 38 38 39 39 void bch_cached_dev_request_init(struct cached_dev *dc); 40 - blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio); 40 + blk_qc_t cached_dev_submit_bio(struct bio *bio); 41 41 42 42 void bch_flash_dev_request_init(struct bcache_device *d); 43 - blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio); 43 + blk_qc_t flash_dev_submit_bio(struct bio *bio); 44 44 45 45 extern struct kmem_cache *bch_search_cache; 46 46
+16 -7
drivers/md/bcache/super.c
··· 680 680 return d->ioctl(d, mode, cmd, arg); 681 681 } 682 682 683 - static const struct block_device_operations bcache_ops = { 683 + static const struct block_device_operations bcache_cached_ops = { 684 + .submit_bio = cached_dev_submit_bio, 685 + .open = open_dev, 686 + .release = release_dev, 687 + .ioctl = ioctl_dev, 688 + .owner = THIS_MODULE, 689 + }; 690 + 691 + static const struct block_device_operations bcache_flash_ops = { 692 + .submit_bio = flash_dev_submit_bio, 684 693 .open = open_dev, 685 694 .release = release_dev, 686 695 .ioctl = ioctl_dev, ··· 829 820 } 830 821 831 822 static int bcache_device_init(struct bcache_device *d, unsigned int block_size, 832 - sector_t sectors, make_request_fn make_request_fn, 833 - struct block_device *cached_bdev) 823 + sector_t sectors, struct block_device *cached_bdev, 824 + const struct block_device_operations *ops) 834 825 { 835 826 struct request_queue *q; 836 827 const size_t max_stripes = min_t(size_t, INT_MAX, ··· 877 868 878 869 d->disk->major = bcache_major; 879 870 d->disk->first_minor = idx_to_first_minor(idx); 880 - d->disk->fops = &bcache_ops; 871 + d->disk->fops = ops; 881 872 d->disk->private_data = d; 882 873 883 - q = blk_alloc_queue(make_request_fn, NUMA_NO_NODE); 874 + q = blk_alloc_queue(NUMA_NO_NODE); 884 875 if (!q) 885 876 return -ENOMEM; 886 877 ··· 1364 1355 1365 1356 ret = bcache_device_init(&dc->disk, block_size, 1366 1357 dc->bdev->bd_part->nr_sects - dc->sb.data_offset, 1367 - cached_dev_make_request, dc->bdev); 1358 + dc->bdev, &bcache_cached_ops); 1368 1359 if (ret) 1369 1360 return ret; 1370 1361 ··· 1477 1468 kobject_init(&d->kobj, &bch_flash_dev_ktype); 1478 1469 1479 1470 if (bcache_device_init(d, block_bytes(c), u->sectors, 1480 - flash_dev_make_request, NULL)) 1471 + NULL, &bcache_flash_ops)) 1481 1472 goto err; 1482 1473 1483 1474 bcache_device_attach(d, c, u - c->uuids);
+12 -11
drivers/md/dm.c
··· 1770 1770 } 1771 1771 1772 1772 /* 1773 - * If in ->make_request_fn we need to use blk_queue_split(), otherwise 1773 + * If in ->queue_bio we need to use blk_queue_split(), otherwise 1774 1774 * queue_limits for abnormal requests (e.g. discard, writesame, etc) 1775 1775 * won't be imposed. 1776 1776 */ ··· 1787 1787 return __split_and_process_bio(md, map, bio); 1788 1788 } 1789 1789 1790 - static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 1790 + static blk_qc_t dm_submit_bio(struct bio *bio) 1791 1791 { 1792 1792 struct mapped_device *md = bio->bi_disk->private_data; 1793 1793 blk_qc_t ret = BLK_QC_T_NONE; ··· 1798 1798 /* 1799 1799 * We are called with a live reference on q_usage_counter, but 1800 1800 * that one will be released as soon as we return. Grab an 1801 - * extra one as blk_mq_make_request expects to be able to 1802 - * consume a reference (which lives until the request is freed 1803 - * in case a request is allocated). 1801 + * extra one as blk_mq_submit_bio expects to be able to consume 1802 + * a reference (which lives until the request is freed in case a 1803 + * request is allocated). 1804 1804 */ 1805 - percpu_ref_get(&q->q_usage_counter); 1806 - return blk_mq_make_request(q, bio); 1805 + percpu_ref_get(&bio->bi_disk->queue->q_usage_counter); 1806 + return blk_mq_submit_bio(bio); 1807 1807 } 1808 1808 1809 1809 map = dm_get_live_table(md, &srcu_idx); ··· 1988 1988 spin_lock_init(&md->uevent_lock); 1989 1989 1990 1990 /* 1991 - * default to bio-based required ->make_request_fn until DM 1992 - * table is loaded and md->type established. If request-based 1993 - * table is loaded: blk-mq will override accordingly. 1991 + * default to bio-based until DM table is loaded and md->type 1992 + * established. If request-based table is loaded: blk-mq will 1993 + * override accordingly. 
1994 1994 */ 1995 - md->queue = blk_alloc_queue(dm_make_request, numa_node_id); 1995 + md->queue = blk_alloc_queue(numa_node_id); 1996 1996 if (!md->queue) 1997 1997 goto bad; 1998 1998 ··· 3232 3232 }; 3233 3233 3234 3234 static const struct block_device_operations dm_blk_dops = { 3235 + .submit_bio = dm_submit_bio, 3235 3236 .open = dm_blk_open, 3236 3237 .release = dm_blk_close, 3237 3238 .ioctl = dm_blk_ioctl,
+3 -2
drivers/md/md.c
··· 463 463 } 464 464 EXPORT_SYMBOL(md_handle_request); 465 465 466 - static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) 466 + static blk_qc_t md_submit_bio(struct bio *bio) 467 467 { 468 468 const int rw = bio_data_dir(bio); 469 469 const int sgrp = op_stat_group(bio_op(bio)); ··· 5641 5641 mddev->hold_active = UNTIL_STOP; 5642 5642 5643 5643 error = -ENOMEM; 5644 - mddev->queue = blk_alloc_queue(md_make_request, NUMA_NO_NODE); 5644 + mddev->queue = blk_alloc_queue(NUMA_NO_NODE); 5645 5645 if (!mddev->queue) 5646 5646 goto abort; 5647 5647 ··· 7823 7823 static const struct block_device_operations md_fops = 7824 7824 { 7825 7825 .owner = THIS_MODULE, 7826 + .submit_bio = md_submit_bio, 7826 7827 .open = md_open, 7827 7828 .release = md_release, 7828 7829 .ioctl = md_ioctl,
+3 -2
drivers/nvdimm/blk.c
··· 162 162 return err; 163 163 } 164 164 165 - static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio) 165 + static blk_qc_t nd_blk_submit_bio(struct bio *bio) 166 166 { 167 167 struct bio_integrity_payload *bip; 168 168 struct nd_namespace_blk *nsblk = bio->bi_disk->private_data; ··· 225 225 226 226 static const struct block_device_operations nd_blk_fops = { 227 227 .owner = THIS_MODULE, 228 + .submit_bio = nd_blk_submit_bio, 228 229 .revalidate_disk = nvdimm_revalidate_disk, 229 230 }; 230 231 ··· 251 250 internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk)); 252 251 available_disk_size = internal_nlba * nsblk_sector_size(nsblk); 253 252 254 - q = blk_alloc_queue(nd_blk_make_request, NUMA_NO_NODE); 253 + q = blk_alloc_queue(NUMA_NO_NODE); 255 254 if (!q) 256 255 return -ENOMEM; 257 256 if (devm_add_action_or_reset(dev, nd_blk_release_queue, q))
+3 -2
drivers/nvdimm/btt.c
··· 1439 1439 return ret; 1440 1440 } 1441 1441 1442 - static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) 1442 + static blk_qc_t btt_submit_bio(struct bio *bio) 1443 1443 { 1444 1444 struct bio_integrity_payload *bip = bio_integrity(bio); 1445 1445 struct btt *btt = bio->bi_disk->private_data; ··· 1512 1512 1513 1513 static const struct block_device_operations btt_fops = { 1514 1514 .owner = THIS_MODULE, 1515 + .submit_bio = btt_submit_bio, 1515 1516 .rw_page = btt_rw_page, 1516 1517 .getgeo = btt_getgeo, 1517 1518 .revalidate_disk = nvdimm_revalidate_disk, ··· 1524 1523 struct nd_namespace_common *ndns = nd_btt->ndns; 1525 1524 1526 1525 /* create a new disk and request queue for btt */ 1527 - btt->btt_queue = blk_alloc_queue(btt_make_request, NUMA_NO_NODE); 1526 + btt->btt_queue = blk_alloc_queue(NUMA_NO_NODE); 1528 1527 if (!btt->btt_queue) 1529 1528 return -ENOMEM; 1530 1529
+3 -2
drivers/nvdimm/pmem.c
··· 189 189 return rc; 190 190 } 191 191 192 - static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) 192 + static blk_qc_t pmem_submit_bio(struct bio *bio) 193 193 { 194 194 int ret = 0; 195 195 blk_status_t rc = 0; ··· 281 281 282 282 static const struct block_device_operations pmem_fops = { 283 283 .owner = THIS_MODULE, 284 + .submit_bio = pmem_submit_bio, 284 285 .rw_page = pmem_rw_page, 285 286 .revalidate_disk = nvdimm_revalidate_disk, 286 287 }; ··· 424 423 return -EBUSY; 425 424 } 426 425 427 - q = blk_alloc_queue(pmem_make_request, dev_to_node(dev)); 426 + q = blk_alloc_queue(dev_to_node(dev)); 428 427 if (!q) 429 428 return -ENOMEM; 430 429
+1
drivers/nvme/host/core.c
··· 2178 2178 2179 2179 const struct block_device_operations nvme_ns_head_ops = { 2180 2180 .owner = THIS_MODULE, 2181 + .submit_bio = nvme_ns_head_submit_bio, 2181 2182 .open = nvme_ns_head_open, 2182 2183 .release = nvme_ns_head_release, 2183 2184 .ioctl = nvme_ioctl,
+2 -3
drivers/nvme/host/multipath.c
··· 291 291 return false; 292 292 } 293 293 294 - static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, 295 - struct bio *bio) 294 + blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) 296 295 { 297 296 struct nvme_ns_head *head = bio->bi_disk->private_data; 298 297 struct device *dev = disk_to_dev(head->disk); ··· 373 374 if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath) 374 375 return 0; 375 376 376 - q = blk_alloc_queue(nvme_ns_head_make_request, ctrl->numa_node); 377 + q = blk_alloc_queue(ctrl->numa_node); 377 378 if (!q) 378 379 goto out; 379 380 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+1
drivers/nvme/host/nvme.h
··· 586 586 bool nvme_mpath_clear_current_path(struct nvme_ns *ns); 587 587 void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl); 588 588 struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); 589 + blk_qc_t nvme_ns_head_submit_bio(struct bio *bio); 589 590 590 591 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) 591 592 {
+4 -5
drivers/s390/block/dcssblk.c
··· 31 31 32 32 static int dcssblk_open(struct block_device *bdev, fmode_t mode); 33 33 static void dcssblk_release(struct gendisk *disk, fmode_t mode); 34 - static blk_qc_t dcssblk_make_request(struct request_queue *q, 35 - struct bio *bio); 34 + static blk_qc_t dcssblk_submit_bio(struct bio *bio); 36 35 static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 37 36 long nr_pages, void **kaddr, pfn_t *pfn); 38 37 ··· 40 41 static int dcssblk_major; 41 42 static const struct block_device_operations dcssblk_devops = { 42 43 .owner = THIS_MODULE, 44 + .submit_bio = dcssblk_submit_bio, 43 45 .open = dcssblk_open, 44 46 .release = dcssblk_release, 45 47 }; ··· 651 651 } 652 652 dev_info->gd->major = dcssblk_major; 653 653 dev_info->gd->fops = &dcssblk_devops; 654 - dev_info->dcssblk_queue = 655 - blk_alloc_queue(dcssblk_make_request, NUMA_NO_NODE); 654 + dev_info->dcssblk_queue = blk_alloc_queue(NUMA_NO_NODE); 656 655 dev_info->gd->queue = dev_info->dcssblk_queue; 657 656 dev_info->gd->private_data = dev_info; 658 657 blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); ··· 867 868 } 868 869 869 870 static blk_qc_t 870 - dcssblk_make_request(struct request_queue *q, struct bio *bio) 871 + dcssblk_submit_bio(struct bio *bio) 871 872 { 872 873 struct dcssblk_dev_info *dev_info; 873 874 struct bio_vec bvec;
+3 -3
drivers/s390/block/xpram.c
··· 182 182 /* 183 183 * Block device make request function. 184 184 */ 185 - static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio) 185 + static blk_qc_t xpram_submit_bio(struct bio *bio) 186 186 { 187 187 xpram_device_t *xdev = bio->bi_disk->private_data; 188 188 struct bio_vec bvec; ··· 250 250 static const struct block_device_operations xpram_devops = 251 251 { 252 252 .owner = THIS_MODULE, 253 + .submit_bio = xpram_submit_bio, 253 254 .getgeo = xpram_getgeo, 254 255 }; 255 256 ··· 344 343 xpram_disks[i] = alloc_disk(1); 345 344 if (!xpram_disks[i]) 346 345 goto out; 347 - xpram_queues[i] = blk_alloc_queue(xpram_make_request, 348 - NUMA_NO_NODE); 346 + xpram_queues[i] = blk_alloc_queue(NUMA_NO_NODE); 349 347 if (!xpram_queues[i]) { 350 348 put_disk(xpram_disks[i]); 351 349 goto out;
+1 -1
include/linux/blk-mq.h
··· 596 596 rq->q->mq_ops->cleanup_rq(rq); 597 597 } 598 598 599 - blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio); 599 + blk_qc_t blk_mq_submit_bio(struct bio *bio); 600 600 601 601 #endif
+2 -5
include/linux/blkdev.h
··· 286 286 287 287 struct blk_queue_ctx; 288 288 289 - typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); 290 - 291 289 struct bio_vec; 292 290 293 291 enum blk_eh_timer_return { ··· 395 397 396 398 struct blk_queue_stats *stats; 397 399 struct rq_qos *rq_qos; 398 - 399 - make_request_fn *make_request_fn; 400 400 401 401 const struct blk_mq_ops *mq_ops; 402 402 ··· 1158 1162 extern void blk_dump_rq_flags(struct request *, char *); 1159 1163 1160 1164 bool __must_check blk_get_queue(struct request_queue *); 1161 - struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id); 1165 + struct request_queue *blk_alloc_queue(int node_id); 1162 1166 extern void blk_put_queue(struct request_queue *); 1163 1167 extern void blk_set_queue_dying(struct request_queue *); 1164 1168 ··· 1774 1778 1775 1779 1776 1780 struct block_device_operations { 1781 + blk_qc_t (*submit_bio) (struct bio *bio); 1777 1782 int (*open) (struct block_device *, fmode_t); 1778 1783 void (*release) (struct gendisk *, fmode_t); 1779 1784 int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
+1 -2
include/linux/lightnvm.h
··· 631 631 return last; 632 632 } 633 633 634 - typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); 635 634 typedef sector_t (nvm_tgt_capacity_fn)(void *); 636 635 typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, 637 636 int flags); ··· 649 650 int flags; 650 651 651 652 /* target entry points */ 652 - nvm_tgt_make_rq_fn *make_rq; 653 + const struct block_device_operations *bops; 653 654 nvm_tgt_capacity_fn *capacity; 654 655 655 656 /* module-specific init/teardown */