Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

block: remove BLK_MQ_F_SHOULD_MERGE

BLK_MQ_F_SHOULD_MERGE is set for all tag_sets except those that purely
process passthrough commands (bsg-lib, ufs tmf, various nvme admin
queues) and thus don't even check the flag. Remove it to simplify the
driver interface.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20241219060214.1928848-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig and committed by Jens Axboe.
Commit: cc76ace4 (parent: 9bc1e897)

+15 -43
-1
arch/um/drivers/ubd_kern.c
··· 865 865 ubd_dev->tag_set.ops = &ubd_mq_ops; 866 866 ubd_dev->tag_set.queue_depth = 64; 867 867 ubd_dev->tag_set.numa_node = NUMA_NO_NODE; 868 - ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 869 868 ubd_dev->tag_set.driver_data = ubd_dev; 870 869 ubd_dev->tag_set.nr_hw_queues = 1; 871 870
-1
block/blk-mq-debugfs.c
··· 181 181 182 182 #define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name 183 183 static const char *const hctx_flag_name[] = { 184 - HCTX_FLAG_NAME(SHOULD_MERGE), 185 184 HCTX_FLAG_NAME(TAG_QUEUE_SHARED), 186 185 HCTX_FLAG_NAME(STACKING), 187 186 HCTX_FLAG_NAME(TAG_HCTX_SHARED),
+1 -2
block/blk-mq-sched.c
··· 351 351 ctx = blk_mq_get_ctx(q); 352 352 hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); 353 353 type = hctx->type; 354 - if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) || 355 - list_empty_careful(&ctx->rq_lists[type])) 354 + if (list_empty_careful(&ctx->rq_lists[type])) 356 355 goto out_put; 357 356 358 357 /* default per sw-queue merge */
-1
drivers/block/amiflop.c
··· 1819 1819 unit[drive].tag_set.nr_maps = 1; 1820 1820 unit[drive].tag_set.queue_depth = 2; 1821 1821 unit[drive].tag_set.numa_node = NUMA_NO_NODE; 1822 - unit[drive].tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 1823 1822 if (blk_mq_alloc_tag_set(&unit[drive].tag_set)) 1824 1823 goto out_cleanup_trackbuf; 1825 1824
-1
drivers/block/aoe/aoeblk.c
··· 368 368 set->nr_hw_queues = 1; 369 369 set->queue_depth = 128; 370 370 set->numa_node = NUMA_NO_NODE; 371 - set->flags = BLK_MQ_F_SHOULD_MERGE; 372 371 err = blk_mq_alloc_tag_set(set); 373 372 if (err) { 374 373 pr_err("aoe: cannot allocate tag set for %ld.%d\n",
-1
drivers/block/ataflop.c
··· 2088 2088 unit[i].tag_set.nr_maps = 1; 2089 2089 unit[i].tag_set.queue_depth = 2; 2090 2090 unit[i].tag_set.numa_node = NUMA_NO_NODE; 2091 - unit[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 2092 2091 ret = blk_mq_alloc_tag_set(&unit[i].tag_set); 2093 2092 if (ret) 2094 2093 goto err;
-1
drivers/block/floppy.c
··· 4596 4596 tag_sets[drive].nr_maps = 1; 4597 4597 tag_sets[drive].queue_depth = 2; 4598 4598 tag_sets[drive].numa_node = NUMA_NO_NODE; 4599 - tag_sets[drive].flags = BLK_MQ_F_SHOULD_MERGE; 4600 4599 err = blk_mq_alloc_tag_set(&tag_sets[drive]); 4601 4600 if (err) 4602 4601 goto out_put_disk;
+1 -2
drivers/block/loop.c
··· 2023 2023 lo->tag_set.queue_depth = hw_queue_depth; 2024 2024 lo->tag_set.numa_node = NUMA_NO_NODE; 2025 2025 lo->tag_set.cmd_size = sizeof(struct loop_cmd); 2026 - lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING | 2027 - BLK_MQ_F_NO_SCHED_BY_DEFAULT; 2026 + lo->tag_set.flags = BLK_MQ_F_STACKING | BLK_MQ_F_NO_SCHED_BY_DEFAULT; 2028 2027 lo->tag_set.driver_data = lo; 2029 2028 2030 2029 err = blk_mq_alloc_tag_set(&lo->tag_set);
-1
drivers/block/mtip32xx/mtip32xx.c
··· 3416 3416 dd->tags.reserved_tags = 1; 3417 3417 dd->tags.cmd_size = sizeof(struct mtip_cmd); 3418 3418 dd->tags.numa_node = dd->numa_node; 3419 - dd->tags.flags = BLK_MQ_F_SHOULD_MERGE; 3420 3419 dd->tags.driver_data = dd; 3421 3420 dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS; 3422 3421
+1 -2
drivers/block/nbd.c
··· 1841 1841 nbd->tag_set.queue_depth = 128; 1842 1842 nbd->tag_set.numa_node = NUMA_NO_NODE; 1843 1843 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd); 1844 - nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | 1845 - BLK_MQ_F_BLOCKING; 1844 + nbd->tag_set.flags = BLK_MQ_F_BLOCKING; 1846 1845 nbd->tag_set.driver_data = nbd; 1847 1846 INIT_WORK(&nbd->remove_work, nbd_dev_remove_work); 1848 1847 nbd->backend = NULL;
-2
drivers/block/null_blk/main.c
··· 1791 1791 tag_set.nr_hw_queues = g_submit_queues; 1792 1792 tag_set.queue_depth = g_hw_queue_depth; 1793 1793 tag_set.numa_node = g_home_node; 1794 - tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 1795 1794 if (g_no_sched) 1796 1795 tag_set.flags |= BLK_MQ_F_NO_SCHED; 1797 1796 if (g_shared_tag_bitmap) ··· 1816 1817 nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues; 1817 1818 nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth; 1818 1819 nullb->tag_set->numa_node = nullb->dev->home_node; 1819 - nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE; 1820 1820 if (nullb->dev->no_sched) 1821 1821 nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED; 1822 1822 if (nullb->dev->shared_tag_bitmap)
+1 -2
drivers/block/ps3disk.c
··· 434 434 435 435 ps3disk_identify(dev); 436 436 437 - error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1, 438 - BLK_MQ_F_SHOULD_MERGE); 437 + error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1, 0); 439 438 if (error) 440 439 goto fail_teardown; 441 440
-1
drivers/block/rbd.c
··· 4964 4964 rbd_dev->tag_set.ops = &rbd_mq_ops; 4965 4965 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth; 4966 4966 rbd_dev->tag_set.numa_node = NUMA_NO_NODE; 4967 - rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 4968 4967 rbd_dev->tag_set.nr_hw_queues = num_present_cpus(); 4969 4968 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request); 4970 4969
+1 -2
drivers/block/rnbd/rnbd-clt.c
··· 1209 1209 tag_set->ops = &rnbd_mq_ops; 1210 1210 tag_set->queue_depth = sess->queue_depth; 1211 1211 tag_set->numa_node = NUMA_NO_NODE; 1212 - tag_set->flags = BLK_MQ_F_SHOULD_MERGE | 1213 - BLK_MQ_F_TAG_QUEUE_SHARED; 1212 + tag_set->flags = BLK_MQ_F_TAG_QUEUE_SHARED; 1214 1213 tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE; 1215 1214 1216 1215 /* for HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL */
+1 -1
drivers/block/sunvdc.c
··· 829 829 } 830 830 831 831 err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops, 832 - VDC_TX_RING_SIZE, BLK_MQ_F_SHOULD_MERGE); 832 + VDC_TX_RING_SIZE, 0); 833 833 if (err) 834 834 return err; 835 835
+1 -1
drivers/block/swim.c
··· 818 818 819 819 for (drive = 0; drive < swd->floppy_count; drive++) { 820 820 err = blk_mq_alloc_sq_tag_set(&swd->unit[drive].tag_set, 821 - &swim_mq_ops, 2, BLK_MQ_F_SHOULD_MERGE); 821 + &swim_mq_ops, 2, 0); 822 822 if (err) 823 823 goto exit_put_disks; 824 824
+1 -2
drivers/block/swim3.c
··· 1208 1208 fs = &floppy_states[floppy_count]; 1209 1209 memset(fs, 0, sizeof(*fs)); 1210 1210 1211 - rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2, 1212 - BLK_MQ_F_SHOULD_MERGE); 1211 + rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2, 0); 1213 1212 if (rc) 1214 1213 goto out_unregister; 1215 1214
-1
drivers/block/ublk_drv.c
··· 2205 2205 ub->tag_set.queue_depth = ub->dev_info.queue_depth; 2206 2206 ub->tag_set.numa_node = NUMA_NO_NODE; 2207 2207 ub->tag_set.cmd_size = sizeof(struct ublk_rq_data); 2208 - ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 2209 2208 ub->tag_set.driver_data = ub; 2210 2209 return blk_mq_alloc_tag_set(&ub->tag_set); 2211 2210 }
-1
drivers/block/virtio_blk.c
··· 1481 1481 vblk->tag_set.ops = &virtio_mq_ops; 1482 1482 vblk->tag_set.queue_depth = queue_depth; 1483 1483 vblk->tag_set.numa_node = NUMA_NO_NODE; 1484 - vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 1485 1484 vblk->tag_set.cmd_size = 1486 1485 sizeof(struct virtblk_req) + 1487 1486 sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
-1
drivers/block/xen-blkfront.c
··· 1131 1131 } else 1132 1132 info->tag_set.queue_depth = BLK_RING_SIZE(info); 1133 1133 info->tag_set.numa_node = NUMA_NO_NODE; 1134 - info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 1135 1134 info->tag_set.cmd_size = sizeof(struct blkif_req); 1136 1135 info->tag_set.driver_data = info; 1137 1136
-1
drivers/block/z2ram.c
··· 354 354 tag_set.nr_maps = 1; 355 355 tag_set.queue_depth = 16; 356 356 tag_set.numa_node = NUMA_NO_NODE; 357 - tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 358 357 ret = blk_mq_alloc_tag_set(&tag_set); 359 358 if (ret) 360 359 goto out_unregister_blkdev;
+1 -1
drivers/cdrom/gdrom.c
··· 777 777 probe_gdrom_setupcd(); 778 778 779 779 err = blk_mq_alloc_sq_tag_set(&gd.tag_set, &gdrom_mq_ops, 1, 780 - BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING); 780 + BLK_MQ_F_BLOCKING); 781 781 if (err) 782 782 goto probe_fail_free_cd_info; 783 783
+1 -1
drivers/md/dm-rq.c
··· 547 547 md->tag_set->ops = &dm_mq_ops; 548 548 md->tag_set->queue_depth = dm_get_blk_mq_queue_depth(); 549 549 md->tag_set->numa_node = md->numa_node_id; 550 - md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING; 550 + md->tag_set->flags = BLK_MQ_F_STACKING; 551 551 md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues(); 552 552 md->tag_set->driver_data = md; 553 553
+1 -2
drivers/memstick/core/ms_block.c
··· 2094 2094 if (msb->disk_id < 0) 2095 2095 return msb->disk_id; 2096 2096 2097 - rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2, 2098 - BLK_MQ_F_SHOULD_MERGE); 2097 + rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2, 0); 2099 2098 if (rc) 2100 2099 goto out_release_id; 2101 2100
+1 -2
drivers/memstick/core/mspro_block.c
··· 1139 1139 if (disk_id < 0) 1140 1140 return disk_id; 1141 1141 1142 - rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2, 1143 - BLK_MQ_F_SHOULD_MERGE); 1142 + rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2, 0); 1144 1143 if (rc) 1145 1144 goto out_release_id; 1146 1145
+1 -1
drivers/mmc/core/queue.c
··· 441 441 else 442 442 mq->tag_set.queue_depth = MMC_QUEUE_DEPTH; 443 443 mq->tag_set.numa_node = NUMA_NO_NODE; 444 - mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; 444 + mq->tag_set.flags = BLK_MQ_F_BLOCKING; 445 445 mq->tag_set.nr_hw_queues = 1; 446 446 mq->tag_set.cmd_size = sizeof(struct mmc_queue_req); 447 447 mq->tag_set.driver_data = mq;
+1 -1
drivers/mtd/mtd_blkdevs.c
··· 329 329 goto out_list_del; 330 330 331 331 ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2, 332 - BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING); 332 + BLK_MQ_F_BLOCKING); 333 333 if (ret) 334 334 goto out_kfree_tag_set; 335 335
+1 -1
drivers/mtd/ubi/block.c
··· 383 383 dev->tag_set.ops = &ubiblock_mq_ops; 384 384 dev->tag_set.queue_depth = 64; 385 385 dev->tag_set.numa_node = NUMA_NO_NODE; 386 - dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; 386 + dev->tag_set.flags = BLK_MQ_F_BLOCKING; 387 387 dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu); 388 388 dev->tag_set.driver_data = dev; 389 389 dev->tag_set.nr_hw_queues = 1;
-1
drivers/nvme/host/apple.c
··· 1275 1275 anv->tagset.timeout = NVME_IO_TIMEOUT; 1276 1276 anv->tagset.numa_node = NUMA_NO_NODE; 1277 1277 anv->tagset.cmd_size = sizeof(struct apple_nvme_iod); 1278 - anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE; 1279 1278 anv->tagset.driver_data = &anv->ioq; 1280 1279 1281 1280 ret = blk_mq_alloc_tag_set(&anv->tagset);
-1
drivers/nvme/host/core.c
··· 4639 4639 /* Reserved for fabric connect */ 4640 4640 set->reserved_tags = 1; 4641 4641 set->numa_node = ctrl->numa_node; 4642 - set->flags = BLK_MQ_F_SHOULD_MERGE; 4643 4642 if (ctrl->ops->flags & NVME_F_BLOCKING) 4644 4643 set->flags |= BLK_MQ_F_BLOCKING; 4645 4644 set->cmd_size = cmd_size;
-1
drivers/s390/block/dasd_genhd.c
··· 56 56 block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); 57 57 block->tag_set.nr_hw_queues = nr_hw_queues; 58 58 block->tag_set.queue_depth = queue_depth; 59 - block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 60 59 block->tag_set.numa_node = NUMA_NO_NODE; 61 60 rc = blk_mq_alloc_tag_set(&block->tag_set); 62 61 if (rc)
-1
drivers/s390/block/scm_blk.c
··· 461 461 bdev->tag_set.cmd_size = sizeof(blk_status_t); 462 462 bdev->tag_set.nr_hw_queues = nr_requests; 463 463 bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; 464 - bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 465 464 bdev->tag_set.numa_node = NUMA_NO_NODE; 466 465 467 466 ret = blk_mq_alloc_tag_set(&bdev->tag_set);
-1
drivers/scsi/scsi_lib.c
··· 2065 2065 tag_set->queue_depth = shost->can_queue; 2066 2066 tag_set->cmd_size = cmd_size; 2067 2067 tag_set->numa_node = dev_to_node(shost->dma_dev); 2068 - tag_set->flags = BLK_MQ_F_SHOULD_MERGE; 2069 2068 tag_set->flags |= 2070 2069 BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); 2071 2070 if (shost->queuecommand_may_block)
-1
include/linux/blk-mq.h
··· 668 668 669 669 /* Keep hctx_flag_name[] in sync with the definitions below */ 670 670 enum { 671 - BLK_MQ_F_SHOULD_MERGE = 1 << 0, 672 671 BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1, 673 672 /* 674 673 * Set when this device requires underlying blk-mq device for