Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: pass io_comp_batch to rq_end_io_fn callback

Add a third parameter 'const struct io_comp_batch *' to the rq_end_io_fn
callback signature. This allows end_io handlers to access the completion
batch context when requests are completed via blk_mq_end_request_batch().

The io_comp_batch is passed through from blk_mq_end_request_batch(), while
NULL is passed from __blk_mq_end_request() and blk_mq_put_rq_ref(), which
do not have a batch context.

This infrastructure change enables drivers to detect whether they're
being called from a batched completion path (like iopoll) and access
additional context stored in the io_comp_batch.

Update all rq_end_io_fn implementations:
- block/blk-mq.c: blk_end_sync_rq
- block/blk-flush.c: flush_end_io, mq_flush_data_end_io
- drivers/nvme/host/ioctl.c: nvme_uring_cmd_end_io
- drivers/nvme/host/core.c: nvme_keep_alive_end_io
- drivers/nvme/host/pci.c: abort_endio, nvme_del_queue_end, nvme_del_cq_end
- drivers/nvme/target/passthru.c: nvmet_passthru_req_done
- drivers/scsi/scsi_error.c: eh_lock_door_done
- drivers/scsi/sg.c: sg_rq_end_io
- drivers/scsi/st.c: st_scsi_execute_end
- drivers/target/target_core_pscsi.c: pscsi_req_done
- drivers/md/dm-rq.c: end_clone_request

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Ming Lei and committed by Jens Axboe
5e2fde1a df73d3c6

+39 -21
+4 -2
block/blk-flush.c
··· 199 199 } 200 200 201 201 static enum rq_end_io_ret flush_end_io(struct request *flush_rq, 202 - blk_status_t error) 202 + blk_status_t error, 203 + const struct io_comp_batch *iob) 203 204 { 204 205 struct request_queue *q = flush_rq->q; 205 206 struct list_head *running; ··· 336 335 } 337 336 338 337 static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq, 339 - blk_status_t error) 338 + blk_status_t error, 339 + const struct io_comp_batch *iob) 340 340 { 341 341 struct request_queue *q = rq->q; 342 342 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+5 -4
block/blk-mq.c
··· 1156 1156 1157 1157 if (rq->end_io) { 1158 1158 rq_qos_done(rq->q, rq); 1159 - if (rq->end_io(rq, error) == RQ_END_IO_FREE) 1159 + if (rq->end_io(rq, error, NULL) == RQ_END_IO_FREE) 1160 1160 blk_mq_free_request(rq); 1161 1161 } else { 1162 1162 blk_mq_free_request(rq); ··· 1211 1211 * If end_io handler returns NONE, then it still has 1212 1212 * ownership of the request. 1213 1213 */ 1214 - if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE) 1214 + if (rq->end_io && rq->end_io(rq, 0, iob) == RQ_END_IO_NONE) 1215 1215 continue; 1216 1216 1217 1217 WRITE_ONCE(rq->state, MQ_RQ_IDLE); ··· 1458 1458 blk_status_t ret; 1459 1459 }; 1460 1460 1461 - static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret) 1461 + static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret, 1462 + const struct io_comp_batch *iob) 1462 1463 { 1463 1464 struct blk_rq_wait *wait = rq->end_io_data; 1464 1465 ··· 1689 1688 void blk_mq_put_rq_ref(struct request *rq) 1690 1689 { 1691 1690 if (is_flush_rq(rq)) { 1692 - if (rq->end_io(rq, 0) == RQ_END_IO_FREE) 1691 + if (rq->end_io(rq, 0, NULL) == RQ_END_IO_FREE) 1693 1692 blk_mq_free_request(rq); 1694 1693 } else if (req_ref_put_and_test(rq)) { 1695 1694 __blk_mq_free_request(rq);
+2 -1
drivers/md/dm-rq.c
··· 295 295 } 296 296 297 297 static enum rq_end_io_ret end_clone_request(struct request *clone, 298 - blk_status_t error) 298 + blk_status_t error, 299 + const struct io_comp_batch *iob) 299 300 { 300 301 struct dm_rq_target_io *tio = clone->end_io_data; 301 302
+2 -1
drivers/nvme/host/core.c
··· 1333 1333 } 1334 1334 1335 1335 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, 1336 - blk_status_t status) 1336 + blk_status_t status, 1337 + const struct io_comp_batch *iob) 1337 1338 { 1338 1339 struct nvme_ctrl *ctrl = rq->end_io_data; 1339 1340 unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
+2 -1
drivers/nvme/host/ioctl.c
··· 410 410 } 411 411 412 412 static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, 413 - blk_status_t err) 413 + blk_status_t err, 414 + const struct io_comp_batch *iob) 414 415 { 415 416 struct io_uring_cmd *ioucmd = req->end_io_data; 416 417 struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+7 -4
drivers/nvme/host/pci.c
··· 1615 1615 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); 1616 1616 } 1617 1617 1618 - static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error) 1618 + static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error, 1619 + const struct io_comp_batch *iob) 1619 1620 { 1620 1621 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 1621 1622 ··· 2859 2858 } 2860 2859 2861 2860 static enum rq_end_io_ret nvme_del_queue_end(struct request *req, 2862 - blk_status_t error) 2861 + blk_status_t error, 2862 + const struct io_comp_batch *iob) 2863 2863 { 2864 2864 struct nvme_queue *nvmeq = req->end_io_data; 2865 2865 ··· 2870 2868 } 2871 2869 2872 2870 static enum rq_end_io_ret nvme_del_cq_end(struct request *req, 2873 - blk_status_t error) 2871 + blk_status_t error, 2872 + const struct io_comp_batch *iob) 2874 2873 { 2875 2874 struct nvme_queue *nvmeq = req->end_io_data; 2876 2875 2877 2876 if (error) 2878 2877 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 2879 2878 2880 - return nvme_del_queue_end(req, error); 2879 + return nvme_del_queue_end(req, error, iob); 2881 2880 } 2882 2881 2883 2882 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
+2 -1
drivers/nvme/target/passthru.c
··· 247 247 } 248 248 249 249 static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq, 250 - blk_status_t blk_status) 250 + blk_status_t blk_status, 251 + const struct io_comp_batch *iob) 251 252 { 252 253 struct nvmet_req *req = rq->end_io_data; 253 254
+2 -1
drivers/scsi/scsi_error.c
··· 2085 2085 } 2086 2086 2087 2087 static enum rq_end_io_ret eh_lock_door_done(struct request *req, 2088 - blk_status_t status) 2088 + blk_status_t status, 2089 + const struct io_comp_batch *iob) 2089 2090 { 2090 2091 blk_mq_free_request(req); 2091 2092 return RQ_END_IO_NONE;
+4 -2
drivers/scsi/sg.c
··· 177 177 } Sg_device; 178 178 179 179 /* tasklet or soft irq callback */ 180 - static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status); 180 + static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status, 181 + const struct io_comp_batch *iob); 181 182 static int sg_start_req(Sg_request *srp, unsigned char *cmd); 182 183 static int sg_finish_rem_req(Sg_request * srp); 183 184 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); ··· 1310 1309 * level when a command is completed (or has failed). 1311 1310 */ 1312 1311 static enum rq_end_io_ret 1313 - sg_rq_end_io(struct request *rq, blk_status_t status) 1312 + sg_rq_end_io(struct request *rq, blk_status_t status, 1313 + const struct io_comp_batch *iob) 1314 1314 { 1315 1315 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 1316 1316 struct sg_request *srp = rq->end_io_data;
+2 -1
drivers/scsi/st.c
··· 525 525 } 526 526 527 527 static enum rq_end_io_ret st_scsi_execute_end(struct request *req, 528 - blk_status_t status) 528 + blk_status_t status, 529 + const struct io_comp_batch *iob) 529 530 { 530 531 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); 531 532 struct st_request *SRpnt = req->end_io_data;
+4 -2
drivers/target/target_core_pscsi.c
··· 39 39 } 40 40 41 41 static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd); 42 - static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t); 42 + static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t, 43 + const struct io_comp_batch *); 43 44 44 45 /* pscsi_attach_hba(): 45 46 * ··· 1002 1001 } 1003 1002 1004 1003 static enum rq_end_io_ret pscsi_req_done(struct request *req, 1005 - blk_status_t status) 1004 + blk_status_t status, 1005 + const struct io_comp_batch *iob) 1006 1006 { 1007 1007 struct se_cmd *cmd = req->end_io_data; 1008 1008 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
+3 -1
include/linux/blk-mq.h
··· 13 13 14 14 struct blk_mq_tags; 15 15 struct blk_flush_queue; 16 + struct io_comp_batch; 16 17 17 18 #define BLKDEV_MIN_RQ 4 18 19 #define BLKDEV_DEFAULT_RQ 128 ··· 23 22 RQ_END_IO_FREE, 24 23 }; 25 24 26 - typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t); 25 + typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t, 26 + const struct io_comp_batch *); 27 27 28 28 /* 29 29 * request flags */