Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

io_uring/uring_cmd: avoid double indirect call in task work dispatch

io_uring task work dispatch makes an indirect call to struct io_kiocb's
io_task_work.func field to allow running arbitrary task work functions.
In the uring_cmd case, this calls io_uring_cmd_work(), which immediately
makes another indirect call to struct io_uring_cmd's task_work_cb field.
Change the uring_cmd task work callbacks to functions whose signatures
match io_req_tw_func_t. Add a function io_uring_cmd_from_tw() to convert
from the task work's struct io_tw_req argument to struct io_uring_cmd *.
Define a constant IO_URING_CMD_TASK_WORK_ISSUE_FLAGS to avoid
manufacturing issue_flags in the uring_cmd task work callbacks. Now
uring_cmd task work dispatch makes a single indirect call to the
uring_cmd implementation's callback. This also allows removing the
task_work_cb field from struct io_uring_cmd, freeing up 8 bytes for
future storage.
Since fuse_uring_send_in_task() now has access to the io_tw_token_t,
check its cancel field directly instead of relying on the
IO_URING_F_TASK_DEAD issue flag.

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Caleb Sander Mateos and committed by Jens Axboe.
Commit 20fb3d05 (parent c33e779a)

+41 -47
+4 -2
block/ioctl.c
··· 769 769 bool nowait; 770 770 }; 771 771 772 - static void blk_cmd_complete(struct io_uring_cmd *cmd, unsigned int issue_flags) 772 + static void blk_cmd_complete(struct io_tw_req tw_req, io_tw_token_t tw) 773 773 { 774 + struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req); 774 775 struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd); 775 776 776 777 if (bic->res == -EAGAIN && bic->nowait) 777 778 io_uring_cmd_issue_blocking(cmd); 778 779 else 779 - io_uring_cmd_done(cmd, bic->res, issue_flags); 780 + io_uring_cmd_done(cmd, bic->res, 781 + IO_URING_CMD_TASK_WORK_ISSUE_FLAGS); 780 782 } 781 783 782 784 static void bio_cmd_bio_end_io(struct bio *bio)
+11 -11
drivers/block/ublk_drv.c
··· 1302 1302 return true; 1303 1303 } 1304 1304 1305 - static void ublk_dispatch_req(struct ublk_queue *ubq, 1306 - struct request *req, 1307 - unsigned int issue_flags) 1305 + static void ublk_dispatch_req(struct ublk_queue *ubq, struct request *req) 1308 1306 { 1307 + unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS; 1309 1308 int tag = req->tag; 1310 1309 struct ublk_io *io = &ubq->ios[tag]; 1311 1310 ··· 1347 1348 ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags); 1348 1349 } 1349 1350 1350 - static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd, 1351 - unsigned int issue_flags) 1351 + static void ublk_cmd_tw_cb(struct io_tw_req tw_req, io_tw_token_t tw) 1352 1352 { 1353 + struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req); 1353 1354 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); 1354 1355 struct ublk_queue *ubq = pdu->ubq; 1355 1356 1356 - ublk_dispatch_req(ubq, pdu->req, issue_flags); 1357 + ublk_dispatch_req(ubq, pdu->req); 1357 1358 } 1358 1359 1359 1360 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq) ··· 1365 1366 io_uring_cmd_complete_in_task(cmd, ublk_cmd_tw_cb); 1366 1367 } 1367 1368 1368 - static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd, 1369 - unsigned int issue_flags) 1369 + static void ublk_cmd_list_tw_cb(struct io_tw_req tw_req, io_tw_token_t tw) 1370 1370 { 1371 + struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req); 1371 1372 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); 1372 1373 struct request *rq = pdu->req_list; 1373 1374 struct request *next; ··· 1375 1376 do { 1376 1377 next = rq->rq_next; 1377 1378 rq->rq_next = NULL; 1378 - ublk_dispatch_req(rq->mq_hctx->driver_data, rq, issue_flags); 1379 + ublk_dispatch_req(rq->mq_hctx->driver_data, rq); 1379 1380 rq = next; 1380 1381 } while (rq); 1381 1382 } ··· 2522 2523 return NULL; 2523 2524 } 2524 2525 2525 - static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd, 2526 - unsigned int issue_flags) 2526 + 
static void ublk_ch_uring_cmd_cb(struct io_tw_req tw_req, io_tw_token_t tw) 2527 2527 { 2528 + unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS; 2529 + struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req); 2528 2530 int ret = ublk_ch_uring_cmd_local(cmd, issue_flags); 2529 2531 2530 2532 if (ret != -EIOCBQUEUED)
+4 -3
drivers/nvme/host/ioctl.c
··· 398 398 return io_uring_cmd_to_pdu(ioucmd, struct nvme_uring_cmd_pdu); 399 399 } 400 400 401 - static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd, 402 - unsigned issue_flags) 401 + static void nvme_uring_task_cb(struct io_tw_req tw_req, io_tw_token_t tw) 403 402 { 403 + struct io_uring_cmd *ioucmd = io_uring_cmd_from_tw(tw_req); 404 404 struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); 405 405 406 406 if (pdu->bio) 407 407 blk_rq_unmap_user(pdu->bio); 408 - io_uring_cmd_done32(ioucmd, pdu->status, pdu->result, issue_flags); 408 + io_uring_cmd_done32(ioucmd, pdu->status, pdu->result, 409 + IO_URING_CMD_TASK_WORK_ISSUE_FLAGS); 409 410 } 410 411 411 412 static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+3 -2
fs/btrfs/ioctl.c
··· 4649 4649 struct btrfs_uring_priv *priv; 4650 4650 }; 4651 4651 4652 - static void btrfs_uring_read_finished(struct io_uring_cmd *cmd, unsigned int issue_flags) 4652 + static void btrfs_uring_read_finished(struct io_tw_req tw_req, io_tw_token_t tw) 4653 4653 { 4654 + struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req); 4654 4655 struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(cmd, struct io_btrfs_cmd); 4655 4656 struct btrfs_uring_priv *priv = bc->priv; 4656 4657 struct btrfs_inode *inode = BTRFS_I(file_inode(priv->iocb.ki_filp)); ··· 4696 4695 btrfs_unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state); 4697 4696 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 4698 4697 4699 - io_uring_cmd_done(cmd, ret, issue_flags); 4698 + io_uring_cmd_done(cmd, ret, IO_URING_CMD_TASK_WORK_ISSUE_FLAGS); 4700 4699 add_rchar(current, ret); 4701 4700 4702 4701 for (index = 0; index < priv->nr_pages; index++)
+4 -3
fs/fuse/dev_uring.c
··· 1209 1209 * User buffers are not mapped yet - the application does not have permission 1210 1210 * to write to it - this has to be executed in ring task context. 1211 1211 */ 1212 - static void fuse_uring_send_in_task(struct io_uring_cmd *cmd, 1213 - unsigned int issue_flags) 1212 + static void fuse_uring_send_in_task(struct io_tw_req tw_req, io_tw_token_t tw) 1214 1213 { 1214 + unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS; 1215 + struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req); 1215 1216 struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd); 1216 1217 struct fuse_ring_queue *queue = ent->queue; 1217 1218 int err; 1218 1219 1219 - if (!(issue_flags & IO_URING_F_TASK_DEAD)) { 1220 + if (!tw.cancel) { 1220 1221 err = fuse_uring_prepare_send(ent, ent->fuse_req); 1221 1222 if (err) { 1222 1223 fuse_uring_next_fuse_req(ent, queue, issue_flags);
+13 -9
include/linux/io_uring/cmd.h
··· 11 11 /* io_uring_cmd is being issued again */ 12 12 #define IORING_URING_CMD_REISSUE (1U << 31) 13 13 14 - typedef void (*io_uring_cmd_tw_t)(struct io_uring_cmd *cmd, 15 - unsigned issue_flags); 16 - 17 14 struct io_uring_cmd { 18 15 struct file *file; 19 16 const struct io_uring_sqe *sqe; 20 - /* callback to defer completions to task context */ 21 - io_uring_cmd_tw_t task_work_cb; 22 17 u32 cmd_op; 23 18 u32 flags; 24 19 u8 pdu[32]; /* available inline for free use */ 20 + u8 unused[8]; 25 21 }; 26 22 27 23 static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe) ··· 56 60 unsigned issue_flags, bool is_cqe32); 57 61 58 62 void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, 59 - io_uring_cmd_tw_t task_work_cb, 63 + io_req_tw_func_t task_work_cb, 60 64 unsigned flags); 61 65 62 66 /* ··· 105 109 { 106 110 } 107 111 static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, 108 - io_uring_cmd_tw_t task_work_cb, unsigned flags) 112 + io_req_tw_func_t task_work_cb, unsigned flags) 109 113 { 110 114 } 111 115 static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd, ··· 128 132 } 129 133 #endif 130 134 135 + static inline struct io_uring_cmd *io_uring_cmd_from_tw(struct io_tw_req tw_req) 136 + { 137 + return io_kiocb_to_cmd(tw_req.req, struct io_uring_cmd); 138 + } 139 + 140 + /* task_work executor checks the deferred list completion */ 141 + #define IO_URING_CMD_TASK_WORK_ISSUE_FLAGS IO_URING_F_COMPLETE_DEFER 142 + 131 143 /* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */ 132 144 static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd, 133 - io_uring_cmd_tw_t task_work_cb) 145 + io_req_tw_func_t task_work_cb) 134 146 { 135 147 __io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE); 136 148 } 137 149 138 150 static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, 139 - io_uring_cmd_tw_t task_work_cb) 151 + io_req_tw_func_t task_work_cb) 140 152 { 
141 153 __io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0); 142 154 }
-1
include/linux/io_uring_types.h
··· 39 39 /* set when uring wants to cancel a previously issued command */ 40 40 IO_URING_F_CANCEL = (1 << 11), 41 41 IO_URING_F_COMPAT = (1 << 12), 42 - IO_URING_F_TASK_DEAD = (1 << 13), 43 42 }; 44 43 45 44 struct io_wq_work_node {
+2 -16
io_uring/uring_cmd.c
··· 113 113 } 114 114 EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable); 115 115 116 - static void io_uring_cmd_work(struct io_tw_req tw_req, io_tw_token_t tw) 117 - { 118 - struct io_kiocb *req = tw_req.req; 119 - struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); 120 - unsigned int flags = IO_URING_F_COMPLETE_DEFER; 121 - 122 - if (unlikely(tw.cancel)) 123 - flags |= IO_URING_F_TASK_DEAD; 124 - 125 - /* task_work executor checks the deffered list completion */ 126 - ioucmd->task_work_cb(ioucmd, flags); 127 - } 128 - 129 116 void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, 130 - io_uring_cmd_tw_t task_work_cb, 117 + io_req_tw_func_t task_work_cb, 131 118 unsigned flags) 132 119 { 133 120 struct io_kiocb *req = cmd_to_io_kiocb(ioucmd); ··· 122 135 if (WARN_ON_ONCE(req->flags & REQ_F_APOLL_MULTISHOT)) 123 136 return; 124 137 125 - ioucmd->task_work_cb = task_work_cb; 126 - req->io_task_work.func = io_uring_cmd_work; 138 + req->io_task_work.func = task_work_cb; 127 139 __io_req_task_work_add(req, flags); 128 140 } 129 141 EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);