Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

io_uring: add wrapper type for io_req_tw_func_t arg

In preparation for uring_cmd implementations to implement functions
with the io_req_tw_func_t signature, introduce a wrapper struct
io_tw_req to hide the struct io_kiocb * argument. The intention is for
only the io_uring core to access the inner struct io_kiocb *. uring_cmd
implementations should instead call a helper from io_uring/cmd.h to
convert struct io_tw_req to struct io_uring_cmd *.

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Caleb Sander Mateos; committed by Jens Axboe.
c33e779a 4531d165

+61 -42
+5 -1
include/linux/io_uring_types.h
··· 615 615 REQ_F_SQE_COPIED = IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT), 616 616 }; 617 617 618 - typedef void (*io_req_tw_func_t)(struct io_kiocb *req, io_tw_token_t tw); 618 + struct io_tw_req { 619 + struct io_kiocb *req; 620 + }; 621 + 622 + typedef void (*io_req_tw_func_t)(struct io_tw_req tw_req, io_tw_token_t tw); 619 623 620 624 struct io_task_work { 621 625 struct llist_node node;
+9 -7
io_uring/futex.c
··· 41 41 io_alloc_cache_free(&ctx->futex_cache, kfree); 42 42 } 43 43 44 - static void __io_futex_complete(struct io_kiocb *req, io_tw_token_t tw) 44 + static void __io_futex_complete(struct io_tw_req tw_req, io_tw_token_t tw) 45 45 { 46 - hlist_del_init(&req->hash_node); 47 - io_req_task_complete(req, tw); 46 + hlist_del_init(&tw_req.req->hash_node); 47 + io_req_task_complete(tw_req, tw); 48 48 } 49 49 50 - static void io_futex_complete(struct io_kiocb *req, io_tw_token_t tw) 50 + static void io_futex_complete(struct io_tw_req tw_req, io_tw_token_t tw) 51 51 { 52 + struct io_kiocb *req = tw_req.req; 52 53 struct io_ring_ctx *ctx = req->ctx; 53 54 54 55 io_tw_lock(ctx, tw); 55 56 io_cache_free(&ctx->futex_cache, req->async_data); 56 57 io_req_async_data_clear(req, 0); 57 - __io_futex_complete(req, tw); 58 + __io_futex_complete(tw_req, tw); 58 59 } 59 60 60 - static void io_futexv_complete(struct io_kiocb *req, io_tw_token_t tw) 61 + static void io_futexv_complete(struct io_tw_req tw_req, io_tw_token_t tw) 61 62 { 63 + struct io_kiocb *req = tw_req.req; 62 64 struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex); 63 65 struct futex_vector *futexv = req->async_data; 64 66 ··· 75 73 } 76 74 77 75 io_req_async_data_free(req); 78 - __io_futex_complete(req, tw); 76 + __io_futex_complete(tw_req, tw); 79 77 } 80 78 81 79 static bool io_futexv_claim(struct io_futex *iof)
+12 -9
io_uring/io_uring.c
··· 291 291 mutex_lock(&ctx->uring_lock); 292 292 ts.cancel = io_should_terminate_tw(ctx); 293 293 llist_for_each_entry_safe(req, tmp, node, io_task_work.node) 294 - req->io_task_work.func(req, ts); 294 + req->io_task_work.func((struct io_tw_req){req}, ts); 295 295 io_submit_flush_completions(ctx); 296 296 mutex_unlock(&ctx->uring_lock); 297 297 percpu_ref_put(&ctx->refs); ··· 539 539 io_wq_enqueue(tctx->io_wq, &req->work); 540 540 } 541 541 542 - static void io_req_queue_iowq_tw(struct io_kiocb *req, io_tw_token_t tw) 542 + static void io_req_queue_iowq_tw(struct io_tw_req tw_req, io_tw_token_t tw) 543 543 { 544 - io_queue_iowq(req); 544 + io_queue_iowq(tw_req.req); 545 545 } 546 546 547 547 void io_req_queue_iowq(struct io_kiocb *req) ··· 1166 1166 } 1167 1167 INDIRECT_CALL_2(req->io_task_work.func, 1168 1168 io_poll_task_func, io_req_rw_complete, 1169 - req, ts); 1169 + (struct io_tw_req){req}, ts); 1170 1170 node = next; 1171 1171 (*count)++; 1172 1172 if (unlikely(need_resched())) { ··· 1389 1389 io_task_work.node); 1390 1390 INDIRECT_CALL_2(req->io_task_work.func, 1391 1391 io_poll_task_func, io_req_rw_complete, 1392 - req, tw); 1392 + (struct io_tw_req){req}, tw); 1393 1393 *node = next; 1394 1394 if (++ret >= events) 1395 1395 break; ··· 1459 1459 return ret; 1460 1460 } 1461 1461 1462 - static void io_req_task_cancel(struct io_kiocb *req, io_tw_token_t tw) 1462 + static void io_req_task_cancel(struct io_tw_req tw_req, io_tw_token_t tw) 1463 1463 { 1464 + struct io_kiocb *req = tw_req.req; 1465 + 1464 1466 io_tw_lock(req->ctx, tw); 1465 1467 io_req_defer_failed(req, req->cqe.res); 1466 1468 } 1467 1469 1468 - void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw) 1470 + void io_req_task_submit(struct io_tw_req tw_req, io_tw_token_t tw) 1469 1471 { 1472 + struct io_kiocb *req = tw_req.req; 1470 1473 struct io_ring_ctx *ctx = req->ctx; 1471 1474 1472 1475 io_tw_lock(ctx, tw); ··· 1705 1702 return 0; 1706 1703 } 1707 1704 1708 - void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw) 1705 + void io_req_task_complete(struct io_tw_req tw_req, io_tw_token_t tw) 1709 1706 { 1710 - io_req_complete_defer(req); 1707 + io_req_complete_defer(tw_req.req); 1711 1708 } 1712 1709 1713 1710 /*
+2 -2
io_uring/io_uring.h
··· 149 149 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags); 150 150 void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags); 151 151 void io_req_task_queue(struct io_kiocb *req); 152 - void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw); 152 + void io_req_task_complete(struct io_tw_req tw_req, io_tw_token_t tw); 153 153 void io_req_task_queue_fail(struct io_kiocb *req, int ret); 154 - void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw); 154 + void io_req_task_submit(struct io_tw_req tw_req, io_tw_token_t tw); 155 155 struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries); 156 156 struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count); 157 157 void tctx_task_work(struct callback_head *cb);
+2 -1
io_uring/msg_ring.c
··· 70 70 return target_ctx->task_complete; 71 71 } 72 72 73 - static void io_msg_tw_complete(struct io_kiocb *req, io_tw_token_t tw) 73 + static void io_msg_tw_complete(struct io_tw_req tw_req, io_tw_token_t tw) 74 74 { 75 + struct io_kiocb *req = tw_req.req; 75 76 struct io_ring_ctx *ctx = req->ctx; 76 77 77 78 io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
+3 -2
io_uring/notif.c
··· 11 11 12 12 static const struct ubuf_info_ops io_ubuf_ops; 13 13 14 - static void io_notif_tw_complete(struct io_kiocb *notif, io_tw_token_t tw) 14 + static void io_notif_tw_complete(struct io_tw_req tw_req, io_tw_token_t tw) 15 15 { 16 + struct io_kiocb *notif = tw_req.req; 16 17 struct io_notif_data *nd = io_notif_to_data(notif); 17 18 struct io_ring_ctx *ctx = notif->ctx; 18 19 ··· 35 34 } 36 35 37 36 nd = nd->next; 38 - io_req_task_complete(notif, tw); 37 + io_req_task_complete((struct io_tw_req){notif}, tw); 39 38 } while (nd); 40 39 } 41 40
+6 -5
io_uring/poll.c
··· 310 310 return IOU_POLL_NO_ACTION; 311 311 } 312 312 313 - void io_poll_task_func(struct io_kiocb *req, io_tw_token_t tw) 313 + void io_poll_task_func(struct io_tw_req tw_req, io_tw_token_t tw) 314 314 { 315 + struct io_kiocb *req = tw_req.req; 315 316 int ret; 316 317 317 318 ret = io_poll_check_events(req, tw); ··· 333 332 poll = io_kiocb_to_cmd(req, struct io_poll); 334 333 req->cqe.res = mangle_poll(req->cqe.res & poll->events); 335 334 } else if (ret == IOU_POLL_REISSUE) { 336 - io_req_task_submit(req, tw); 335 + io_req_task_submit(tw_req, tw); 337 336 return; 338 337 } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) { 339 338 req->cqe.res = ret; ··· 341 340 } 342 341 343 342 io_req_set_res(req, req->cqe.res, 0); 344 - io_req_task_complete(req, tw); 343 + io_req_task_complete(tw_req, tw); 345 344 } else { 346 345 io_tw_lock(req->ctx, tw); 347 346 348 347 if (ret == IOU_POLL_REMOVE_POLL_USE_RES) 349 - io_req_task_complete(req, tw); 348 + io_req_task_complete(tw_req, tw); 350 349 else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE) 351 - io_req_task_submit(req, tw); 350 + io_req_task_submit(tw_req, tw); 352 351 else 353 352 io_req_defer_failed(req, ret); 354 353 }
+1 -1
io_uring/poll.h
··· 46 46 bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx, 47 47 bool cancel_all); 48 48 49 - void io_poll_task_func(struct io_kiocb *req, io_tw_token_t tw); 49 + void io_poll_task_func(struct io_tw_req tw_req, io_tw_token_t tw);
+3 -2
io_uring/rw.c
··· 564 564 return res; 565 565 } 566 566 567 - void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw) 567 + void io_req_rw_complete(struct io_tw_req tw_req, io_tw_token_t tw) 568 568 { 569 + struct io_kiocb *req = tw_req.req; 569 570 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); 570 571 struct kiocb *kiocb = &rw->kiocb; 571 572 ··· 582 581 req->cqe.flags |= io_put_kbuf(req, req->cqe.res, NULL); 583 582 584 583 io_req_rw_cleanup(req, 0); 585 - io_req_task_complete(req, tw); 584 + io_req_task_complete(tw_req, tw); 586 585 } 587 586 588 587 static void io_complete_rw(struct kiocb *kiocb, long res)
+1 -1
io_uring/rw.h
··· 46 46 int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags); 47 47 void io_readv_writev_cleanup(struct io_kiocb *req); 48 48 void io_rw_fail(struct io_kiocb *req); 49 - void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw); 49 + void io_req_rw_complete(struct io_tw_req tw_req, io_tw_token_t tw); 50 50 int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); 51 51 int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags); 52 52 void io_rw_cache_free(const void *entry);
+11 -7
io_uring/timeout.c
··· 68 68 69 69 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer); 70 70 71 - static void io_timeout_complete(struct io_kiocb *req, io_tw_token_t tw) 71 + static void io_timeout_complete(struct io_tw_req tw_req, io_tw_token_t tw) 72 72 { 73 + struct io_kiocb *req = tw_req.req; 73 74 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout); 74 75 struct io_timeout_data *data = req->async_data; 75 76 struct io_ring_ctx *ctx = req->ctx; ··· 86 85 } 87 86 } 88 87 89 - io_req_task_complete(req, tw); 88 + io_req_task_complete(tw_req, tw); 90 89 } 91 90 92 91 static __cold bool io_flush_killed_timeouts(struct list_head *list, int err) ··· 158 157 io_flush_killed_timeouts(&list, 0); 159 158 } 160 159 161 - static void io_req_tw_fail_links(struct io_kiocb *link, io_tw_token_t tw) 160 + static void io_req_tw_fail_links(struct io_tw_req tw_req, io_tw_token_t tw) 162 161 { 162 + struct io_kiocb *link = tw_req.req; 163 + 163 164 io_tw_lock(link->ctx, tw); 164 165 while (link) { 165 166 struct io_kiocb *nxt = link->link; ··· 171 168 res = link->cqe.res; 172 169 link->link = NULL; 173 170 io_req_set_res(link, res, 0); 174 - io_req_task_complete(link, tw); 171 + io_req_task_complete((struct io_tw_req){link}, tw); 175 172 link = nxt; 176 173 } 177 174 } ··· 320 317 return 0; 321 318 } 322 319 323 - static void io_req_task_link_timeout(struct io_kiocb *req, io_tw_token_t tw) 320 + static void io_req_task_link_timeout(struct io_tw_req tw_req, io_tw_token_t tw) 324 321 { 322 + struct io_kiocb *req = tw_req.req; 325 323 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout); 326 324 struct io_kiocb *prev = timeout->prev; 327 325 int ret; ··· 339 335 ret = -ECANCELED; 340 336 } 341 337 io_req_set_res(req, ret ?: -ETIME, 0); 342 - io_req_task_complete(req, tw); 338 + io_req_task_complete(tw_req, tw); 343 339 io_put_req(prev); 344 340 } else { 345 341 io_req_set_res(req, -ETIME, 0); 346 - io_req_task_complete(req, tw); 342 + io_req_task_complete(tw_req, tw); 347 343 } 348 344 } 349 345
+2 -1
io_uring/uring_cmd.c
··· 113 113 } 114 114 EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable); 115 115 116 - static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw) 116 + static void io_uring_cmd_work(struct io_tw_req tw_req, io_tw_token_t tw) 117 117 { 118 + struct io_kiocb *req = tw_req.req; 118 119 struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); 119 120 unsigned int flags = IO_URING_F_COMPLETE_DEFER; 120 121
+4 -3
io_uring/waitid.c
··· 16 16 #include "waitid.h" 17 17 #include "../kernel/exit.h" 18 18 19 - static void io_waitid_cb(struct io_kiocb *req, io_tw_token_t tw); 19 + static void io_waitid_cb(struct io_tw_req tw_req, io_tw_token_t tw); 20 20 21 21 #define IO_WAITID_CANCEL_FLAG BIT(31) 22 22 #define IO_WAITID_REF_MASK GENMASK(30, 0) ··· 194 194 return true; 195 195 } 196 196 197 - static void io_waitid_cb(struct io_kiocb *req, io_tw_token_t tw) 197 + static void io_waitid_cb(struct io_tw_req tw_req, io_tw_token_t tw) 198 198 { 199 + struct io_kiocb *req = tw_req.req; 199 200 struct io_waitid_async *iwa = req->async_data; 200 201 struct io_ring_ctx *ctx = req->ctx; 201 202 int ret; ··· 230 229 } 231 230 232 231 io_waitid_complete(req, ret); 233 - io_req_task_complete(req, tw); 232 + io_req_task_complete(tw_req, tw); 234 233 } 235 234 236 235 static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,