// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_TW_H
#define IOU_TW_H

#include <linux/sched.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/percpu-refcount.h>
#include <linux/io_uring_types.h>

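/*
 * Default cap on how many locally queued task_work entries are run in
 * one batch when the caller does not pass its own max_events limit.
 */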
#define IO_LOCAL_TW_DEFAULT_MAX 20

/*
 * Terminate the request if any of these conditions is true:
 *
 * 1) It's being executed by the original task, but that task is marked
 *    with PF_EXITING as it's exiting.
 * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
 *    our fallback task_work.
 * 3) The ring has been closed and is going away.
 */
static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
{
	return (current->flags & (PF_EXITING | PF_KTHREAD)) || percpu_ref_is_dying(&ctx->refs);
}
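
/*
 * Illustrative sketch, not part of this header: task_work handlers
 * commonly check this first and fail the request instead of running it,
 * along the lines of (io_req_defer_failed() lives in io_uring core):
 *
 *	if (unlikely(io_should_terminate_tw(req->ctx))) {
 *		io_req_defer_failed(req, -EFAULT);
 *		return;
 *	}
 */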

void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count,
				     unsigned int max_entries);
void tctx_task_work(struct callback_head *cb);
int io_run_local_work(struct io_ring_ctx *ctx, int min_events, int max_events);
int io_run_task_work_sig(struct io_ring_ctx *ctx);

__cold void io_fallback_req_func(struct work_struct *work);
__cold void io_move_task_work_from_local(struct io_ring_ctx *ctx);
int io_run_local_work_locked(struct io_ring_ctx *ctx, int min_events);

void io_req_local_work_add(struct io_kiocb *req, unsigned flags);
void io_req_normal_work_add(struct io_kiocb *req);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
				      unsigned int max_entries, unsigned int *count);
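
/*
 * Queue task_work for @req. Rings created with IORING_SETUP_DEFER_TASKRUN
 * put it on the ring-local list, drained later by the submitter task;
 * everything else goes through generic task_work on the request's task.
 */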
static inline void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
{
	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		io_req_local_work_add(req, flags);
	else
		io_req_normal_work_add(req);
}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}
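
/*
 * Illustrative sketch, not part of this header: callers set the task_work
 * callback before queueing the request, e.g. to complete it from task
 * context:
 *
 *	req->io_task_work.func = io_req_task_complete;
 *	io_req_task_work_add(req);
 */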

/*
 * Run any task_work pending for the current task, including the ring-local
 * work of PF_IO_WORKER threads. Returns true if any work was executed, so
 * callers can re-check their wait condition.
 */
static inline int io_run_task_work(void)
{
	bool ret = false;

	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER) {
		if (test_thread_flag(TIF_NOTIFY_RESUME)) {
			__set_current_state(TASK_RUNNING);
			resume_user_mode_work(NULL);
		}
		if (current->io_uring) {
			unsigned int count = 0;

			__set_current_state(TASK_RUNNING);
			tctx_task_work_run(current->io_uring, UINT_MAX, &count);
			if (count)
				ret = true;
		}
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		ret = true;
	}

	return ret;
}

static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
{
	return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || io_local_work_pending(ctx);
}

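/*
 * Task_work handlers are invoked with the ring's uring_lock already held;
 * the tw token is proof of task_work context, so asserting is sufficient.
 */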
static inline void io_tw_lock(struct io_ring_ctx *ctx, io_tw_token_t tw)
{
	lockdep_assert_held(&ctx->uring_lock);
}

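/*
 * With IORING_SETUP_DEFER_TASKRUN, deferred work may only be run by the
 * task that submits to the ring; these helpers gate task_work running on
 * that restriction.
 */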
static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

#endif