#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "alloc_cache.h"
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"
#include "opdef.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_COMPLETE		= 0,

	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * The request has more work to do and should be retried. io_uring will
	 * attempt to wait on the file for eligible opcodes, but otherwise
	 * it'll be handed to iowq for blocking execution. It works for normal
	 * requests as well as for the multishot mode.
	 */
	IOU_RETRY		= -EAGAIN,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important, should just be not an otherwise
	 * valid error code, yet less than -MAX_ERRNO and valid internally.
	 */
	IOU_REQUEUE		= -3072,
};

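/*
 * State for a task waiting on CQ events. io_should_wake() uses the target
 * CQ tail and the timeout counters below to decide whether the waiter
 * should be woken.
 */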
struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned cq_tail;
	unsigned cq_min_tail;
	unsigned nr_timeouts;
	int hit_timeout;
	ktime_t min_timeout;
	ktime_t timeout;
	struct hrtimer t;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ktime_t napi_busy_poll_dt;
	bool napi_prefer_busy_poll;
#endif
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
	struct io_ring_ctx *ctx = iowq->ctx;
	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
			 unsigned int cq_entries, size_t *sq_offset);
int io_uring_fill_params(unsigned entries, struct io_uring_params *p);
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

void io_req_track_inflight(struct io_kiocb *req);
struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);
void io_req_queue_iowq(struct io_kiocb *req);

int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
			bool cancel_all);

void io_activate_pollwq(struct io_ring_ctx *ctx);

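/*
 * Assert that CQE posting is serialized for this ring. Depending on the ring
 * flags this means holding ->uring_lock, ->completion_lock, or simply being
 * the submitter task; only compiled in with CONFIG_PROVE_LOCKING.
 */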
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		lockdep_assert_held(&ctx->uring_lock);

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been setup with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (!percpu_ref_is_dying(&ctx->refs))
			lockdep_assert(current == ctx->submitter_task);
	}
#endif
}

static inline bool io_is_compat(struct io_ring_ctx *ctx)
{
	return IS_ENABLED(CONFIG_COMPAT) && unlikely(ctx->compat);
}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

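/* Flush deferred completions, but only if anything is actually batched up. */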
static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
	    ctx->submit_state.cq_flush)
		__io_submit_flush_completions(ctx);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

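/*
 * Reserve the next CQE from the cached CQE range, refilling the cache via
 * io_cqe_cache_refill() when it runs dry. Returns false if no CQE is
 * available.
 */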
static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
				       struct io_uring_cqe **ret,
				       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

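/*
 * Get a CQE that will be committed later. ->cq_flush is set so that
 * io_submit_flush_completions() flushes even if no requests are batched.
 */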
static inline bool io_defer_get_uncommited_cqe(struct io_ring_ctx *ctx,
					       struct io_uring_cqe **cqe_ret)
{
	io_lockdep_assert_cq_locked(ctx);

	ctx->submit_state.cq_flush = true;
	return io_get_cqe(ctx, cqe_ret);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, cqe);
	return true;
}

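/*
 * Mark the request as failed. If its CQE was meant to be skipped, post it
 * anyway and skip the CQEs of the remaining linked requests instead.
 */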
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

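/*
 * Allocate ->async_data for a request, either from the given cache or, if no
 * cache is provided, directly with kmalloc() using the opcode's async_size.
 * Sets REQ_F_ASYNC_DATA on success and returns the allocation (or NULL).
 */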
static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache,
					      struct io_kiocb *req)
{
	if (cache) {
		req->async_data = io_cache_alloc(cache, GFP_KERNEL);
	} else {
		const struct io_issue_def *def = &io_issue_defs[req->opcode];

		WARN_ON_ONCE(!def->async_size);
		req->async_data = kmalloc(def->async_size, GFP_KERNEL);
	}
	if (req->async_data)
		req->flags |= REQ_F_ASYNC_DATA;
	return req->async_data;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	/*
	 * SQPOLL must use the actual sqring head, as using the cached_sq_head
	 * is race prone if the SQPOLL thread has grabbed entries but not yet
	 * committed them to the ring. For !SQPOLL, this doesn't matter, but
	 * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
	 * just read the actual sqring head unconditionally.
	 */
	return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

static inline int io_run_task_work(void)
{
	bool ret = false;

	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER) {
		if (test_thread_flag(TIF_NOTIFY_RESUME)) {
			__set_current_state(TASK_RUNNING);
			resume_user_mode_work(NULL);
		}
		if (current->io_uring) {
			unsigned int count = 0;

			__set_current_state(TASK_RUNNING);
			tctx_task_work_run(current->io_uring, UINT_MAX, &count);
			if (count)
				ret = true;
		}
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		ret = true;
	}

	return ret;
}

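/*
 * True if this ring has local task_work queued, either on the main work list
 * or on the retry list.
 */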
static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
{
	return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || io_local_work_pending(ctx);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, io_tw_token_t tw)
{
	lockdep_assert_held(&ctx->uring_lock);
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

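/*
 * Consume nr references from the task's cached io_uring request refs;
 * io_task_refs_refill() tops the cache back up when it goes negative.
 */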
static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

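/*
 * Allocate a request from the ctx free-list cache, refilling it via
 * __io_alloc_req_refill() when empty. Returns false if the refill fails.
 */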
static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

/*
 * Terminate the request if either of these conditions are true:
 *
 * 1) It's being executed by the original task, but that task is marked
 *    with PF_EXITING as it's exiting.
 * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
 *    our fallback task_work.
 */
static inline bool io_should_terminate_tw(void)
{
	return current->flags & (PF_KTHREAD | PF_EXITING);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}

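/*
 * Check whether the request's file supports polling, caching a positive
 * answer in REQ_F_CAN_POLL so file_can_poll() only needs to be evaluated
 * once per request.
 */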
static inline bool io_file_can_poll(struct io_kiocb *req)
{
	if (req->flags & REQ_F_CAN_POLL)
		return true;
	if (req->file && file_can_poll(req->file)) {
		req->flags |= REQ_F_CAN_POLL;
		return true;
	}
	return false;
}

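/*
 * Read the current time using the ring's configured clockid, going through
 * ktime_get_with_offset() for anything other than CLOCK_MONOTONIC.
 */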
static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
{
	if (ctx->clockid == CLOCK_MONOTONIC)
		return ktime_get();

	return ktime_get_with_offset(ctx->clock_offset);
}

enum {
	IO_CHECK_CQ_OVERFLOW_BIT,
	IO_CHECK_CQ_DROPPED_BIT,
};

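/*
 * True if this ring has pending work: an overflown CQ that needs flushing,
 * or local task_work waiting to be run.
 */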
static inline bool io_has_work(struct io_ring_ctx *ctx)
{
	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
	       io_local_work_pending(ctx);
}
#endif