// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>
#include <linux/indirect_call_wrapper.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"

static void io_complete_rw(struct kiocb *kiocb, long res);
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res);

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
{
	/* If FMODE_NOWAIT is set for a file, we're golden */
	if (req->flags & REQ_F_SUPPORT_NOWAIT)
		return true;
	/* No FMODE_NOWAIT, if we can poll, check the status */
	if (io_file_can_poll(req)) {
		struct poll_table_struct pt = { ._key = mask };

		return vfs_poll(req->file, &pt) & mask;
	}
	/* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
	return false;
}

#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;

	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	rw->len = clen;
	return 0;
}
#endif

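/*
 * With BUFFER_SELECT, a vectored request must carry exactly one iovec, and
 * only its length is used, to size the buffer picked at import time. The
 * address comes from the selected buffer, not from the user iovec.
 */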
static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_iov_compat_buffer_select_prep(rw);
#endif

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

static int __io_import_iovec(int ddir, struct io_kiocb *req,
			     struct io_async_rw *io,
			     unsigned int issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct iovec *iov;
	void __user *buf;
	int nr_segs, ret;
	size_t sqe_len;

	buf = u64_to_user_ptr(rw->addr);
	sqe_len = rw->len;

	if (!def->vectored || req->flags & REQ_F_BUFFER_SELECT) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
			if (!buf)
				return -ENOBUFS;
			rw->addr = (unsigned long) buf;
			rw->len = sqe_len;
		}

		return import_ubuf(ddir, buf, sqe_len, &io->iter);
	}

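	/*
	 * Reuse a previously allocated iovec if the async data has one
	 * cached; otherwise start from the inline fast_iov and let
	 * __import_iovec() allocate a larger array if needed.
	 */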
	if (io->free_iovec) {
		nr_segs = io->free_iov_nr;
		iov = io->free_iovec;
	} else {
		iov = &io->fast_iov;
		nr_segs = 1;
	}
	ret = __import_iovec(ddir, buf, sqe_len, nr_segs, &iov, &io->iter,
			     req->ctx->compat);
	if (unlikely(ret < 0))
		return ret;
	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		io->free_iov_nr = io->iter.nr_segs;
		kfree(io->free_iovec);
		io->free_iovec = iov;
	}
	return 0;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct io_async_rw *io,
				  unsigned int issue_flags)
{
	int ret;

	ret = __io_import_iovec(rw, req, io, issue_flags);
	if (unlikely(ret < 0))
		return ret;

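	/* snapshot the iter so a partial or failed attempt can be rewound */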
	iov_iter_save_state(&io->iter, &io->iter_state);
	return 0;
}

static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_rw *rw = req->async_data;

	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		return;

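	/*
	 * Under KASAN the cached iovec is dropped so stale use of it can be
	 * caught, then try to park the async data in the ctx-wide cache.
	 */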
	io_alloc_cache_kasan(&rw->free_iovec, &rw->free_iov_nr);
	if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	/*
	 * Disable quick recycling for anything that's gone through io-wq.
	 * In theory, this should be fine to cleanup. However, some read or
	 * write iter handling touches the iovec AFTER having called into the
	 * handler, e.g. to reexpand or revert. This means we can have:
	 *
	 * task			io-wq
	 *   issue
	 *   punt to io-wq
	 *			issue
	 *			  blkdev_write_iter()
	 *			    ->ki_complete()
	 *			      io_complete_rw()
	 *			        queue tw complete
	 *   run tw
	 *     req_rw_cleanup
	 *			iov_iter_count() <- look at iov_iter again
	 *
	 * which can lead to a UAF. This is only possible for io-wq offload
	 * as the cleanup can run in parallel. As io-wq is not the fast path,
	 * just leave cleanup to the end.
	 *
	 * This is really a bug in the core code that does this; any issue
	 * path should assume that a successful (or -EIOCBQUEUED) return can
	 * mean that the underlying data can be gone at any time. But that
	 * should be fixed separately, and then this check could be killed.
	 */
	if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
		req->flags &= ~REQ_F_NEED_CLEANUP;
		io_rw_recycle(req, issue_flags);
	}
}

static int io_rw_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_rw *rw;

	rw = io_uring_alloc_async_data(&ctx->rw_cache, req);
	if (!rw)
		return -ENOMEM;
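	/* a cached entry may still hold a previously allocated iovec */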
	if (rw->free_iovec)
		req->flags |= REQ_F_NEED_CLEANUP;
	rw->bytes_done = 0;
	return 0;
}

static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import)
{
	struct io_async_rw *rw;

	if (io_rw_alloc_async(req))
		return -ENOMEM;

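	/* buffer-selected requests import at issue time, once a buffer is picked */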
	if (!do_import || io_do_buffer_select(req))
		return 0;

	rw = req->async_data;
	return io_import_iovec(ddir, req, rw, 0);
}

static inline void io_meta_save_state(struct io_async_rw *io)
{
	io->meta_state.seed = io->meta.seed;
	iov_iter_save_state(&io->meta.iter, &io->meta_state.iter_meta);
}

static inline void io_meta_restore(struct io_async_rw *io, struct kiocb *kiocb)
{
	if (kiocb->ki_flags & IOCB_HAS_METADATA) {
		io->meta.seed = io->meta_state.seed;
		iov_iter_restore(&io->meta.iter, &io->meta_state.iter_meta);
	}
}

static int io_prep_rw_pi(struct io_kiocb *req, struct io_rw *rw, int ddir,
			 u64 attr_ptr, u64 attr_type_mask)
{
	struct io_uring_attr_pi pi_attr;
	struct io_async_rw *io;
	int ret;

	if (copy_from_user(&pi_attr, u64_to_user_ptr(attr_ptr),
			   sizeof(pi_attr)))
		return -EFAULT;

	if (pi_attr.rsvd)
		return -EINVAL;

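	/*
	 * Carry the PI attributes in the async data and map the user buffer
	 * that holds the integrity payload for this IO.
	 */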
	io = req->async_data;
	io->meta.flags = pi_attr.flags;
	io->meta.app_tag = pi_attr.app_tag;
	io->meta.seed = pi_attr.seed;
	ret = import_ubuf(ddir, u64_to_user_ptr(pi_attr.addr),
			  pi_attr.len, &io->meta.iter);
	if (unlikely(ret < 0))
		return ret;
	req->flags |= REQ_F_HAS_METADATA;
	io_meta_save_state(io);
	return ret;
}

static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      int ddir, bool do_import)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned ioprio;
	u64 attr_type_mask;
	int ret;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.dio_complete = NULL;
	rw->kiocb.ki_flags = 0;

	if (req->ctx->flags & IORING_SETUP_IOPOLL)
		rw->kiocb.ki_complete = io_complete_rw_iopoll;
	else
		rw->kiocb.ki_complete = io_complete_rw;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);
	ret = io_prep_rw_setup(req, ddir, do_import);

	if (unlikely(ret))
		return ret;

	attr_type_mask = READ_ONCE(sqe->attr_type_mask);
	if (attr_type_mask) {
		u64 attr_ptr;

		/* only PI attribute is supported currently */
		if (attr_type_mask != IORING_RW_ATTR_FLAG_PI)
			return -EINVAL;

		attr_ptr = READ_ONCE(sqe->attr_ptr);
		ret = io_prep_rw_pi(req, rw, ddir, attr_ptr, attr_type_mask);
	}
	return ret;
}

int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_DEST, true);
}

int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_SOURCE, true);
}

static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		       int ddir)
{
	const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT);
	int ret;

	ret = io_prep_rw(req, sqe, ddir, do_import);
	if (unlikely(ret))
		return ret;
	if (do_import)
		return 0;

	/*
	 * Have to do this validation here, as rw->len might have changed
	 * due to buffer selection by the time io_read() runs.
	 */
	return io_iov_buffer_select_prep(req);
}

int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_DEST);
}

int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_SOURCE);
}

static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			    int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_rsrc_node *node;
	struct io_async_rw *io;
	int ret;

	ret = io_prep_rw(req, sqe, ddir, false);
	if (unlikely(ret))
		return ret;

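	/* fixed buffers resolve the registered buffer node at prep time */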
	node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
	if (!node)
		return -EFAULT;
	io_req_assign_buf_node(req, node);

	io = req->async_data;
	ret = io_import_fixed(ddir, &io->iter, node->buf, rw->addr, rw->len);
	iov_iter_save_state(&io->iter, &io->iter_state);
	return ret;
}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw_fixed(req, sqe, ITER_DEST);
}

int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw_fixed(req, sqe, ITER_SOURCE);
}

/*
 * Multishot read is prepared just like a normal read/write request, the
 * only difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	int ret;

	/* must be used with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	ret = io_prep_rw(req, sqe, ITER_DEST, false);
	if (unlikely(ret))
		return ret;

	if (rw->addr || rw->len)
		return -EINVAL;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->uring_lock);
	io_rw_recycle(req, 0);
}

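/*
 * A ki_pos of -1 means "use the file position": non-stream files load f_pos
 * here (and write it back at completion via REQ_F_CUR_POS), while stream
 * files always do positionless IO.
 */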
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
#ifdef CONFIG_BLOCK
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_async_rw *io = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;

	io_meta_restore(io, &rw->kiocb);
	iov_iter_restore(&io->iter, &io->iter_state);
	return true;
#else
	return false;
#endif
}

static void io_req_end_write(struct io_kiocb *req)
{
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		kiocb_end_write(&rw->kiocb);
	}
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}

static void __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (res == req->cqe.res)
		return;
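	/* short or failed IO: reissue on a transient -EAGAIN, else fail */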
	if (res == -EAGAIN && io_rw_should_reissue(req)) {
		req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
	} else {
		req_set_fail(req);
		req->cqe.res = res;
	}
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}

void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
		long res = kiocb->dio_complete(rw->kiocb.private);

		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0);

	io_req_rw_cleanup(req, 0);
	io_req_task_complete(req, ts);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
		__io_complete_rw_common(req, res);
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
			return;
		}
		req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	/* IO was queued async, completion will happen later */
	if (ret == -EIOCBQUEUED)
		return;

	/* transform internal restart error codes */
	if (unlikely(ret < 0)) {
		switch (ret) {
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTARTNOHAND:
		case -ERESTART_RESTARTBLOCK:
			/*
			 * We can't just restart the syscall, since previously
			 * submitted sqes may already be in progress. Just fail
			 * this IO with EINTR.
			 */
			ret = -EINTR;
			break;
		}
	}

	if (req->ctx->flags & IORING_SETUP_IOPOLL)
		io_complete_rw_iopoll(&rw->kiocb, ret);
	else
		io_complete_rw(&rw->kiocb, ret);
}

static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
		__io_complete_rw_common(req, ret);
		/*
		 * Safe to call io_req_io_end() from here as we're inline
		 * from the submission path.
		 */
		io_req_io_end(req);
		io_req_set_res(req, final_ret, io_put_kbuf(req, ret, issue_flags));
		io_req_rw_cleanup(req, issue_flags);
		return IOU_OK;
	} else {
		io_rw_done(req, ret);
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

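		/*
		 * A bvec iter (registered buffer) holds kernel pages that
		 * ->read()/->write() can't take, so fall back to the original
		 * user address and track progress in rw->addr/len instead of
		 * advancing the iter.
		 */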
		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/*
	 * Never retry for NOWAIT or a request with metadata, we just complete
	 * with -EAGAIN.
	 */
	if (req->flags & (REQ_F_NOWAIT | REQ_F_HAS_METADATA))
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (io_file_can_poll(req) ||
	    !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return file->f_op->read_iter(&rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
	if (unlikely(ret))
		return ret;
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if (kiocb->ki_flags & IOCB_NOWAIT ||
	    ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;
		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		req->iopoll_completed = 0;
		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
			/* make sure every req only blocks once */
			req->flags &= ~REQ_F_IOPOLL_STATE;
			req->iopoll_start = ktime_get_ns();
		}
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
	}

	if (req->flags & REQ_F_HAS_METADATA) {
		struct io_async_rw *io = req->async_data;

		/*
		 * We have a union of meta fields with wpq used for buffered-io
		 * in io_async_rw, so fail it here.
		 */
		if (!(req->file->f_flags & O_DIRECT))
			return -EOPNOTSUPP;
		kiocb->ki_flags |= IOCB_HAS_METADATA;
		kiocb->private = &io->meta;
	}

	return 0;
}

static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret;
	loff_t *ppos;

	if (io_do_buffer_select(req)) {
		ret = io_import_iovec(ITER_DEST, req, io, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	}
	ret = io_rw_init_file(req, FMODE_READ, READ);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLIN)))
			return -EAGAIN;
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	ret = io_iter_do_read(rw, &io->iter);

	/*
	 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
	 * issue, even though they should be returning -EAGAIN. To be safe,
	 * retry from blocking context for either.
	 */
	if (ret == -EOPNOTSUPP && force_nonblock)
		ret = -EAGAIN;

	if (ret == -EAGAIN) {
		/* If we can poll, just do that. */
		if (io_file_can_poll(req))
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
		   (issue_flags & IO_URING_F_MULTISHOT)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&io->iter, &io->iter_state);
	io_meta_restore(io, kiocb);

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&io->iter, ret);
		if (!iov_iter_count(&io->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&io->iter, &io->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&io->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &io->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&io->iter, &io->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than delegate to kfree */
	return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = __io_read(req, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, issue_flags);

	return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!io_file_can_poll(req))
		return -EBADFD;

	/* make it sync, multishot doesn't support async execution */
	rw->kiocb.ki_complete = NULL;
	ret = __io_read(req, issue_flags);

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it.
	 */
	if (ret == -EAGAIN) {
		/*
		 * Reset rw->len to 0 again to avoid clamping future mshot
		 * reads, in case the buffer size varies.
		 */
		if (io_kbuf_recycle(req, issue_flags))
			rw->len = 0;
		if (issue_flags & IO_URING_F_MULTISHOT)
			return IOU_ISSUE_SKIP_COMPLETE;
		return -EAGAIN;
	} else if (ret <= 0) {
		io_kbuf_recycle(req, issue_flags);
		if (ret < 0)
			req_set_fail(req);
	} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		cflags = io_put_kbuf(req, ret, issue_flags);
	} else {
		/*
		 * Any successful return value will keep the multishot read
		 * armed, if it's still set. Put our buffer and post a CQE. If
		 * we fail to post a CQE, or multishot is no longer set, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, ret, issue_flags);
		rw->len = 0; /* similarly to above, reset len to 0 */

		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				/*
				 * Force retry, as we might have more data to
				 * be read and otherwise it won't get retried
				 * until (if ever) another poll is triggered.
				 */
				io_poll_multishot_retry(req);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return -EAGAIN;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	io_req_rw_cleanup(req, issue_flags);
	if (issue_flags & IO_URING_F_MULTISHOT)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb)
{
	struct inode *inode;
	bool ret;

	if (!(req->flags & REQ_F_ISREG))
		return true;
	if (!(kiocb->ki_flags & IOCB_NOWAIT)) {
		kiocb_start_write(kiocb);
		return true;
	}

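	/*
	 * With IOCB_NOWAIT we can't block on a frozen fs, so take the freeze
	 * protection with a trylock and, on success, release the lockdep
	 * ownership so kiocb_end_write() can drop it at completion time.
	 */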
	inode = file_inode(kiocb->ki_filp);
	ret = sb_start_write_trylock(inode->i_sb);
	if (ret)
		__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
	return ret;
}

int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret, ret2;
	loff_t *ppos;

	ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
			goto ret_eagain;

		/* Check if we can support NOWAIT. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
		    !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
		    (req->flags & REQ_F_ISREG))
			goto ret_eagain;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	if (unlikely(!io_kiocb_start_write(req, kiocb)))
		return -EAGAIN;
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &io->iter);
	else
		ret2 = -EINVAL;

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto ret_eagain;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						   req->cqe.res, ret2);

			/*
			 * This is a partial write. The file pos has already
			 * been updated; set up the async struct to complete
			 * the request in the worker, and update bytes_done
			 * to account for the bytes already written.
			 */
			iov_iter_save_state(&io->iter, &io->iter_state);
			io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return -EAGAIN;
		}
done:
		return kiocb_done(req, ret2, issue_flags);
	} else {
ret_eagain:
		iov_iter_restore(&io->iter, &io->iter_state);
		io_meta_restore(io, kiocb);
		if (kiocb->ki_flags & IOCB_WRITE)
			io_req_end_write(req);
		return -EAGAIN;
	}
}

void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}

static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob,
				 unsigned int poll_flags)
{
	struct file *file = req->file;

	if (req->opcode == IORING_OP_URING_CMD) {
		struct io_uring_cmd *ioucmd;

		ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
		return file->f_op->uring_cmd_iopoll(ioucmd, iob, poll_flags);
	} else {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		return file->f_op->iopoll(&rw->kiocb, iob, poll_flags);
	}
}

static u64 io_hybrid_iopoll_delay(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	struct hrtimer_sleeper timer;
	enum hrtimer_mode mode;
	ktime_t kt;
	u64 sleep_time;

	if (req->flags & REQ_F_IOPOLL_STATE)
		return 0;

	if (ctx->hybrid_poll_time == LLONG_MAX)
		return 0;

	/* sleep for half of the tracked completion time before polling */
	sleep_time = ctx->hybrid_poll_time / 2;

	kt = ktime_set(0, sleep_time);
	req->flags |= REQ_F_IOPOLL_STATE;

	mode = HRTIMER_MODE_REL;
	hrtimer_setup_sleeper_on_stack(&timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&timer.timer, kt);
	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_sleeper_start_expires(&timer, mode);

	if (timer.task)
		io_schedule();

	hrtimer_cancel(&timer.timer);
	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&timer.timer);
	return sleep_time;
}

static int io_uring_hybrid_poll(struct io_kiocb *req,
				struct io_comp_batch *iob, unsigned int poll_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 runtime, sleep_time;
	int ret;

	sleep_time = io_hybrid_iopoll_delay(ctx, req);
	ret = io_uring_classic_poll(req, iob, poll_flags);
	runtime = ktime_get_ns() - req->iopoll_start - sleep_time;

	/*
	 * Use minimum sleep time if we're polling devices with different
	 * latencies. We could get more completions from the faster ones.
	 */
	if (ctx->hybrid_poll_time > runtime)
		ctx->hybrid_poll_time = runtime;

	return ret;
}

int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL)
			ret = io_uring_hybrid_poll(req, &iob, poll_flags);
		else
			ret = io_uring_classic_poll(req, &iob, poll_flags);

		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(&iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(&iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0);
		if (req->opcode != IORING_OP_URING_CMD)
			io_req_rw_cleanup(req, 0);
	}
	if (unlikely(!nr_events))
		return 0;

	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);

	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
		return 0;
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);
	return nr_events;
}

void io_rw_cache_free(const void *entry)
{
	struct io_async_rw *rw = (struct io_async_rw *) entry;

	if (rw->free_iovec)
		kfree(rw->free_iovec);
	kfree(rw);
}