/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/blk-mq.h>

/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};

struct io_uring_cmd_data {
	void		*op_data;
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}

static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
}
#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
	io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
	((pdu_type *)&(cmd)->pdu) \
)

#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags);
int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
				  const struct iovec __user *uvec,
				  size_t uvec_segs,
				  int ddir, struct iov_iter *iter,
				  unsigned issue_flags);

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2,
		       unsigned issue_flags);

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			       void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			       unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and only use the
 * mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
				  unsigned int issue_flags);

/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

#else
static inline int
io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			  struct iov_iter *iter, struct io_uring_cmd *ioucmd,
			  unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}
static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
						const struct iovec __user *uvec,
						size_t uvec_segs,
						int ddir, struct iov_iter *iter,
						unsigned issue_flags)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
				     u64 ret2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
						unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
#endif
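/*
 * Illustrative sketch (editorial, not part of this header's documentation):
 * a driver's ->uring_cmd() handler commonly stashes per-command state in
 * the 32-byte pdu area via io_uring_cmd_to_pdu(), defers completion to task
 * context, and posts the CQE with io_uring_cmd_done().  The names
 * foo_cmd_pdu, foo_cmd_tw() and foo_issue() below are hypothetical.
 *
 *	struct foo_cmd_pdu {
 *		int status;
 *	};
 *
 *	static void foo_cmd_tw(struct io_uring_cmd *ioucmd,
 *			       unsigned issue_flags)
 *	{
 *		struct foo_cmd_pdu *pdu =
 *			io_uring_cmd_to_pdu(ioucmd, struct foo_cmd_pdu);
 *
 *		io_uring_cmd_done(ioucmd, pdu->status, 0, issue_flags);
 *	}
 *
 *	static int foo_issue(struct io_uring_cmd *ioucmd,
 *			     unsigned int issue_flags)
 *	{
 *		struct foo_cmd_pdu *pdu =
 *			io_uring_cmd_to_pdu(ioucmd, struct foo_cmd_pdu);
 *
 *		pdu->status = 0;
 *		io_uring_cmd_do_in_task_lazy(ioucmd, foo_cmd_tw);
 *		return -EIOCBQUEUED;
 *	}
 */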
/*
 * Polled completions must ensure they are coming from a poll queue, and
 * hence are completed inside the usual poll handling loops.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}

/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}

static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->tctx->task;
}

static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->async_data;
}

int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
			    void (*release)(void *), unsigned int index,
			    unsigned int issue_flags);
int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
			      unsigned int issue_flags);

#endif /* _LINUX_IO_URING_CMD_H */
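/*
 * Illustrative sketch (editorial, not part of this header's documentation):
 * io_buffer_register_bvec() lends the bvecs of a driver-owned struct request
 * to the ring's fixed buffer table at @index so userspace can target them
 * with fixed-buffer operations; @release is invoked with the request pointer
 * once the last reference to the registered buffer is dropped.  The names
 * foo_rq_release() and foo_lend_rq(), and ending the request from the
 * release callback, are hypothetical driver logic.
 *
 *	static void foo_rq_release(void *priv)
 *	{
 *		struct request *rq = priv;
 *
 *		blk_mq_end_request(rq, BLK_STS_OK);
 *	}
 *
 *	static int foo_lend_rq(struct io_uring_cmd *ioucmd, struct request *rq,
 *			       unsigned int index, unsigned int issue_flags)
 *	{
 *		return io_buffer_register_bvec(ioucmd, rq, foo_rq_release,
 *					       index, issue_flags);
 *	}
 *
 * A matching io_buffer_unregister_bvec(ioucmd, index, issue_flags) drops the
 * table entry again once the I/O against it has completed.
 */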