/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};

struct io_uring_cmd_data {
	void			*op_data;
	struct io_uring_sqe	sqes[2];
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}

static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
}
#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
	io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
	((pdu_type *)&(cmd)->pdu) \
)

#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd);

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2,
		       unsigned issue_flags);

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			       void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			       unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and only use the
 * mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
				  unsigned int issue_flags);

/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
					    struct iov_iter *iter, void *ioucmd)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
				     u64 ret2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
						unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
#endif

/*
 * Polled completions must ensure they are coming from a poll queue, and
 * hence are completed inside the usual poll handling loops.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}

/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}

static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->tctx->task;
}

static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->async_data;
}

#endif /* _LINUX_IO_URING_CMD_H */
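
/*
 * Illustrative sketch (not part of the header above): how a driver's
 * ->uring_cmd() file operation might use these helpers, assuming the usual
 * convention of returning -EIOCBQUEUED when the command will complete
 * asynchronously. Every "foo_*" name (foo_cmd_pdu, foo_cmd_tw_cb,
 * foo_hw_submit, foo_hw_complete, foo_uring_cmd, foo_fops) is hypothetical;
 * only the io_uring_cmd_* helpers come from this header.
 */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/io_uring/cmd.h>	/* the header shown above */

/* per-command state kept in the 32-byte inline pdu area */
struct foo_cmd_pdu {
	u32	cmd_op;
	int	status;
};

/* task-work callback: posts the CQE from the submitter's task context */
static void foo_cmd_tw_cb(struct io_uring_cmd *ioucmd, unsigned issue_flags)
{
	struct foo_cmd_pdu *pdu = io_uring_cmd_to_pdu(ioucmd, struct foo_cmd_pdu);

	/* @issue_flags is the mask handed in by core io_uring, passed through */
	io_uring_cmd_done(ioucmd, pdu->status, 0, issue_flags);
}

/* called from the device's completion path, e.g. hard irq context */
static void foo_hw_complete(struct io_uring_cmd *ioucmd, int status)
{
	struct foo_cmd_pdu *pdu = io_uring_cmd_to_pdu(ioucmd, struct foo_cmd_pdu);

	pdu->status = status;
	/* defer CQE posting to task context with lazy-wake semantics */
	io_uring_cmd_do_in_task_lazy(ioucmd, foo_cmd_tw_cb);
}

/* hypothetical hardware submission hook, body elided */
static void foo_hw_submit(const void *hw_cmd, struct io_uring_cmd *ioucmd)
{
	/* ... program the device with @hw_cmd and stash @ioucmd so the irq
	 * handler can later call foo_hw_complete(ioucmd, status) ... */
}

/* the driver's ->uring_cmd() handler */
static int foo_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	const void *hw_cmd = io_uring_sqe_cmd(ioucmd->sqe);
	struct foo_cmd_pdu *pdu = io_uring_cmd_to_pdu(ioucmd, struct foo_cmd_pdu);

	pdu->cmd_op = ioucmd->cmd_op;

	foo_hw_submit(hw_cmd, ioucmd);

	/* the CQE will be posted later via foo_hw_complete() */
	return -EIOCBQUEUED;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.uring_cmd	= foo_uring_cmd,
};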