io_uring/msg_ring: fix missing lock on overflow for IOPOLL

If the target ring is configured with IOPOLL, then we always need to hold
the target ring's uring_lock before posting CQEs. We could just grab it
unconditionally, but since we don't expect many target rings to be of this
type, make grabbing the uring_lock conditional on the ring type.

Link: https://lore.kernel.org/io-uring/Y8krlYa52%2F0YGqkg@ip-172-31-85-199.ec2.internal/
Reported-by: Xingyuan Mo <hdthky0@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
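
As a usage illustration (not part of the patch): a minimal liburing sketch of
the case this fixes, where IORING_OP_MSG_RING targets a ring created with
IORING_SETUP_IOPOLL. It assumes liburing 2.2+ for io_uring_prep_msg_ring();
error handling is trimmed for brevity.

#include <stdio.h>
#include <liburing.h>

int main(void)
{
        struct io_uring src, target;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;

        /* Target ring uses IOPOLL: CQEs posted to it need its uring_lock. */
        if (io_uring_queue_init(8, &target, IORING_SETUP_IOPOLL) < 0)
                return 1;
        if (io_uring_queue_init(8, &src, 0) < 0)
                return 1;

        /* Post a CQE (user_data 0xcafe, res/len 0) to the target ring. */
        sqe = io_uring_get_sqe(&src);
        io_uring_prep_msg_ring(sqe, target.ring_fd, 0, 0xcafe, 0);
        io_uring_submit(&src);

        /* Sender's completion: res is 0 on success, -EOVERFLOW on failure. */
        if (io_uring_wait_cqe(&src, &cqe) == 0) {
                printf("msg_ring res=%d\n", cqe->res);
                io_uring_cqe_seen(&src, cqe);
        }

        /* The message arrives as a CQE on the IOPOLL target ring. */
        if (io_uring_peek_cqe(&target, &cqe) == 0) {
                printf("target user_data=0x%llx\n",
                       (unsigned long long) cqe->user_data);
                io_uring_cqe_seen(&target, cqe);
        }

        io_uring_queue_exit(&src);
        io_uring_queue_exit(&target);
        return 0;
}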

 io_uring/msg_ring.c | 39 ++++++++++++++++++++++++++++++---------
 1 file changed, 30 insertions(+), 9 deletions(-)
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -65,20 +65,33 @@
         struct io_ring_ctx *target_ctx = req->file->private_data;
         int ret = 0;
 
-        if (current->flags & PF_EXITING)
+        if (current->flags & PF_EXITING) {
                 ret = -EOWNERDEAD;
-        else if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
-                ret = -EOVERFLOW;
+        } else {
+                /*
+                 * If the target ring is using IOPOLL mode, then we need to be
+                 * holding the uring_lock for posting completions. Other ring
+                 * types rely on the regular completion locking, which is
+                 * handled while posting.
+                 */
+                if (target_ctx->flags & IORING_SETUP_IOPOLL)
+                        mutex_lock(&target_ctx->uring_lock);
+                if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+                        ret = -EOVERFLOW;
+                if (target_ctx->flags & IORING_SETUP_IOPOLL)
+                        mutex_unlock(&target_ctx->uring_lock);
+        }
 
         if (ret < 0)
                 req_set_fail(req);
         io_req_queue_tw_complete(req, ret);
 }
 
-static int io_msg_ring_data(struct io_kiocb *req)
+static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
 {
         struct io_ring_ctx *target_ctx = req->file->private_data;
         struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+        int ret;
 
         if (msg->src_fd || msg->dst_fd || msg->flags)
                 return -EINVAL;
@@ -106,10 +119,18 @@
                 return IOU_ISSUE_SKIP_COMPLETE;
         }
 
-        if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
-                return 0;
-
-        return -EOVERFLOW;
+        ret = -EOVERFLOW;
+        if (target_ctx->flags & IORING_SETUP_IOPOLL) {
+                if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+                        return -EAGAIN;
+                if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+                        ret = 0;
+                io_double_unlock_ctx(target_ctx);
+        } else {
+                if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+                        ret = 0;
+        }
+        return ret;
 }
 
 static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
@@ -244,7 +265,7 @@
 
         switch (msg->cmd) {
         case IORING_MSG_DATA:
-                ret = io_msg_ring_data(req);
+                ret = io_msg_ring_data(req, issue_flags);
                 break;
         case IORING_MSG_SEND_FD:
                 ret = io_msg_send_fd(req, issue_flags);
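
A note on the design choice visible in the diff: the two call sites take the
target lock differently. io_msg_tw_complete() runs from task_work, where
sleeping is fine, so it grabs the target uring_lock with a plain mutex_lock().
io_msg_ring_data() runs at issue time and may already hold the source ring's
uring_lock, so it goes through io_double_lock_ctx(), which (at the time of
this patch) only trylocks the target in that case and returns -EAGAIN so the
request is retried via io-wq rather than risking an ABBA deadlock between the
two rings' locks.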