Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

io_uring: hold 'ctx' reference around task_work queue + execute

We're holding the request reference, but we need to go one higher
to ensure that the ctx remains valid after the request has finished.
If the ring is closed with pending task_work inflight, and the
given io_kiocb finishes sync during issue, then we need a reference
to the ring itself around the task_work execution cycle.

Cc: stable@vger.kernel.org # v5.7+
Reported-by: syzbot+9b260fc33297966f5a8e@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

+15 −0
fs/io_uring.c
@@ io_req_task_submit() @@
 static void io_req_task_submit(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct io_ring_ctx *ctx = req->ctx;
 
 	__io_req_task_submit(req);
+	percpu_ref_put(&ctx->refs);
 }
 
 static void io_req_task_queue(struct io_kiocb *req)
@@ io_req_task_queue() @@
 	int ret;
 
 	init_task_work(&req->task_work, io_req_task_submit);
+	percpu_ref_get(&req->ctx->refs);
 
 	ret = io_req_task_work_add(req, &req->task_work);
 	if (unlikely(ret)) {
@@ io_rw_resubmit() @@
 		refcount_inc(&req->refs);
 		io_queue_async_work(req);
 	}
+
+	percpu_ref_put(&ctx->refs);
 }
 #endif
@@ io_rw_reissue() @@
 		return false;
 
 	init_task_work(&req->task_work, io_rw_resubmit);
+	percpu_ref_get(&req->ctx->refs);
+
 	ret = io_req_task_work_add(req, &req->task_work);
 	if (!ret)
 		return true;
@@ io_async_buf_func() @@
 	list_del_init(&wait->entry);
 
 	init_task_work(&req->task_work, io_req_task_submit);
+	percpu_ref_get(&req->ctx->refs);
+
 	/* submit ref gets dropped, acquire a new one */
 	refcount_inc(&req->refs);
 	ret = io_req_task_work_add(req, &req->task_work);
@@ __io_async_wake() @@
 
 	req->result = mask;
 	init_task_work(&req->task_work, func);
+	percpu_ref_get(&req->ctx->refs);
+
 	/*
 	 * If this fails, then the task is exiting. When a task exits, the
 	 * work gets canceled, so just cancel this request as well instead
@@ io_poll_task_func() @@
 static void io_poll_task_func(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct io_ring_ctx *ctx = req->ctx;
 	struct io_kiocb *nxt = NULL;
 
 	io_poll_task_handler(req, &nxt);
 	if (nxt)
 		__io_req_task_submit(nxt);
+	percpu_ref_put(&ctx->refs);
 }
 
 static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
@@ io_async_task_func() @@
 
 	if (io_poll_rewait(req, &apoll->poll)) {
 		spin_unlock_irq(&ctx->completion_lock);
+		percpu_ref_put(&ctx->refs);
 		return;
 	}
@@ io_async_task_func() @@
 	else
 		__io_req_task_cancel(req, -ECANCELED);
 
+	percpu_ref_put(&ctx->refs);
 	kfree(apoll->double_poll);
 	kfree(apoll);
 }