diff --git a/fs/io_uring.c b/fs/io_uring.c (7 insertions, 7 deletions)
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5019,10 +5019,10 @@
 		 * Can't handle multishot for double wait for now, turn it
 		 * into one-shot mode.
 		 */
-		if (!(req->poll.events & EPOLLONESHOT))
-			req->poll.events |= EPOLLONESHOT;
+		if (!(poll_one->events & EPOLLONESHOT))
+			poll_one->events |= EPOLLONESHOT;
 		/* double add on the same waitqueue head, ignore */
-		if (poll->head == head)
+		if (poll_one->head == head)
 			return;
 		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
 		if (!poll) {
@@ -9035,15 +9035,15 @@
 
 static void io_uring_clean_tctx(struct io_uring_task *tctx)
 {
+	struct io_wq *wq = tctx->io_wq;
 	struct io_tctx_node *node;
 	unsigned long index;
 
+	tctx->io_wq = NULL;
 	xa_for_each(&tctx->xa, index, node)
 		io_uring_del_task_file(index);
-	if (tctx->io_wq) {
-		io_wq_put_and_exit(tctx->io_wq);
-		tctx->io_wq = NULL;
-	}
+	if (wq)
+		io_wq_put_and_exit(wq);
 }
 
 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)