fs/io_uring.c | +28 -24
···
@@ -2037,6 +2037,28 @@
 	return false;
 }
 
+static void io_link_work_cb(struct io_wq_work **workptr)
+{
+	struct io_wq_work *work = *workptr;
+	struct io_kiocb *link = work->data;
+
+	io_queue_linked_timeout(link);
+	work->func = io_wq_submit_work;
+}
+
+static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
+{
+	struct io_kiocb *link;
+
+	io_prep_async_work(nxt, &link);
+	*workptr = &nxt->work;
+	if (link) {
+		nxt->work.flags |= IO_WQ_WORK_CB;
+		nxt->work.func = io_link_work_cb;
+		nxt->work.data = link;
+	}
+}
+
 static void io_fsync_finish(struct io_wq_work **workptr)
 {
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
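For context, not part of the patch: io-wq hands each work function a double pointer to its current work item, and if the function rewrites *workptr, the worker runs the returned item next instead of round-tripping through the queue. That contract is what io_wq_assign_next() builds on. Below is a minimal userspace model of it; wq_work, worker_run, and the handler names are invented for illustration, and the real loop in fs/io-wq.c adds locking, hashing, and cancellation on top.

	#include <stdio.h>

	/* Toy model of an io-wq work item: just a function pointer. */
	struct wq_work {
		void (*func)(struct wq_work **workptr);
	};

	static struct wq_work second;

	static void second_fn(struct wq_work **workptr)
	{
		(void)workptr;
		printf("dependent work item ran\n");
	}

	static void first_fn(struct wq_work **workptr)
	{
		printf("first work item ran, handing back a continuation\n");
		second.func = second_fn;
		*workptr = &second;	/* the core of io_wq_assign_next() */
	}

	/*
	 * Simplified worker loop: keep running whatever the handler left
	 * in 'work'; stop once a handler does not replace it.
	 */
	static void worker_run(struct wq_work *work)
	{
		while (work) {
			struct wq_work *old_work = work;

			work->func(&work);
			if (work == old_work)
				break;	/* no continuation handed back */
		}
	}

	int main(void)
	{
		struct wq_work first = { .func = first_fn };

		worker_run(&first);
		return 0;
	}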
···
@@ -2055,7 +2077,7 @@
 	io_cqring_add_event(req, ret);
 	io_put_req_find_next(req, &nxt);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 
 static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
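The next three hunks make the identical one-line substitution in the sync_file_range, accept, and poll completion paths. Each converted handler now follows the shape sketched below; io_some_finish is a made-up name and the operation body is elided:

	static void io_some_finish(struct io_wq_work **workptr)
	{
		struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
		struct io_kiocb *nxt = NULL;

		/* ... perform the operation and post its completion ... */
		io_put_req_find_next(req, &nxt);
		if (nxt)
			io_wq_assign_next(workptr, nxt);	/* was: *workptr = &nxt->work; */
	}

With the helper in place, a hand-off can no longer skip the link fix-up that io_wq_assign_next() performs.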
···
@@ -2111,7 +2133,7 @@
 	io_cqring_add_event(req, ret);
 	io_put_req_find_next(req, &nxt);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 
 static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
···
@@ -2377,7 +2399,7 @@
 		return;
 	__io_accept(req, &nxt, false);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 #endif
 
···
@@ -2608,7 +2630,7 @@
 		req_set_fail_links(req);
 	io_put_req_find_next(req, &nxt);
 	if (nxt)
-		*workptr = &nxt->work;
+		io_wq_assign_next(workptr, nxt);
 }
 
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
···
@@ -3271,15 +3293,6 @@
 	return 0;
 }
 
-static void io_link_work_cb(struct io_wq_work **workptr)
-{
-	struct io_wq_work *work = *workptr;
-	struct io_kiocb *link = work->data;
-
-	io_queue_linked_timeout(link);
-	work->func = io_wq_submit_work;
-}
-
 static void io_wq_submit_work(struct io_wq_work **workptr)
 {
 	struct io_wq_work *work = *workptr;
···
@@ -3316,17 +3329,8 @@
 	}
 
 	/* if a dependent link is ready, pass it back */
-	if (!ret && nxt) {
-		struct io_kiocb *link;
-
-		io_prep_async_work(nxt, &link);
-		*workptr = &nxt->work;
-		if (link) {
-			nxt->work.flags |= IO_WQ_WORK_CB;
-			nxt->work.func = io_link_work_cb;
-			nxt->work.data = link;
-		}
-	}
+	if (!ret && nxt)
+		io_wq_assign_next(workptr, nxt);
 }
 
 static bool io_req_op_valid(int op)
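Net effect, visible in the diff itself: the bare *workptr = &nxt->work assignments in the four completion handlers bypassed io_prep_async_work() and the linked-timeout setup that only io_wq_submit_work() performed, so a dependent request carrying a linked timeout could be handed back without the io_link_work_cb trampoline. Routing every hand-off through io_wq_assign_next() closes that gap. The trampoline is a one-shot: it arms the linked timeout, then restores io_wq_submit_work as the handler. A runnable toy of that one-shot swap follows; the names are invented, the timeout is modeled as a printf, and the two calls stand in for the IO_WQ_WORK_CB pre-call plus the normal dispatch, as the flag's use in the patch suggests.

	#include <stdio.h>

	struct wq_work {
		void (*func)(struct wq_work **workptr);
		const void *data;	/* models nxt->work.data = link */
	};

	static void submit_work(struct wq_work **workptr)
	{
		(void)workptr;
		printf("2: request itself executes\n");
	}

	/* One-shot trampoline: arm the timeout, then swap the real handler in. */
	static void link_work_cb(struct wq_work **workptr)
	{
		struct wq_work *work = *workptr;

		printf("1: linked timeout armed for %s\n", (const char *)work->data);
		work->func = submit_work;	/* next invocation runs the request */
	}

	int main(void)
	{
		struct wq_work work = { .func = link_work_cb, .data = "req" };
		struct wq_work *w = &work;

		w->func(&w);	/* models the IO_WQ_WORK_CB pre-call */
		w->func(&w);	/* models the normal dispatch */
		return 0;
	}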