Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests: ublk: pass 'ublk_thread *' to ->queue_io() and ->tgt_io_done()

'struct ublk_thread' is a task-local structure, and the related code becomes
more readable if we pass it via a parameter.

Meanwhile, pass 'ublk_thread *' to ublk_io_alloc_sqes() as well; this is
natural since we use a per-thread io_uring for handling IO.

More importantly, it helps greatly with removing the current ubq_daemon or
per-io-task limit.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250713143415.2857561-13-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Ming Lei and committed by
Jens Axboe
e0054835 b36c7325

+58 -44
+5 -3
tools/testing/selftests/ublk/fault_inject.c
··· 38 38 return 0; 39 39 } 40 40 41 - static int ublk_fault_inject_queue_io(struct ublk_queue *q, int tag) 41 + static int ublk_fault_inject_queue_io(struct ublk_thread *t, 42 + struct ublk_queue *q, int tag) 42 43 { 43 44 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); 44 45 struct io_uring_sqe *sqe; ··· 47 46 .tv_nsec = (long long)q->dev->private_data, 48 47 }; 49 48 50 - ublk_io_alloc_sqes(ublk_get_io(q, tag), &sqe, 1); 49 + ublk_io_alloc_sqes(t, &sqe, 1); 51 50 io_uring_prep_timeout(sqe, &ts, 1, 0); 52 51 sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, q->q_id, 1); 53 52 ··· 56 55 return 0; 57 56 } 58 57 59 - static void ublk_fault_inject_tgt_io_done(struct ublk_queue *q, 58 + static void ublk_fault_inject_tgt_io_done(struct ublk_thread *t, 59 + struct ublk_queue *q, 60 60 const struct io_uring_cqe *cqe) 61 61 { 62 62 unsigned tag = user_data_to_tag(cqe->user_data);
+14 -11
tools/testing/selftests/ublk/file_backed.c
··· 13 13 assert(0); 14 14 } 15 15 16 - static int loop_queue_flush_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag) 16 + static int loop_queue_flush_io(struct ublk_thread *t, struct ublk_queue *q, 17 + const struct ublksrv_io_desc *iod, int tag) 17 18 { 18 19 unsigned ublk_op = ublksrv_get_op(iod); 19 20 struct io_uring_sqe *sqe[1]; 20 21 21 - ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1); 22 + ublk_io_alloc_sqes(t, sqe, 1); 22 23 io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC); 23 24 io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE); 24 25 /* bit63 marks us as tgt io */ ··· 27 26 return 1; 28 27 } 29 28 30 - static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag) 29 + static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q, 30 + const struct ublksrv_io_desc *iod, int tag) 31 31 { 32 32 unsigned ublk_op = ublksrv_get_op(iod); 33 33 unsigned zc = ublk_queue_use_zc(q); ··· 38 36 void *addr = (zc | auto_zc) ? 
NULL : (void *)iod->addr; 39 37 40 38 if (!zc || auto_zc) { 41 - ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1); 39 + ublk_io_alloc_sqes(t, sqe, 1); 42 40 if (!sqe[0]) 43 41 return -ENOMEM; 44 42 ··· 54 52 return 1; 55 53 } 56 54 57 - ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 3); 55 + ublk_io_alloc_sqes(t, sqe, 3); 58 56 59 57 io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index); 60 58 sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK; ··· 74 72 return 2; 75 73 } 76 74 77 - static int loop_queue_tgt_io(struct ublk_queue *q, int tag) 75 + static int loop_queue_tgt_io(struct ublk_thread *t, struct ublk_queue *q, int tag) 78 76 { 79 77 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); 80 78 unsigned ublk_op = ublksrv_get_op(iod); ··· 82 80 83 81 switch (ublk_op) { 84 82 case UBLK_IO_OP_FLUSH: 85 - ret = loop_queue_flush_io(q, iod, tag); 83 + ret = loop_queue_flush_io(t, q, iod, tag); 86 84 break; 87 85 case UBLK_IO_OP_WRITE_ZEROES: 88 86 case UBLK_IO_OP_DISCARD: ··· 90 88 break; 91 89 case UBLK_IO_OP_READ: 92 90 case UBLK_IO_OP_WRITE: 93 - ret = loop_queue_tgt_rw_io(q, iod, tag); 91 + ret = loop_queue_tgt_rw_io(t, q, iod, tag); 94 92 break; 95 93 default: 96 94 ret = -EINVAL; ··· 102 100 return ret; 103 101 } 104 102 105 - static int ublk_loop_queue_io(struct ublk_queue *q, int tag) 103 + static int ublk_loop_queue_io(struct ublk_thread *t, struct ublk_queue *q, 104 + int tag) 106 105 { 107 - int queued = loop_queue_tgt_io(q, tag); 106 + int queued = loop_queue_tgt_io(t, q, tag); 108 107 109 108 ublk_queued_tgt_io(q, tag, queued); 110 109 return 0; 111 110 } 112 111 113 - static void ublk_loop_io_done(struct ublk_queue *q, 112 + static void ublk_loop_io_done(struct ublk_thread *t, struct ublk_queue *q, 114 113 const struct io_uring_cqe *cqe) 115 114 { 116 115 unsigned tag = user_data_to_tag(cqe->user_data);
+7 -6
tools/testing/selftests/ublk/kublk.c
··· 620 620 if (io_uring_sq_space_left(&t->ring) < 1) 621 621 io_uring_submit(&t->ring); 622 622 623 - ublk_io_alloc_sqes(io, sqe, 1); 623 + ublk_io_alloc_sqes(t, sqe, 1); 624 624 if (!sqe[0]) { 625 625 ublk_err("%s: run out of sqe. thread %u, tag %d\n", 626 626 __func__, t->idx, io->tag); ··· 714 714 return (t->state & UBLKSRV_THREAD_STOPPING) && ublk_thread_is_idle(t); 715 715 } 716 716 717 - static inline void ublksrv_handle_tgt_cqe(struct ublk_queue *q, 718 - struct io_uring_cqe *cqe) 717 + static inline void ublksrv_handle_tgt_cqe(struct ublk_thread *t, 718 + struct ublk_queue *q, 719 + struct io_uring_cqe *cqe) 719 720 { 720 721 if (cqe->res < 0 && cqe->res != -EAGAIN) 721 722 ublk_err("%s: failed tgt io: res %d qid %u tag %u, cmd_op %u\n", ··· 725 724 user_data_to_op(cqe->user_data)); 726 725 727 726 if (q->tgt_ops->tgt_io_done) 728 - q->tgt_ops->tgt_io_done(q, cqe); 727 + q->tgt_ops->tgt_io_done(t, q, cqe); 729 728 } 730 729 731 730 static void ublk_handle_cqe(struct ublk_thread *t, ··· 752 751 753 752 /* Don't retrieve io in case of target io */ 754 753 if (is_target_io(cqe->user_data)) { 755 - ublksrv_handle_tgt_cqe(q, cqe); 754 + ublksrv_handle_tgt_cqe(t, q, cqe); 756 755 return; 757 756 } 758 757 ··· 767 766 if (cqe->res == UBLK_IO_RES_OK) { 768 767 assert(tag < q->q_depth); 769 768 if (q->tgt_ops->queue_io) 770 - q->tgt_ops->queue_io(q, tag); 769 + q->tgt_ops->queue_io(t, q, tag); 771 770 } else if (cqe->res == UBLK_IO_RES_NEED_GET_DATA) { 772 771 io->flags |= UBLKSRV_NEED_GET_DATA | UBLKSRV_IO_FREE; 773 772 ublk_queue_io_cmd(io);
+5 -4
tools/testing/selftests/ublk/kublk.h
··· 144 144 int (*init_tgt)(const struct dev_ctx *ctx, struct ublk_dev *); 145 145 void (*deinit_tgt)(struct ublk_dev *); 146 146 147 - int (*queue_io)(struct ublk_queue *, int tag); 148 - void (*tgt_io_done)(struct ublk_queue *, const struct io_uring_cqe *); 147 + int (*queue_io)(struct ublk_thread *, struct ublk_queue *, int tag); 148 + void (*tgt_io_done)(struct ublk_thread *, struct ublk_queue *, 149 + const struct io_uring_cqe *); 149 150 150 151 /* 151 152 * Target specific command line handling ··· 314 313 return container_of(io, struct ublk_queue, ios[io->tag]); 315 314 } 316 315 317 - static inline int ublk_io_alloc_sqes(struct ublk_io *io, 316 + static inline int ublk_io_alloc_sqes(struct ublk_thread *t, 318 317 struct io_uring_sqe *sqes[], int nr_sqes) 319 318 { 320 - struct io_uring *ring = &io->t->ring; 319 + struct io_uring *ring = &t->ring; 321 320 unsigned left = io_uring_sq_space_left(ring); 322 321 int i; 323 322
+12 -9
tools/testing/selftests/ublk/null.c
··· 55 55 sqe->user_data = build_user_data(tag, ublk_op, 0, q_id, 1); 56 56 } 57 57 58 - static int null_queue_zc_io(struct ublk_queue *q, int tag) 58 + static int null_queue_zc_io(struct ublk_thread *t, struct ublk_queue *q, 59 + int tag) 59 60 { 60 61 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); 61 62 struct io_uring_sqe *sqe[3]; 62 63 63 - ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 3); 64 + ublk_io_alloc_sqes(t, sqe, 3); 64 65 65 66 io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index); 66 67 sqe[0]->user_data = build_user_data(tag, ··· 78 77 return 2; 79 78 } 80 79 81 - static int null_queue_auto_zc_io(struct ublk_queue *q, int tag) 80 + static int null_queue_auto_zc_io(struct ublk_thread *t, struct ublk_queue *q, 81 + int tag) 82 82 { 83 83 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); 84 84 struct io_uring_sqe *sqe[1]; 85 85 86 - ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1); 86 + ublk_io_alloc_sqes(t, sqe, 1); 87 87 __setup_nop_io(tag, iod, sqe[0], q->q_id); 88 88 return 1; 89 89 } 90 90 91 - static void ublk_null_io_done(struct ublk_queue *q, 92 - const struct io_uring_cqe *cqe) 91 + static void ublk_null_io_done(struct ublk_thread *t, struct ublk_queue *q, 92 + const struct io_uring_cqe *cqe) 93 93 { 94 94 unsigned tag = user_data_to_tag(cqe->user_data); 95 95 unsigned op = user_data_to_op(cqe->user_data); ··· 112 110 ublk_complete_io(q, tag, io->result); 113 111 } 114 112 115 - static int ublk_null_queue_io(struct ublk_queue *q, int tag) 113 + static int ublk_null_queue_io(struct ublk_thread *t, struct ublk_queue *q, 114 + int tag) 116 115 { 117 116 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); 118 117 unsigned auto_zc = ublk_queue_use_auto_zc(q); ··· 121 118 int queued; 122 119 123 120 if (auto_zc && !ublk_io_auto_zc_fallback(iod)) 124 - queued = null_queue_auto_zc_io(q, tag); 121 + queued = null_queue_auto_zc_io(t, q, tag); 125 122 else if (zc) 126 - queued = null_queue_zc_io(q, 
tag); 123 + queued = null_queue_zc_io(t, q, tag); 127 124 else { 128 125 ublk_complete_io(q, tag, iod->nr_sectors << 9); 129 126 return 0;
+15 -11
tools/testing/selftests/ublk/stripe.c
··· 123 123 assert(0); 124 124 } 125 125 126 - static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag) 126 + static int stripe_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q, 127 + const struct ublksrv_io_desc *iod, int tag) 127 128 { 128 129 const struct stripe_conf *conf = get_chunk_shift(q); 129 130 unsigned auto_zc = (ublk_queue_use_auto_zc(q) != 0); ··· 139 138 io->private_data = s; 140 139 calculate_stripe_array(conf, iod, s, base); 141 140 142 - ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, s->nr + extra); 141 + ublk_io_alloc_sqes(t, sqe, s->nr + extra); 143 142 144 143 if (zc) { 145 144 io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, io->buf_index); ··· 177 176 return s->nr + zc; 178 177 } 179 178 180 - static int handle_flush(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag) 179 + static int handle_flush(struct ublk_thread *t, struct ublk_queue *q, 180 + const struct ublksrv_io_desc *iod, int tag) 181 181 { 182 182 const struct stripe_conf *conf = get_chunk_shift(q); 183 183 struct io_uring_sqe *sqe[NR_STRIPE]; 184 184 int i; 185 185 186 - ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, conf->nr_files); 186 + ublk_io_alloc_sqes(t, sqe, conf->nr_files); 187 187 for (i = 0; i < conf->nr_files; i++) { 188 188 io_uring_prep_fsync(sqe[i], i + 1, IORING_FSYNC_DATASYNC); 189 189 io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE); ··· 193 191 return conf->nr_files; 194 192 } 195 193 196 - static int stripe_queue_tgt_io(struct ublk_queue *q, int tag) 194 + static int stripe_queue_tgt_io(struct ublk_thread *t, struct ublk_queue *q, 195 + int tag) 197 196 { 198 197 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); 199 198 unsigned ublk_op = ublksrv_get_op(iod); ··· 202 199 203 200 switch (ublk_op) { 204 201 case UBLK_IO_OP_FLUSH: 205 - ret = handle_flush(q, iod, tag); 202 + ret = handle_flush(t, q, iod, tag); 206 203 break; 207 204 case UBLK_IO_OP_WRITE_ZEROES: 208 205 case 
UBLK_IO_OP_DISCARD: ··· 210 207 break; 211 208 case UBLK_IO_OP_READ: 212 209 case UBLK_IO_OP_WRITE: 213 - ret = stripe_queue_tgt_rw_io(q, iod, tag); 210 + ret = stripe_queue_tgt_rw_io(t, q, iod, tag); 214 211 break; 215 212 default: 216 213 ret = -EINVAL; ··· 221 218 return ret; 222 219 } 223 220 224 - static int ublk_stripe_queue_io(struct ublk_queue *q, int tag) 221 + static int ublk_stripe_queue_io(struct ublk_thread *t, struct ublk_queue *q, 222 + int tag) 225 223 { 226 - int queued = stripe_queue_tgt_io(q, tag); 224 + int queued = stripe_queue_tgt_io(t, q, tag); 227 225 228 226 ublk_queued_tgt_io(q, tag, queued); 229 227 return 0; 230 228 } 231 229 232 - static void ublk_stripe_io_done(struct ublk_queue *q, 233 - const struct io_uring_cqe *cqe) 230 + static void ublk_stripe_io_done(struct ublk_thread *t, struct ublk_queue *q, 231 + const struct io_uring_cqe *cqe) 234 232 { 235 233 unsigned tag = user_data_to_tag(cqe->user_data); 236 234 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);