Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests: ublk: add kernel selftests for ublk

Both the ublk driver and its userspace side heavily depend on the io_uring
subsystem, and tools/testing/selftests/ is the best place for holding
these cross-subsystem tests.

Add a basic read/write IO test over this ublk null disk, and make sure ublk
is working.

More tests will be added.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250228161919.2869102-2-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Ming Lei and committed by
Jens Axboe
6aecda00 ed9f3112

+1466
+1
MAINTAINERS
··· 24237 24237 F: Documentation/block/ublk.rst 24238 24238 F: drivers/block/ublk_drv.c 24239 24239 F: include/uapi/linux/ublk_cmd.h 24240 + F: tools/testing/selftests/ublk/ 24240 24241 24241 24242 UBSAN 24242 24243 M: Kees Cook <kees@kernel.org>
+1
tools/testing/selftests/Makefile
··· 113 113 TARGETS += tmpfs 114 114 TARGETS += tpm2 115 115 TARGETS += tty 116 + TARGETS += ublk 116 117 TARGETS += uevent 117 118 TARGETS += user_events 118 119 TARGETS += vDSO
+3
tools/testing/selftests/ublk/.gitignore
··· 1 + kublk 2 + /tools 3 + *-verify.state
+12
tools/testing/selftests/ublk/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + 3 + CFLAGS += -O3 -Wl,-no-as-needed -Wall -I $(top_srcdir) 4 + LDLIBS += -lpthread -lm -luring 5 + 6 + TEST_PROGS := test_null_01.sh 7 + 8 + TEST_GEN_PROGS_EXTENDED = kublk 9 + 10 + include ../lib.mk 11 + 12 + $(TEST_GEN_PROGS_EXTENDED): kublk.c null.c
+1
tools/testing/selftests/ublk/config
··· 1 + CONFIG_BLK_DEV_UBLK=m
+1081
tools/testing/selftests/ublk/kublk.c
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Description: uring_cmd based ublk 4 + */ 5 + 6 + #include "kublk.h" 7 + 8 + unsigned int ublk_dbg_mask = UBLK_LOG; 9 + static const struct ublk_tgt_ops *tgt_ops_list[] = { 10 + &null_tgt_ops, 11 + }; 12 + 13 + static const struct ublk_tgt_ops *ublk_find_tgt(const char *name) 14 + { 15 + const struct ublk_tgt_ops *ops; 16 + int i; 17 + 18 + if (name == NULL) 19 + return NULL; 20 + 21 + for (i = 0; sizeof(tgt_ops_list) / sizeof(ops); i++) 22 + if (strcmp(tgt_ops_list[i]->name, name) == 0) 23 + return tgt_ops_list[i]; 24 + return NULL; 25 + } 26 + 27 + static inline int ublk_setup_ring(struct io_uring *r, int depth, 28 + int cq_depth, unsigned flags) 29 + { 30 + struct io_uring_params p; 31 + 32 + memset(&p, 0, sizeof(p)); 33 + p.flags = flags | IORING_SETUP_CQSIZE; 34 + p.cq_entries = cq_depth; 35 + 36 + return io_uring_queue_init_params(depth, r, &p); 37 + } 38 + 39 + static void ublk_ctrl_init_cmd(struct ublk_dev *dev, 40 + struct io_uring_sqe *sqe, 41 + struct ublk_ctrl_cmd_data *data) 42 + { 43 + struct ublksrv_ctrl_dev_info *info = &dev->dev_info; 44 + struct ublksrv_ctrl_cmd *cmd = (struct ublksrv_ctrl_cmd *)ublk_get_sqe_cmd(sqe); 45 + 46 + sqe->fd = dev->ctrl_fd; 47 + sqe->opcode = IORING_OP_URING_CMD; 48 + sqe->ioprio = 0; 49 + 50 + if (data->flags & CTRL_CMD_HAS_BUF) { 51 + cmd->addr = data->addr; 52 + cmd->len = data->len; 53 + } 54 + 55 + if (data->flags & CTRL_CMD_HAS_DATA) 56 + cmd->data[0] = data->data[0]; 57 + 58 + cmd->dev_id = info->dev_id; 59 + cmd->queue_id = -1; 60 + 61 + ublk_set_sqe_cmd_op(sqe, data->cmd_op); 62 + 63 + io_uring_sqe_set_data(sqe, cmd); 64 + } 65 + 66 + static int __ublk_ctrl_cmd(struct ublk_dev *dev, 67 + struct ublk_ctrl_cmd_data *data) 68 + { 69 + struct io_uring_sqe *sqe; 70 + struct io_uring_cqe *cqe; 71 + int ret = -EINVAL; 72 + 73 + sqe = io_uring_get_sqe(&dev->ring); 74 + if (!sqe) { 75 + ublk_err("%s: can't get sqe ret %d\n", __func__, ret); 76 + return ret; 77 + } 
78 + 79 + ublk_ctrl_init_cmd(dev, sqe, data); 80 + 81 + ret = io_uring_submit(&dev->ring); 82 + if (ret < 0) { 83 + ublk_err("uring submit ret %d\n", ret); 84 + return ret; 85 + } 86 + 87 + ret = io_uring_wait_cqe(&dev->ring, &cqe); 88 + if (ret < 0) { 89 + ublk_err("wait cqe: %s\n", strerror(-ret)); 90 + return ret; 91 + } 92 + io_uring_cqe_seen(&dev->ring, cqe); 93 + 94 + return cqe->res; 95 + } 96 + 97 + static int ublk_ctrl_stop_dev(struct ublk_dev *dev) 98 + { 99 + struct ublk_ctrl_cmd_data data = { 100 + .cmd_op = UBLK_CMD_STOP_DEV, 101 + }; 102 + 103 + return __ublk_ctrl_cmd(dev, &data); 104 + } 105 + 106 + static int ublk_ctrl_start_dev(struct ublk_dev *dev, 107 + int daemon_pid) 108 + { 109 + struct ublk_ctrl_cmd_data data = { 110 + .cmd_op = UBLK_U_CMD_START_DEV, 111 + .flags = CTRL_CMD_HAS_DATA, 112 + }; 113 + 114 + dev->dev_info.ublksrv_pid = data.data[0] = daemon_pid; 115 + 116 + return __ublk_ctrl_cmd(dev, &data); 117 + } 118 + 119 + static int ublk_ctrl_add_dev(struct ublk_dev *dev) 120 + { 121 + struct ublk_ctrl_cmd_data data = { 122 + .cmd_op = UBLK_U_CMD_ADD_DEV, 123 + .flags = CTRL_CMD_HAS_BUF, 124 + .addr = (__u64) (uintptr_t) &dev->dev_info, 125 + .len = sizeof(struct ublksrv_ctrl_dev_info), 126 + }; 127 + 128 + return __ublk_ctrl_cmd(dev, &data); 129 + } 130 + 131 + static int ublk_ctrl_del_dev(struct ublk_dev *dev) 132 + { 133 + struct ublk_ctrl_cmd_data data = { 134 + .cmd_op = UBLK_U_CMD_DEL_DEV, 135 + .flags = 0, 136 + }; 137 + 138 + return __ublk_ctrl_cmd(dev, &data); 139 + } 140 + 141 + static int ublk_ctrl_get_info(struct ublk_dev *dev) 142 + { 143 + struct ublk_ctrl_cmd_data data = { 144 + .cmd_op = UBLK_U_CMD_GET_DEV_INFO, 145 + .flags = CTRL_CMD_HAS_BUF, 146 + .addr = (__u64) (uintptr_t) &dev->dev_info, 147 + .len = sizeof(struct ublksrv_ctrl_dev_info), 148 + }; 149 + 150 + return __ublk_ctrl_cmd(dev, &data); 151 + } 152 + 153 + static int ublk_ctrl_set_params(struct ublk_dev *dev, 154 + struct ublk_params *params) 155 + { 156 + 
struct ublk_ctrl_cmd_data data = { 157 + .cmd_op = UBLK_U_CMD_SET_PARAMS, 158 + .flags = CTRL_CMD_HAS_BUF, 159 + .addr = (__u64) (uintptr_t) params, 160 + .len = sizeof(*params), 161 + }; 162 + params->len = sizeof(*params); 163 + return __ublk_ctrl_cmd(dev, &data); 164 + } 165 + 166 + static int ublk_ctrl_get_params(struct ublk_dev *dev, 167 + struct ublk_params *params) 168 + { 169 + struct ublk_ctrl_cmd_data data = { 170 + .cmd_op = UBLK_CMD_GET_PARAMS, 171 + .flags = CTRL_CMD_HAS_BUF, 172 + .addr = (__u64)params, 173 + .len = sizeof(*params), 174 + }; 175 + 176 + params->len = sizeof(*params); 177 + 178 + return __ublk_ctrl_cmd(dev, &data); 179 + } 180 + 181 + static int ublk_ctrl_get_features(struct ublk_dev *dev, 182 + __u64 *features) 183 + { 184 + struct ublk_ctrl_cmd_data data = { 185 + .cmd_op = UBLK_U_CMD_GET_FEATURES, 186 + .flags = CTRL_CMD_HAS_BUF, 187 + .addr = (__u64) (uintptr_t) features, 188 + .len = sizeof(*features), 189 + }; 190 + 191 + return __ublk_ctrl_cmd(dev, &data); 192 + } 193 + 194 + static const char *ublk_dev_state_desc(struct ublk_dev *dev) 195 + { 196 + switch (dev->dev_info.state) { 197 + case UBLK_S_DEV_DEAD: 198 + return "DEAD"; 199 + case UBLK_S_DEV_LIVE: 200 + return "LIVE"; 201 + case UBLK_S_DEV_QUIESCED: 202 + return "QUIESCED"; 203 + default: 204 + return "UNKNOWN"; 205 + }; 206 + } 207 + 208 + static void ublk_ctrl_dump(struct ublk_dev *dev) 209 + { 210 + struct ublksrv_ctrl_dev_info *info = &dev->dev_info; 211 + struct ublk_params p; 212 + int ret; 213 + 214 + ret = ublk_ctrl_get_params(dev, &p); 215 + if (ret < 0) { 216 + ublk_err("failed to get params %m\n"); 217 + return; 218 + } 219 + 220 + ublk_log("dev id %d: nr_hw_queues %d queue_depth %d block size %d dev_capacity %lld\n", 221 + info->dev_id, info->nr_hw_queues, info->queue_depth, 222 + 1 << p.basic.logical_bs_shift, p.basic.dev_sectors); 223 + ublk_log("\tmax rq size %d daemon pid %d flags 0x%llx state %s\n", 224 + info->max_io_buf_bytes, info->ublksrv_pid, 
info->flags, 225 + ublk_dev_state_desc(dev)); 226 + fflush(stdout); 227 + } 228 + 229 + static void ublk_ctrl_deinit(struct ublk_dev *dev) 230 + { 231 + close(dev->ctrl_fd); 232 + free(dev); 233 + } 234 + 235 + static struct ublk_dev *ublk_ctrl_init(void) 236 + { 237 + struct ublk_dev *dev = (struct ublk_dev *)calloc(1, sizeof(*dev)); 238 + struct ublksrv_ctrl_dev_info *info = &dev->dev_info; 239 + int ret; 240 + 241 + dev->ctrl_fd = open(CTRL_DEV, O_RDWR); 242 + if (dev->ctrl_fd < 0) { 243 + free(dev); 244 + return NULL; 245 + } 246 + 247 + info->max_io_buf_bytes = UBLK_IO_MAX_BYTES; 248 + 249 + ret = ublk_setup_ring(&dev->ring, UBLK_CTRL_RING_DEPTH, 250 + UBLK_CTRL_RING_DEPTH, IORING_SETUP_SQE128); 251 + if (ret < 0) { 252 + ublk_err("queue_init: %s\n", strerror(-ret)); 253 + free(dev); 254 + return NULL; 255 + } 256 + dev->nr_fds = 1; 257 + 258 + return dev; 259 + } 260 + 261 + static int __ublk_queue_cmd_buf_sz(unsigned depth) 262 + { 263 + int size = depth * sizeof(struct ublksrv_io_desc); 264 + unsigned int page_sz = getpagesize(); 265 + 266 + return round_up(size, page_sz); 267 + } 268 + 269 + static int ublk_queue_max_cmd_buf_sz(void) 270 + { 271 + return __ublk_queue_cmd_buf_sz(UBLK_MAX_QUEUE_DEPTH); 272 + } 273 + 274 + static int ublk_queue_cmd_buf_sz(struct ublk_queue *q) 275 + { 276 + return __ublk_queue_cmd_buf_sz(q->q_depth); 277 + } 278 + 279 + static void ublk_queue_deinit(struct ublk_queue *q) 280 + { 281 + int i; 282 + int nr_ios = q->q_depth; 283 + 284 + io_uring_unregister_ring_fd(&q->ring); 285 + 286 + if (q->ring.ring_fd > 0) { 287 + io_uring_unregister_files(&q->ring); 288 + close(q->ring.ring_fd); 289 + q->ring.ring_fd = -1; 290 + } 291 + 292 + if (q->io_cmd_buf) 293 + munmap(q->io_cmd_buf, ublk_queue_cmd_buf_sz(q)); 294 + 295 + for (i = 0; i < nr_ios; i++) 296 + free(q->ios[i].buf_addr); 297 + } 298 + 299 + static int ublk_queue_init(struct ublk_queue *q) 300 + { 301 + struct ublk_dev *dev = q->dev; 302 + int depth = 
dev->dev_info.queue_depth; 303 + int i, ret = -1; 304 + int cmd_buf_size, io_buf_size; 305 + unsigned long off; 306 + int ring_depth = dev->tgt.sq_depth, cq_depth = dev->tgt.cq_depth; 307 + 308 + q->tgt_ops = dev->tgt.ops; 309 + q->state = 0; 310 + q->q_depth = depth; 311 + q->cmd_inflight = 0; 312 + q->tid = gettid(); 313 + 314 + cmd_buf_size = ublk_queue_cmd_buf_sz(q); 315 + off = UBLKSRV_CMD_BUF_OFFSET + q->q_id * ublk_queue_max_cmd_buf_sz(); 316 + q->io_cmd_buf = (char *)mmap(0, cmd_buf_size, PROT_READ, 317 + MAP_SHARED | MAP_POPULATE, dev->fds[0], off); 318 + if (q->io_cmd_buf == MAP_FAILED) { 319 + ublk_err("ublk dev %d queue %d map io_cmd_buf failed %m\n", 320 + q->dev->dev_info.dev_id, q->q_id); 321 + goto fail; 322 + } 323 + 324 + io_buf_size = dev->dev_info.max_io_buf_bytes; 325 + for (i = 0; i < q->q_depth; i++) { 326 + q->ios[i].buf_addr = NULL; 327 + q->ios[i].flags = UBLKSRV_NEED_FETCH_RQ | UBLKSRV_IO_FREE; 328 + 329 + if (q->state & UBLKSRV_NO_BUF) 330 + continue; 331 + 332 + if (posix_memalign((void **)&q->ios[i].buf_addr, 333 + getpagesize(), io_buf_size)) { 334 + ublk_err("ublk dev %d queue %d io %d posix_memalign failed %m\n", 335 + dev->dev_info.dev_id, q->q_id, i); 336 + goto fail; 337 + } 338 + } 339 + 340 + ret = ublk_setup_ring(&q->ring, ring_depth, cq_depth, 341 + IORING_SETUP_COOP_TASKRUN); 342 + if (ret < 0) { 343 + ublk_err("ublk dev %d queue %d setup io_uring failed %d\n", 344 + q->dev->dev_info.dev_id, q->q_id, ret); 345 + goto fail; 346 + } 347 + 348 + io_uring_register_ring_fd(&q->ring); 349 + 350 + ret = io_uring_register_files(&q->ring, dev->fds, dev->nr_fds); 351 + if (ret) { 352 + ublk_err("ublk dev %d queue %d register files failed %d\n", 353 + q->dev->dev_info.dev_id, q->q_id, ret); 354 + goto fail; 355 + } 356 + 357 + return 0; 358 + fail: 359 + ublk_queue_deinit(q); 360 + ublk_err("ublk dev %d queue %d failed\n", 361 + dev->dev_info.dev_id, q->q_id); 362 + return -ENOMEM; 363 + } 364 + 365 + static int ublk_dev_prep(struct 
ublk_dev *dev) 366 + { 367 + int dev_id = dev->dev_info.dev_id; 368 + char buf[64]; 369 + int ret = 0; 370 + 371 + snprintf(buf, 64, "%s%d", UBLKC_DEV, dev_id); 372 + dev->fds[0] = open(buf, O_RDWR); 373 + if (dev->fds[0] < 0) { 374 + ret = -EBADF; 375 + ublk_err("can't open %s, ret %d\n", buf, dev->fds[0]); 376 + goto fail; 377 + } 378 + 379 + if (dev->tgt.ops->init_tgt) 380 + ret = dev->tgt.ops->init_tgt(dev); 381 + 382 + return ret; 383 + fail: 384 + close(dev->fds[0]); 385 + return ret; 386 + } 387 + 388 + static void ublk_dev_unprep(struct ublk_dev *dev) 389 + { 390 + if (dev->tgt.ops->deinit_tgt) 391 + dev->tgt.ops->deinit_tgt(dev); 392 + close(dev->fds[0]); 393 + } 394 + 395 + int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag) 396 + { 397 + struct ublksrv_io_cmd *cmd; 398 + struct io_uring_sqe *sqe; 399 + unsigned int cmd_op = 0; 400 + __u64 user_data; 401 + 402 + /* only freed io can be issued */ 403 + if (!(io->flags & UBLKSRV_IO_FREE)) 404 + return 0; 405 + 406 + /* we issue because we need either fetching or committing */ 407 + if (!(io->flags & 408 + (UBLKSRV_NEED_FETCH_RQ | UBLKSRV_NEED_COMMIT_RQ_COMP))) 409 + return 0; 410 + 411 + if (io->flags & UBLKSRV_NEED_COMMIT_RQ_COMP) 412 + cmd_op = UBLK_U_IO_COMMIT_AND_FETCH_REQ; 413 + else if (io->flags & UBLKSRV_NEED_FETCH_RQ) 414 + cmd_op = UBLK_U_IO_FETCH_REQ; 415 + 416 + if (io_uring_sq_space_left(&q->ring) < 1) 417 + io_uring_submit(&q->ring); 418 + 419 + sqe = ublk_queue_alloc_sqe(q); 420 + if (!sqe) { 421 + ublk_err("%s: run out of sqe %d, tag %d\n", 422 + __func__, q->q_id, tag); 423 + return -1; 424 + } 425 + 426 + cmd = (struct ublksrv_io_cmd *)ublk_get_sqe_cmd(sqe); 427 + 428 + if (cmd_op == UBLK_U_IO_COMMIT_AND_FETCH_REQ) 429 + cmd->result = io->result; 430 + 431 + /* These fields should be written once, never change */ 432 + ublk_set_sqe_cmd_op(sqe, cmd_op); 433 + sqe->fd = 0; /* dev->fds[0] */ 434 + sqe->opcode = IORING_OP_URING_CMD; 435 + sqe->flags = 
IOSQE_FIXED_FILE; 436 + sqe->rw_flags = 0; 437 + cmd->tag = tag; 438 + cmd->q_id = q->q_id; 439 + if (!(q->state & UBLKSRV_NO_BUF)) 440 + cmd->addr = (__u64) (uintptr_t) io->buf_addr; 441 + else 442 + cmd->addr = 0; 443 + 444 + user_data = build_user_data(tag, _IOC_NR(cmd_op), 0, 0); 445 + io_uring_sqe_set_data64(sqe, user_data); 446 + 447 + io->flags = 0; 448 + 449 + q->cmd_inflight += 1; 450 + 451 + ublk_dbg(UBLK_DBG_IO_CMD, "%s: (qid %d tag %u cmd_op %u) iof %x stopping %d\n", 452 + __func__, q->q_id, tag, cmd_op, 453 + io->flags, !!(q->state & UBLKSRV_QUEUE_STOPPING)); 454 + return 1; 455 + } 456 + 457 + static void ublk_submit_fetch_commands(struct ublk_queue *q) 458 + { 459 + int i = 0; 460 + 461 + for (i = 0; i < q->q_depth; i++) 462 + ublk_queue_io_cmd(q, &q->ios[i], i); 463 + } 464 + 465 + static int ublk_queue_is_idle(struct ublk_queue *q) 466 + { 467 + return !io_uring_sq_ready(&q->ring) && !q->io_inflight; 468 + } 469 + 470 + static int ublk_queue_is_done(struct ublk_queue *q) 471 + { 472 + return (q->state & UBLKSRV_QUEUE_STOPPING) && ublk_queue_is_idle(q); 473 + } 474 + 475 + static inline void ublksrv_handle_tgt_cqe(struct ublk_queue *q, 476 + struct io_uring_cqe *cqe) 477 + { 478 + unsigned tag = user_data_to_tag(cqe->user_data); 479 + 480 + if (cqe->res < 0 && cqe->res != -EAGAIN) 481 + ublk_err("%s: failed tgt io: res %d qid %u tag %u, cmd_op %u\n", 482 + __func__, cqe->res, q->q_id, 483 + user_data_to_tag(cqe->user_data), 484 + user_data_to_op(cqe->user_data)); 485 + 486 + if (q->tgt_ops->tgt_io_done) 487 + q->tgt_ops->tgt_io_done(q, tag, cqe); 488 + } 489 + 490 + static void ublk_handle_cqe(struct io_uring *r, 491 + struct io_uring_cqe *cqe, void *data) 492 + { 493 + struct ublk_queue *q = container_of(r, struct ublk_queue, ring); 494 + unsigned tag = user_data_to_tag(cqe->user_data); 495 + unsigned cmd_op = user_data_to_op(cqe->user_data); 496 + int fetch = (cqe->res != UBLK_IO_RES_ABORT) && 497 + !(q->state & UBLKSRV_QUEUE_STOPPING); 498 + 
struct ublk_io *io; 499 + 500 + if (cqe->res < 0 && cqe->res != -ENODEV) 501 + ublk_err("%s: res %d userdata %llx queue state %x\n", __func__, 502 + cqe->res, cqe->user_data, q->state); 503 + 504 + ublk_dbg(UBLK_DBG_IO_CMD, "%s: res %d (qid %d tag %u cmd_op %u target %d) stopping %d\n", 505 + __func__, cqe->res, q->q_id, tag, cmd_op, 506 + is_target_io(cqe->user_data), 507 + (q->state & UBLKSRV_QUEUE_STOPPING)); 508 + 509 + /* Don't retrieve io in case of target io */ 510 + if (is_target_io(cqe->user_data)) { 511 + ublksrv_handle_tgt_cqe(q, cqe); 512 + return; 513 + } 514 + 515 + io = &q->ios[tag]; 516 + q->cmd_inflight--; 517 + 518 + if (!fetch) { 519 + q->state |= UBLKSRV_QUEUE_STOPPING; 520 + io->flags &= ~UBLKSRV_NEED_FETCH_RQ; 521 + } 522 + 523 + if (cqe->res == UBLK_IO_RES_OK) { 524 + assert(tag < q->q_depth); 525 + if (q->tgt_ops->queue_io) 526 + q->tgt_ops->queue_io(q, tag); 527 + } else { 528 + /* 529 + * COMMIT_REQ will be completed immediately since no fetching 530 + * piggyback is required. 
531 + * 532 + * Marking IO_FREE only, then this io won't be issued since 533 + * we only issue io with (UBLKSRV_IO_FREE | UBLKSRV_NEED_*) 534 + * 535 + * */ 536 + io->flags = UBLKSRV_IO_FREE; 537 + } 538 + } 539 + 540 + static int ublk_reap_events_uring(struct io_uring *r) 541 + { 542 + struct io_uring_cqe *cqe; 543 + unsigned head; 544 + int count = 0; 545 + 546 + io_uring_for_each_cqe(r, head, cqe) { 547 + ublk_handle_cqe(r, cqe, NULL); 548 + count += 1; 549 + } 550 + io_uring_cq_advance(r, count); 551 + 552 + return count; 553 + } 554 + 555 + static int ublk_process_io(struct ublk_queue *q) 556 + { 557 + int ret, reapped; 558 + 559 + ublk_dbg(UBLK_DBG_QUEUE, "dev%d-q%d: to_submit %d inflight cmd %u stopping %d\n", 560 + q->dev->dev_info.dev_id, 561 + q->q_id, io_uring_sq_ready(&q->ring), 562 + q->cmd_inflight, 563 + (q->state & UBLKSRV_QUEUE_STOPPING)); 564 + 565 + if (ublk_queue_is_done(q)) 566 + return -ENODEV; 567 + 568 + ret = io_uring_submit_and_wait(&q->ring, 1); 569 + reapped = ublk_reap_events_uring(&q->ring); 570 + 571 + ublk_dbg(UBLK_DBG_QUEUE, "submit result %d, reapped %d stop %d idle %d\n", 572 + ret, reapped, (q->state & UBLKSRV_QUEUE_STOPPING), 573 + (q->state & UBLKSRV_QUEUE_IDLE)); 574 + 575 + return reapped; 576 + } 577 + 578 + static void *ublk_io_handler_fn(void *data) 579 + { 580 + struct ublk_queue *q = data; 581 + int dev_id = q->dev->dev_info.dev_id; 582 + int ret; 583 + 584 + ret = ublk_queue_init(q); 585 + if (ret) { 586 + ublk_err("ublk dev %d queue %d init queue failed\n", 587 + dev_id, q->q_id); 588 + return NULL; 589 + } 590 + ublk_dbg(UBLK_DBG_QUEUE, "tid %d: ublk dev %d queue %d started\n", 591 + q->tid, dev_id, q->q_id); 592 + 593 + /* submit all io commands to ublk driver */ 594 + ublk_submit_fetch_commands(q); 595 + do { 596 + if (ublk_process_io(q) < 0) 597 + break; 598 + } while (1); 599 + 600 + ublk_dbg(UBLK_DBG_QUEUE, "ublk dev %d queue %d exited\n", dev_id, q->q_id); 601 + ublk_queue_deinit(q); 602 + return NULL; 603 + } 
604 + 605 + static void ublk_set_parameters(struct ublk_dev *dev) 606 + { 607 + int ret; 608 + 609 + ret = ublk_ctrl_set_params(dev, &dev->tgt.params); 610 + if (ret) 611 + ublk_err("dev %d set basic parameter failed %d\n", 612 + dev->dev_info.dev_id, ret); 613 + } 614 + 615 + static int ublk_send_dev_event(const struct dev_ctx *ctx, int dev_id) 616 + { 617 + uint64_t id; 618 + int evtfd = ctx->_evtfd; 619 + 620 + if (evtfd < 0) 621 + return -EBADF; 622 + 623 + if (dev_id >= 0) 624 + id = dev_id + 1; 625 + else 626 + id = ERROR_EVTFD_DEVID; 627 + 628 + if (write(evtfd, &id, sizeof(id)) != sizeof(id)) 629 + return -EINVAL; 630 + 631 + return 0; 632 + } 633 + 634 + 635 + static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev) 636 + { 637 + int ret, i; 638 + void *thread_ret; 639 + const struct ublksrv_ctrl_dev_info *dinfo = &dev->dev_info; 640 + 641 + ublk_dbg(UBLK_DBG_DEV, "%s enter\n", __func__); 642 + 643 + ret = ublk_dev_prep(dev); 644 + if (ret) 645 + return ret; 646 + 647 + for (i = 0; i < dinfo->nr_hw_queues; i++) { 648 + dev->q[i].dev = dev; 649 + dev->q[i].q_id = i; 650 + pthread_create(&dev->q[i].thread, NULL, 651 + ublk_io_handler_fn, 652 + &dev->q[i]); 653 + } 654 + 655 + /* everything is fine now, start us */ 656 + ublk_set_parameters(dev); 657 + ret = ublk_ctrl_start_dev(dev, getpid()); 658 + if (ret < 0) { 659 + ublk_err("%s: ublk_ctrl_start_dev failed: %d\n", __func__, ret); 660 + goto fail; 661 + } 662 + 663 + ublk_ctrl_get_info(dev); 664 + ublk_send_dev_event(ctx, dev->dev_info.dev_id); 665 + 666 + /* wait until we are terminated */ 667 + for (i = 0; i < dinfo->nr_hw_queues; i++) 668 + pthread_join(dev->q[i].thread, &thread_ret); 669 + fail: 670 + ublk_dev_unprep(dev); 671 + ublk_dbg(UBLK_DBG_DEV, "%s exit\n", __func__); 672 + 673 + return ret; 674 + } 675 + 676 + static int wait_ublk_dev(char *dev_name, int evt_mask, unsigned timeout) 677 + { 678 + #define EV_SIZE (sizeof(struct inotify_event)) 679 + #define EV_BUF_LEN (128 * 
(EV_SIZE + 16)) 680 + struct pollfd pfd; 681 + int fd, wd; 682 + int ret = -EINVAL; 683 + 684 + fd = inotify_init(); 685 + if (fd < 0) { 686 + ublk_dbg(UBLK_DBG_DEV, "%s: inotify init failed\n", __func__); 687 + return fd; 688 + } 689 + 690 + wd = inotify_add_watch(fd, "/dev", evt_mask); 691 + if (wd == -1) { 692 + ublk_dbg(UBLK_DBG_DEV, "%s: add watch for /dev failed\n", __func__); 693 + goto fail; 694 + } 695 + 696 + pfd.fd = fd; 697 + pfd.events = POLL_IN; 698 + while (1) { 699 + int i = 0; 700 + char buffer[EV_BUF_LEN]; 701 + ret = poll(&pfd, 1, 1000 * timeout); 702 + 703 + if (ret == -1) { 704 + ublk_err("%s: poll inotify failed: %d\n", __func__, ret); 705 + goto rm_watch; 706 + } else if (ret == 0) { 707 + ublk_err("%s: poll inotify timeout\n", __func__); 708 + ret = -ETIMEDOUT; 709 + goto rm_watch; 710 + } 711 + 712 + ret = read(fd, buffer, EV_BUF_LEN); 713 + if (ret < 0) { 714 + ublk_err("%s: read inotify fd failed\n", __func__); 715 + goto rm_watch; 716 + } 717 + 718 + while (i < ret) { 719 + struct inotify_event *event = (struct inotify_event *)&buffer[i]; 720 + 721 + ublk_dbg(UBLK_DBG_DEV, "%s: inotify event %x %s\n", 722 + __func__, event->mask, event->name); 723 + if (event->mask & evt_mask) { 724 + if (!strcmp(event->name, dev_name)) { 725 + ret = 0; 726 + goto rm_watch; 727 + } 728 + } 729 + i += EV_SIZE + event->len; 730 + } 731 + } 732 + rm_watch: 733 + inotify_rm_watch(fd, wd); 734 + fail: 735 + close(fd); 736 + return ret; 737 + } 738 + 739 + static int ublk_stop_io_daemon(const struct ublk_dev *dev) 740 + { 741 + int daemon_pid = dev->dev_info.ublksrv_pid; 742 + int dev_id = dev->dev_info.dev_id; 743 + char ublkc[64]; 744 + int ret = 0; 745 + 746 + /* daemon may be dead already */ 747 + if (kill(daemon_pid, 0) < 0) 748 + goto wait; 749 + 750 + /* 751 + * Wait until ublk char device is closed, when our daemon is shutdown 752 + */ 753 + snprintf(ublkc, sizeof(ublkc), "%s%d", "ublkc", dev_id); 754 + ret = wait_ublk_dev(ublkc, IN_CLOSE_WRITE, 10); 
755 + /* double check and inotify may not be 100% reliable */ 756 + if (ret == -ETIMEDOUT) 757 + /* the daemon doesn't exist now if kill(0) fails */ 758 + ret = kill(daemon_pid, 0) < 0; 759 + wait: 760 + waitpid(daemon_pid, NULL, 0); 761 + ublk_dbg(UBLK_DBG_DEV, "%s: pid %d dev_id %d ret %d\n", 762 + __func__, daemon_pid, dev_id, ret); 763 + 764 + return ret; 765 + } 766 + 767 + static int __cmd_dev_add(const struct dev_ctx *ctx) 768 + { 769 + unsigned nr_queues = ctx->nr_hw_queues; 770 + const char *tgt_type = ctx->tgt_type; 771 + unsigned depth = ctx->queue_depth; 772 + __u64 features; 773 + const struct ublk_tgt_ops *ops; 774 + struct ublksrv_ctrl_dev_info *info; 775 + struct ublk_dev *dev; 776 + int dev_id = ctx->dev_id; 777 + int ret; 778 + 779 + ops = ublk_find_tgt(tgt_type); 780 + if (!ops) { 781 + ublk_err("%s: no such tgt type, type %s\n", 782 + __func__, tgt_type); 783 + return -ENODEV; 784 + } 785 + 786 + if (nr_queues > UBLK_MAX_QUEUES || depth > UBLK_QUEUE_DEPTH) { 787 + ublk_err("%s: invalid nr_queues or depth queues %u depth %u\n", 788 + __func__, nr_queues, depth); 789 + return -EINVAL; 790 + } 791 + 792 + dev = ublk_ctrl_init(); 793 + if (!dev) { 794 + ublk_err("%s: can't alloc dev id %d, type %s\n", 795 + __func__, dev_id, tgt_type); 796 + return -ENOMEM; 797 + } 798 + 799 + /* kernel doesn't support get_features */ 800 + ret = ublk_ctrl_get_features(dev, &features); 801 + if (ret < 0) 802 + return -EINVAL; 803 + 804 + if (!(features & UBLK_F_CMD_IOCTL_ENCODE)) 805 + return -ENOTSUP; 806 + 807 + info = &dev->dev_info; 808 + info->dev_id = ctx->dev_id; 809 + info->nr_hw_queues = nr_queues; 810 + info->queue_depth = depth; 811 + info->flags = ctx->flags; 812 + dev->tgt.ops = ops; 813 + dev->tgt.sq_depth = depth; 814 + dev->tgt.cq_depth = depth; 815 + 816 + ret = ublk_ctrl_add_dev(dev); 817 + if (ret < 0) { 818 + ublk_err("%s: can't add dev id %d, type %s ret %d\n", 819 + __func__, dev_id, tgt_type, ret); 820 + goto fail; 821 + } 822 + 823 + ret = 
ublk_start_daemon(ctx, dev); 824 + ublk_dbg(UBLK_DBG_DEV, "%s: daemon exit %d\b", ret); 825 + 826 + fail: 827 + if (ret < 0) 828 + ublk_send_dev_event(ctx, -1); 829 + ublk_ctrl_deinit(dev); 830 + return ret; 831 + } 832 + 833 + static int __cmd_dev_list(struct dev_ctx *ctx); 834 + 835 + static int cmd_dev_add(struct dev_ctx *ctx) 836 + { 837 + int res; 838 + 839 + ctx->_evtfd = eventfd(0, 0); 840 + if (ctx->_evtfd < 0) { 841 + ublk_err("%s: failed to create eventfd %s\n", __func__, strerror(errno)); 842 + exit(-1); 843 + } 844 + 845 + setsid(); 846 + res = fork(); 847 + if (res == 0) { 848 + __cmd_dev_add(ctx); 849 + exit(EXIT_SUCCESS); 850 + } else if (res > 0) { 851 + uint64_t id; 852 + 853 + res = read(ctx->_evtfd, &id, sizeof(id)); 854 + close(ctx->_evtfd); 855 + if (res == sizeof(id) && id != ERROR_EVTFD_DEVID) { 856 + ctx->dev_id = id - 1; 857 + return __cmd_dev_list(ctx); 858 + } 859 + exit(EXIT_FAILURE); 860 + } else { 861 + return res; 862 + } 863 + } 864 + 865 + static int __cmd_dev_del(struct dev_ctx *ctx) 866 + { 867 + int number = ctx->dev_id; 868 + struct ublk_dev *dev; 869 + int ret; 870 + 871 + dev = ublk_ctrl_init(); 872 + dev->dev_info.dev_id = number; 873 + 874 + ret = ublk_ctrl_get_info(dev); 875 + if (ret < 0) 876 + goto fail; 877 + 878 + ret = ublk_ctrl_stop_dev(dev); 879 + if (ret < 0) 880 + ublk_err("%s: stop dev %d failed ret %d\n", __func__, number, ret); 881 + 882 + ret = ublk_stop_io_daemon(dev); 883 + if (ret < 0) 884 + ublk_err("%s: stop daemon id %d dev %d, ret %d\n", 885 + __func__, dev->dev_info.ublksrv_pid, number, ret); 886 + ublk_ctrl_del_dev(dev); 887 + fail: 888 + if (ret >= 0) 889 + ret = ublk_ctrl_get_info(dev); 890 + ublk_ctrl_deinit(dev); 891 + 892 + return (ret >= 0) ? 
0 : ret; 893 + } 894 + 895 + static int cmd_dev_del(struct dev_ctx *ctx) 896 + { 897 + int i; 898 + 899 + if (ctx->dev_id >= 0 || !ctx->all) 900 + return __cmd_dev_del(ctx); 901 + 902 + for (i = 0; i < 255; i++) { 903 + ctx->dev_id = i; 904 + __cmd_dev_del(ctx); 905 + } 906 + return 0; 907 + } 908 + 909 + static int __cmd_dev_list(struct dev_ctx *ctx) 910 + { 911 + struct ublk_dev *dev = ublk_ctrl_init(); 912 + int ret; 913 + 914 + if (!dev) 915 + return -ENODEV; 916 + 917 + dev->dev_info.dev_id = ctx->dev_id; 918 + 919 + ret = ublk_ctrl_get_info(dev); 920 + if (ret < 0) { 921 + if (ctx->logging) 922 + ublk_err("%s: can't get dev info from %d: %d\n", 923 + __func__, ctx->dev_id, ret); 924 + } else { 925 + ublk_ctrl_dump(dev); 926 + } 927 + 928 + ublk_ctrl_deinit(dev); 929 + 930 + return ret; 931 + } 932 + 933 + static int cmd_dev_list(struct dev_ctx *ctx) 934 + { 935 + int i; 936 + 937 + if (ctx->dev_id >= 0 || !ctx->all) 938 + return __cmd_dev_list(ctx); 939 + 940 + ctx->logging = false; 941 + for (i = 0; i < 255; i++) { 942 + ctx->dev_id = i; 943 + __cmd_dev_list(ctx); 944 + } 945 + return 0; 946 + } 947 + 948 + static int cmd_dev_get_features(void) 949 + { 950 + #define const_ilog2(x) (63 - __builtin_clzll(x)) 951 + static const char *feat_map[] = { 952 + [const_ilog2(UBLK_F_SUPPORT_ZERO_COPY)] = "ZERO_COPY", 953 + [const_ilog2(UBLK_F_URING_CMD_COMP_IN_TASK)] = "COMP_IN_TASK", 954 + [const_ilog2(UBLK_F_NEED_GET_DATA)] = "GET_DATA", 955 + [const_ilog2(UBLK_F_USER_RECOVERY)] = "USER_RECOVERY", 956 + [const_ilog2(UBLK_F_USER_RECOVERY_REISSUE)] = "RECOVERY_REISSUE", 957 + [const_ilog2(UBLK_F_UNPRIVILEGED_DEV)] = "UNPRIVILEGED_DEV", 958 + [const_ilog2(UBLK_F_CMD_IOCTL_ENCODE)] = "CMD_IOCTL_ENCODE", 959 + [const_ilog2(UBLK_F_USER_COPY)] = "USER_COPY", 960 + [const_ilog2(UBLK_F_ZONED)] = "ZONED", 961 + [const_ilog2(UBLK_F_USER_RECOVERY_FAIL_IO)] = "RECOVERY_FAIL_IO", 962 + }; 963 + struct ublk_dev *dev; 964 + __u64 features = 0; 965 + int ret; 966 + 967 + dev = 
ublk_ctrl_init(); 968 + if (!dev) { 969 + fprintf(stderr, "ublksrv_ctrl_init failed id\n"); 970 + return -EOPNOTSUPP; 971 + } 972 + 973 + ret = ublk_ctrl_get_features(dev, &features); 974 + if (!ret) { 975 + int i; 976 + 977 + printf("ublk_drv features: 0x%llx\n", features); 978 + 979 + for (i = 0; i < sizeof(features) * 8; i++) { 980 + const char *feat; 981 + 982 + if (!((1ULL << i) & features)) 983 + continue; 984 + if (i < sizeof(feat_map) / sizeof(feat_map[0])) 985 + feat = feat_map[i]; 986 + else 987 + feat = "unknown"; 988 + printf("\t%-20s: 0x%llx\n", feat, 1ULL << i); 989 + } 990 + } 991 + 992 + return ret; 993 + } 994 + 995 + static int cmd_dev_help(char *exe) 996 + { 997 + printf("%s add -t [null] [-q nr_queues] [-d depth] [-n dev_id]\n", exe); 998 + printf("\t default: nr_queues=2(max 4), depth=128(max 128), dev_id=-1(auto allocation)\n"); 999 + printf("%s del [-n dev_id] -a \n", exe); 1000 + printf("\t -a delete all devices -n delete specified device\n"); 1001 + printf("%s list [-n dev_id] -a \n", exe); 1002 + printf("\t -a list all devices, -n list specified device, default -a \n"); 1003 + printf("%s features\n", exe); 1004 + return 0; 1005 + } 1006 + 1007 + int main(int argc, char *argv[]) 1008 + { 1009 + static const struct option longopts[] = { 1010 + { "all", 0, NULL, 'a' }, 1011 + { "type", 1, NULL, 't' }, 1012 + { "number", 1, NULL, 'n' }, 1013 + { "queues", 1, NULL, 'q' }, 1014 + { "depth", 1, NULL, 'd' }, 1015 + { "debug_mask", 1, NULL, 0 }, 1016 + { "quiet", 0, NULL, 0 }, 1017 + { 0, 0, 0, 0 } 1018 + }; 1019 + int option_idx, opt; 1020 + const char *cmd = argv[1]; 1021 + struct dev_ctx ctx = { 1022 + .queue_depth = 128, 1023 + .nr_hw_queues = 2, 1024 + .dev_id = -1, 1025 + .tgt_type = "unknown", 1026 + }; 1027 + int ret = -EINVAL, i; 1028 + 1029 + if (argc == 1) 1030 + return ret; 1031 + 1032 + optind = 2; 1033 + while ((opt = getopt_long(argc, argv, "t:n:d:q:a", 1034 + longopts, &option_idx)) != -1) { 1035 + switch (opt) { 1036 + case 'a': 
1037 + ctx.all = 1; 1038 + break; 1039 + case 'n': 1040 + ctx.dev_id = strtol(optarg, NULL, 10); 1041 + break; 1042 + case 't': 1043 + if (strlen(optarg) < sizeof(ctx.tgt_type)) 1044 + strcpy(ctx.tgt_type, optarg); 1045 + break; 1046 + case 'q': 1047 + ctx.nr_hw_queues = strtol(optarg, NULL, 10); 1048 + break; 1049 + case 'd': 1050 + ctx.queue_depth = strtol(optarg, NULL, 10); 1051 + break; 1052 + case 0: 1053 + if (!strcmp(longopts[option_idx].name, "debug_mask")) 1054 + ublk_dbg_mask = strtol(optarg, NULL, 16); 1055 + if (!strcmp(longopts[option_idx].name, "quiet")) 1056 + ublk_dbg_mask = 0; 1057 + break; 1058 + } 1059 + } 1060 + 1061 + i = optind; 1062 + while (i < argc && ctx.nr_files < MAX_BACK_FILES) { 1063 + ctx.files[ctx.nr_files++] = argv[i++]; 1064 + } 1065 + 1066 + if (!strcmp(cmd, "add")) 1067 + ret = cmd_dev_add(&ctx); 1068 + else if (!strcmp(cmd, "del")) 1069 + ret = cmd_dev_del(&ctx); 1070 + else if (!strcmp(cmd, "list")) { 1071 + ctx.all = 1; 1072 + ret = cmd_dev_list(&ctx); 1073 + } else if (!strcmp(cmd, "help")) 1074 + ret = cmd_dev_help(argv[0]); 1075 + else if (!strcmp(cmd, "features")) 1076 + ret = cmd_dev_get_features(); 1077 + else 1078 + cmd_dev_help(argv[0]); 1079 + 1080 + return ret; 1081 + }
+252
tools/testing/selftests/ublk/kublk.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef KUBLK_INTERNAL_H 3 + #define KUBLK_INTERNAL_H 4 + 5 + #include <unistd.h> 6 + #include <stdlib.h> 7 + #include <assert.h> 8 + #include <stdio.h> 9 + #include <stdarg.h> 10 + #include <string.h> 11 + #include <pthread.h> 12 + #include <getopt.h> 13 + #include <limits.h> 14 + #include <poll.h> 15 + #include <sys/syscall.h> 16 + #include <sys/mman.h> 17 + #include <sys/ioctl.h> 18 + #include <sys/inotify.h> 19 + #include <sys/wait.h> 20 + #include <sys/eventfd.h> 21 + #include <liburing.h> 22 + #include <linux/ublk_cmd.h> 23 + 24 + #define __maybe_unused __attribute__((unused)) 25 + #define MAX_BACK_FILES 4 26 + #ifndef min 27 + #define min(a, b) ((a) < (b) ? (a) : (b)) 28 + #endif 29 + 30 + /****************** part 1: libublk ********************/ 31 + 32 + #define CTRL_DEV "/dev/ublk-control" 33 + #define UBLKC_DEV "/dev/ublkc" 34 + #define UBLKB_DEV "/dev/ublkb" 35 + #define UBLK_CTRL_RING_DEPTH 32 36 + #define ERROR_EVTFD_DEVID -2 37 + 38 + /* queue idle timeout */ 39 + #define UBLKSRV_IO_IDLE_SECS 20 40 + 41 + #define UBLK_IO_MAX_BYTES 65536 42 + #define UBLK_MAX_QUEUES 4 43 + #define UBLK_QUEUE_DEPTH 128 44 + 45 + #define UBLK_DBG_DEV (1U << 0) 46 + #define UBLK_DBG_QUEUE (1U << 1) 47 + #define UBLK_DBG_IO_CMD (1U << 2) 48 + #define UBLK_DBG_IO (1U << 3) 49 + #define UBLK_DBG_CTRL_CMD (1U << 4) 50 + #define UBLK_LOG (1U << 5) 51 + 52 + struct ublk_dev; 53 + struct ublk_queue; 54 + 55 + struct dev_ctx { 56 + char tgt_type[16]; 57 + unsigned long flags; 58 + unsigned nr_hw_queues; 59 + unsigned queue_depth; 60 + int dev_id; 61 + int nr_files; 62 + char *files[MAX_BACK_FILES]; 63 + unsigned int logging:1; 64 + unsigned int all:1; 65 + 66 + int _evtfd; 67 + }; 68 + 69 + struct ublk_ctrl_cmd_data { 70 + __u32 cmd_op; 71 + #define CTRL_CMD_HAS_DATA 1 72 + #define CTRL_CMD_HAS_BUF 2 73 + __u32 flags; 74 + 75 + __u64 data[2]; 76 + __u64 addr; 77 + __u32 len; 78 + }; 79 + 80 + struct ublk_io { 81 + char 
*buf_addr; 82 + 83 + #define UBLKSRV_NEED_FETCH_RQ (1UL << 0) 84 + #define UBLKSRV_NEED_COMMIT_RQ_COMP (1UL << 1) 85 + #define UBLKSRV_IO_FREE (1UL << 2) 86 + unsigned short flags; 87 + unsigned short refs; /* used by target code only */ 88 + 89 + int result; 90 + }; 91 + 92 + struct ublk_tgt_ops { 93 + const char *name; 94 + int (*init_tgt)(struct ublk_dev *); 95 + void (*deinit_tgt)(struct ublk_dev *); 96 + 97 + int (*queue_io)(struct ublk_queue *, int tag); 98 + void (*tgt_io_done)(struct ublk_queue *, 99 + int tag, const struct io_uring_cqe *); 100 + }; 101 + 102 + struct ublk_tgt { 103 + unsigned long dev_size; 104 + unsigned int sq_depth; 105 + unsigned int cq_depth; 106 + const struct ublk_tgt_ops *ops; 107 + struct ublk_params params; 108 + char backing_file[1024 - 8 - sizeof(struct ublk_params)]; 109 + }; 110 + 111 + struct ublk_queue { 112 + int q_id; 113 + int q_depth; 114 + unsigned int cmd_inflight; 115 + unsigned int io_inflight; 116 + struct ublk_dev *dev; 117 + const struct ublk_tgt_ops *tgt_ops; 118 + char *io_cmd_buf; 119 + struct io_uring ring; 120 + struct ublk_io ios[UBLK_QUEUE_DEPTH]; 121 + #define UBLKSRV_QUEUE_STOPPING (1U << 0) 122 + #define UBLKSRV_QUEUE_IDLE (1U << 1) 123 + #define UBLKSRV_NO_BUF (1U << 2) 124 + unsigned state; 125 + pid_t tid; 126 + pthread_t thread; 127 + }; 128 + 129 + struct ublk_dev { 130 + struct ublk_tgt tgt; 131 + struct ublksrv_ctrl_dev_info dev_info; 132 + struct ublk_queue q[UBLK_MAX_QUEUES]; 133 + 134 + int fds[2]; /* fds[0] points to /dev/ublkcN */ 135 + int nr_fds; 136 + int ctrl_fd; 137 + struct io_uring ring; 138 + }; 139 + 140 + #ifndef offsetof 141 + #define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) 142 + #endif 143 + 144 + #ifndef container_of 145 + #define container_of(ptr, type, member) ({ \ 146 + unsigned long __mptr = (unsigned long)(ptr); \ 147 + ((type *)(__mptr - offsetof(type, member))); }) 148 + #endif 149 + 150 + #define round_up(val, rnd) \ 151 + (((val) + ((rnd) - 1)) & ~((rnd) - 
1)) 152 + 153 + 154 + extern unsigned int ublk_dbg_mask; 155 + extern int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag); 156 + 157 + static inline int is_target_io(__u64 user_data) 158 + { 159 + return (user_data & (1ULL << 63)) != 0; 160 + } 161 + 162 + static inline __u64 build_user_data(unsigned tag, unsigned op, 163 + unsigned tgt_data, unsigned is_target_io) 164 + { 165 + assert(!(tag >> 16) && !(op >> 8) && !(tgt_data >> 16)); 166 + 167 + return tag | (op << 16) | (tgt_data << 24) | (__u64)is_target_io << 63; 168 + } 169 + 170 + static inline unsigned int user_data_to_tag(__u64 user_data) 171 + { 172 + return user_data & 0xffff; 173 + } 174 + 175 + static inline unsigned int user_data_to_op(__u64 user_data) 176 + { 177 + return (user_data >> 16) & 0xff; 178 + } 179 + 180 + static inline void ublk_err(const char *fmt, ...) 181 + { 182 + va_list ap; 183 + 184 + va_start(ap, fmt); 185 + vfprintf(stderr, fmt, ap); 186 + } 187 + 188 + static inline void ublk_log(const char *fmt, ...) 189 + { 190 + if (ublk_dbg_mask & UBLK_LOG) { 191 + va_list ap; 192 + 193 + va_start(ap, fmt); 194 + vfprintf(stdout, fmt, ap); 195 + } 196 + } 197 + 198 + static inline void ublk_dbg(int level, const char *fmt, ...) 
199 + { 200 + if (level & ublk_dbg_mask) { 201 + va_list ap; 202 + 203 + va_start(ap, fmt); 204 + vfprintf(stdout, fmt, ap); 205 + } 206 + } 207 + 208 + static inline struct io_uring_sqe *ublk_queue_alloc_sqe(struct ublk_queue *q) 209 + { 210 + unsigned left = io_uring_sq_space_left(&q->ring); 211 + 212 + if (left < 1) 213 + io_uring_submit(&q->ring); 214 + return io_uring_get_sqe(&q->ring); 215 + } 216 + 217 + static inline void *ublk_get_sqe_cmd(const struct io_uring_sqe *sqe) 218 + { 219 + return (void *)&sqe->cmd; 220 + } 221 + 222 + static inline void ublk_mark_io_done(struct ublk_io *io, int res) 223 + { 224 + io->flags |= (UBLKSRV_NEED_COMMIT_RQ_COMP | UBLKSRV_IO_FREE); 225 + io->result = res; 226 + } 227 + 228 + static inline const struct ublksrv_io_desc *ublk_get_iod(const struct ublk_queue *q, int tag) 229 + { 230 + return (struct ublksrv_io_desc *)&(q->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]); 231 + } 232 + 233 + static inline void ublk_set_sqe_cmd_op(struct io_uring_sqe *sqe, __u32 cmd_op) 234 + { 235 + __u32 *addr = (__u32 *)&sqe->off; 236 + 237 + addr[0] = cmd_op; 238 + addr[1] = 0; 239 + } 240 + 241 + static inline int ublk_complete_io(struct ublk_queue *q, unsigned tag, int res) 242 + { 243 + struct ublk_io *io = &q->ios[tag]; 244 + 245 + ublk_mark_io_done(io, res); 246 + 247 + return ublk_queue_io_cmd(q, io, tag); 248 + } 249 + 250 + extern const struct ublk_tgt_ops null_tgt_ops; 251 + 252 + #endif
+38
tools/testing/selftests/ublk/null.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #include "kublk.h" 4 + 5 + static int ublk_null_tgt_init(struct ublk_dev *dev) 6 + { 7 + const struct ublksrv_ctrl_dev_info *info = &dev->dev_info; 8 + unsigned long dev_size = 250UL << 30; 9 + 10 + dev->tgt.dev_size = dev_size; 11 + dev->tgt.params = (struct ublk_params) { 12 + .types = UBLK_PARAM_TYPE_BASIC, 13 + .basic = { 14 + .logical_bs_shift = 9, 15 + .physical_bs_shift = 12, 16 + .io_opt_shift = 12, 17 + .io_min_shift = 9, 18 + .max_sectors = info->max_io_buf_bytes >> 9, 19 + .dev_sectors = dev_size >> 9, 20 + }, 21 + }; 22 + 23 + return 0; 24 + } 25 + 26 + static int ublk_null_queue_io(struct ublk_queue *q, int tag) 27 + { 28 + const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); 29 + 30 + ublk_complete_io(q, tag, iod->nr_sectors << 9); 31 + return 0; 32 + } 33 + 34 + const struct ublk_tgt_ops null_tgt_ops = { 35 + .name = "null", 36 + .init_tgt = ublk_null_tgt_init, 37 + .queue_io = ublk_null_queue_io, 38 + };
+58
tools/testing/selftests/ublk/test_common.sh
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Shared helpers for the ublk selftests.  Requires root and the kublk
# helper binary in the current directory (see UBLK_PROG below).

# Skip the test (kselftest exit code 4) unless running as root:
# creating ublk devices needs /dev/ublk-control.
_check_root() {
	local ksft_skip=4

	if [ "$UID" != 0 ]; then
		echo please run this as root >&2
		exit "$ksft_skip"
	fi
}

# Tear down every ublk device on the system.
_remove_ublk_devices() {
	"${UBLK_PROG}" del -a
}

# Print the state field (e.g. LIVE/DEAD) of ublk device $1.
_get_ublk_dev_state() {
	"${UBLK_PROG}" list -n "$1" | grep "state" | awk '{print $11}'
}

# Print the daemon pid serving ublk device $1.
_get_ublk_daemon_pid() {
	"${UBLK_PROG}" list -n "$1" | grep "pid" | awk '{print $7}'
}

# Announce a test run: $1 is the target type, the rest is a description.
# Also enforces the root requirement before any device is created.
_prep_test() {
	_check_root
	local type=$1
	shift 1
	echo "ublk $type: $@"
}

# Print PASS/FAIL for test $1 based on status $2, and propagate $2 as
# the return status so the kselftest runner can observe failures
# (previously the function always returned echo's status, i.e. 0).
_show_result()
{
	if [ "$2" -ne 0 ]; then
		echo "$1 : [FAIL]"
	else
		echo "$1 : [PASS]"
	fi
	return "$2"
}

# Delete ublk device $1.
_cleanup_test() {
	"${UBLK_PROG}" del -n "$1"
}

# Create a ublk device with the given "kublk add" arguments and print
# its numeric device id.  NOTE: call sites run this in a command
# substitution, so the "exit" below only aborts that subshell.
_add_ublk_dev() {
	local kublk_temp
	local dev_id

	kublk_temp=$(mktemp /tmp/kublk-XXXXXX)
	if ! "${UBLK_PROG}" add "$@" > "${kublk_temp}" 2>&1; then
		echo "fail to add ublk dev $@"
		rm -f "${kublk_temp}"
		exit 1
	fi

	dev_id=$(grep "dev id" "${kublk_temp}" | awk -F '[ :]' '{print $3}')
	# wait for udev to create /dev/ublkb${dev_id}
	udevadm settle
	rm -f "${kublk_temp}"
	echo "${dev_id}"
}

export UBLK_PROG=$(pwd)/kublk
+19
tools/testing/selftests/ublk/test_null_01.sh
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Basic read/write IO test against a ublk null disk.

. test_common.sh

TID="null_01"
ERR_CODE=0

_prep_test "null" "basic IO test"

dev_id=$(_add_ublk_dev -t null)

# drive mixed reads and writes against the null disk via libaio
fio --name=job1 --filename=/dev/ublkb${dev_id} --ioengine=libaio --rw=readwrite --iodepth=32 --size=256M > /dev/null 2>&1
ERR_CODE=$?

_cleanup_test "${dev_id}" "null"

_show_result $TID $ERR_CODE