Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests: ublk: add support for user copy to kublk

The ublk selftests mock ublk server kublk supports every data copy mode
except user copy. Add support for user copy to kublk, enabled via the
--user_copy (-u) command line argument. On writes, issue pread() calls
to copy the write data into the ublk_io's buffer before dispatching the
write to the target implementation. On reads, issue pwrite() calls to
copy read data from the ublk_io's buffer before committing the request.
Copy in 2 KB chunks to provide some coverage of the offsetting logic.

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Caleb Sander Mateos and committed by
Jens Axboe
b9f0a94c 52bc4837

+64 -9
+4 -3
tools/testing/selftests/ublk/file_backed.c
··· 34 34 unsigned zc = ublk_queue_use_zc(q); 35 35 unsigned auto_zc = ublk_queue_use_auto_zc(q); 36 36 enum io_uring_op op = ublk_to_uring_op(iod, zc | auto_zc); 37 + struct ublk_io *io = ublk_get_io(q, tag); 37 38 struct io_uring_sqe *sqe[3]; 38 - void *addr = (zc | auto_zc) ? NULL : (void *)iod->addr; 39 + void *addr = io->buf_addr; 39 40 40 41 if (!zc || auto_zc) { 41 42 ublk_io_alloc_sqes(t, sqe, 1); ··· 57 56 58 57 ublk_io_alloc_sqes(t, sqe, 3); 59 58 60 - io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index); 59 + io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, io->buf_index); 61 60 sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK; 62 61 sqe[0]->user_data = build_user_data(tag, 63 62 ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1); ··· 69 68 sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK; 70 69 sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1); 71 70 72 - io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index); 71 + io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, io->buf_index); 73 72 sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1); 74 73 75 74 return 2;
+48 -5
tools/testing/selftests/ublk/kublk.c
··· 596 596 sqe->addr = ublk_auto_buf_reg_to_sqe_addr(&buf); 597 597 } 598 598 599 + /* Copy in pieces to test the buffer offset logic */ 600 + #define UBLK_USER_COPY_LEN 2048 601 + 602 + static void ublk_user_copy(const struct ublk_io *io, __u8 match_ublk_op) 603 + { 604 + const struct ublk_queue *q = ublk_io_to_queue(io); 605 + const struct ublksrv_io_desc *iod = ublk_get_iod(q, io->tag); 606 + __u64 off = ublk_user_copy_offset(q->q_id, io->tag); 607 + __u8 ublk_op = ublksrv_get_op(iod); 608 + __u32 len = iod->nr_sectors << 9; 609 + void *addr = io->buf_addr; 610 + 611 + if (ublk_op != match_ublk_op) 612 + return; 613 + 614 + while (len) { 615 + __u32 copy_len = min(len, UBLK_USER_COPY_LEN); 616 + ssize_t copied; 617 + 618 + if (ublk_op == UBLK_IO_OP_WRITE) 619 + copied = pread(q->ublk_fd, addr, copy_len, off); 620 + else if (ublk_op == UBLK_IO_OP_READ) 621 + copied = pwrite(q->ublk_fd, addr, copy_len, off); 622 + else 623 + assert(0); 624 + assert(copied == (ssize_t)copy_len); 625 + addr += copy_len; 626 + off += copy_len; 627 + len -= copy_len; 628 + } 629 + } 630 + 599 631 int ublk_queue_io_cmd(struct ublk_thread *t, struct ublk_io *io) 600 632 { 601 633 struct ublk_queue *q = ublk_io_to_queue(io); ··· 650 618 651 619 if (io->flags & UBLKS_IO_NEED_GET_DATA) 652 620 cmd_op = UBLK_U_IO_NEED_GET_DATA; 653 - else if (io->flags & UBLKS_IO_NEED_COMMIT_RQ_COMP) 621 + else if (io->flags & UBLKS_IO_NEED_COMMIT_RQ_COMP) { 622 + if (ublk_queue_use_user_copy(q)) 623 + ublk_user_copy(io, UBLK_IO_OP_READ); 624 + 654 625 cmd_op = UBLK_U_IO_COMMIT_AND_FETCH_REQ; 655 - else if (io->flags & UBLKS_IO_NEED_FETCH_RQ) 626 + } else if (io->flags & UBLKS_IO_NEED_FETCH_RQ) 656 627 cmd_op = UBLK_U_IO_FETCH_REQ; 657 628 658 629 if (io_uring_sq_space_left(&t->ring) < 1) ··· 684 649 sqe[0]->rw_flags = 0; 685 650 cmd->tag = io->tag; 686 651 cmd->q_id = q->q_id; 687 - if (!ublk_queue_no_buf(q)) 652 + if (!ublk_queue_no_buf(q) && !ublk_queue_use_user_copy(q)) 688 653 cmd->addr = (__u64) 
(uintptr_t) io->buf_addr; 689 654 else 690 655 cmd->addr = 0; ··· 786 751 787 752 if (cqe->res == UBLK_IO_RES_OK) { 788 753 assert(tag < q->q_depth); 754 + 755 + if (ublk_queue_use_user_copy(q)) 756 + ublk_user_copy(io, UBLK_IO_OP_WRITE); 757 + 789 758 if (q->tgt_ops->queue_io) 790 759 q->tgt_ops->queue_io(t, q, tag); 791 760 } else if (cqe->res == UBLK_IO_RES_NEED_GET_DATA) { ··· 1546 1507 1547 1508 printf("%s %s -t [null|loop|stripe|fault_inject] [-q nr_queues] [-d depth] [-n dev_id]\n", 1548 1509 exe, recovery ? "recover" : "add"); 1549 - printf("\t[--foreground] [--quiet] [-z] [--auto_zc] [--auto_zc_fallback] [--debug_mask mask] [-r 0|1 ] [-g]\n"); 1510 + printf("\t[--foreground] [--quiet] [-z] [--auto_zc] [--auto_zc_fallback] [--debug_mask mask] [-r 0|1] [-g] [-u]\n"); 1550 1511 printf("\t[-e 0|1 ] [-i 0|1] [--no_ublk_fixed_fd]\n"); 1551 1512 printf("\t[--nthreads threads] [--per_io_tasks]\n"); 1552 1513 printf("\t[target options] [backfile1] [backfile2] ...\n"); ··· 1607 1568 { "get_data", 1, NULL, 'g'}, 1608 1569 { "auto_zc", 0, NULL, 0 }, 1609 1570 { "auto_zc_fallback", 0, NULL, 0 }, 1571 + { "user_copy", 0, NULL, 'u'}, 1610 1572 { "size", 1, NULL, 's'}, 1611 1573 { "nthreads", 1, NULL, 0 }, 1612 1574 { "per_io_tasks", 0, NULL, 0 }, ··· 1633 1593 1634 1594 opterr = 0; 1635 1595 optind = 2; 1636 - while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:s:gaz", 1596 + while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:s:gazu", 1637 1597 longopts, &option_idx)) != -1) { 1638 1598 switch (opt) { 1639 1599 case 'a': ··· 1672 1632 break; 1673 1633 case 'g': 1674 1634 ctx.flags |= UBLK_F_NEED_GET_DATA; 1635 + break; 1636 + case 'u': 1637 + ctx.flags |= UBLK_F_USER_COPY; 1675 1638 break; 1676 1639 case 's': 1677 1640 ctx.size = strtoull(optarg, NULL, 10);
+11
tools/testing/selftests/ublk/kublk.h
··· 208 208 return !!(iod->op_flags & UBLK_IO_F_NEED_REG_BUF); 209 209 } 210 210 211 + static inline __u64 ublk_user_copy_offset(unsigned q_id, unsigned tag) 212 + { 213 + return UBLKSRV_IO_BUF_OFFSET + 214 + ((__u64)q_id << UBLK_QID_OFF | (__u64)tag << UBLK_TAG_OFF); 215 + } 216 + 211 217 static inline int is_target_io(__u64 user_data) 212 218 { 213 219 return (user_data & (1ULL << 63)) != 0; ··· 409 403 static inline bool ublk_queue_auto_zc_fallback(const struct ublk_queue *q) 410 404 { 411 405 return !!(q->flags & UBLKS_Q_AUTO_BUF_REG_FALLBACK); 406 + } 407 + 408 + static inline bool ublk_queue_use_user_copy(const struct ublk_queue *q) 409 + { 410 + return !!(q->flags & UBLK_F_USER_COPY); 412 411 } 413 412 414 413 static inline int ublk_queue_no_buf(const struct ublk_queue *q)
+1 -1
tools/testing/selftests/ublk/stripe.c
··· 134 134 struct stripe_array *s = alloc_stripe_array(conf, iod); 135 135 struct ublk_io *io = ublk_get_io(q, tag); 136 136 int i, extra = zc ? 2 : 0; 137 - void *base = (zc | auto_zc) ? NULL : (void *)iod->addr; 137 + void *base = io->buf_addr; 138 138 139 139 io->private_data = s; 140 140 calculate_stripe_array(conf, iod, s, base);