Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'block-6.19-20251218' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

- ublk selftests for missing coverage

- two fixes for the block integrity code

- fix for the newly added PR read keys ioctl, limiting the
memory that can be allocated

- workaround for a deadlock that can occur with ublk, where partition
scanning ends up recursing back into file closure, which needs the
same mutex grabbed. Not the prettiest thing in the world, but an
acceptable work-around until we can eliminate the reliance on
disk->open_mutex for this

- fix for a race between enabling writeback throttling and new IO
submissions

- move a bit of bio flag handling code. No changes, but needed for a
patchset for a future kernel

- fix for an ID leak on an init-time failure path in rnbd

- loop/zloop state check fix

* tag 'block-6.19-20251218' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
block: validate interval_exp integrity limit
block: validate pi_offset integrity limit
block: rnbd-clt: Fix leaked ID in init_dev()
ublk: fix deadlock when reading partition table
block: add allocation size check in blkdev_pr_read_keys()
Documentation: admin-guide: blockdev: replace zone_capacity with zone_capacity_mb when creating devices
zloop: use READ_ONCE() to read lo->lo_state in queue_rq path
loop: use READ_ONCE() to read lo->lo_state without locking
block: fix race between wbt_enable_default and IO submission
selftests: ublk: add user copy test cases
selftests: ublk: add support for user copy to kublk
selftests: ublk: forbid multiple data copy modes
selftests: ublk: don't share backing files between ublk servers
selftests: ublk: use auto_zc for PER_IO_DAEMON tests in stress_04
selftests: ublk: fix fio arguments in run_io_and_recover()
selftests: ublk: remove unused ios map in seq_io.bt
selftests: ublk: correct last_rw map type in seq_io.bt
selftests: ublk: fix overflow in ublk_queue_auto_zc_fallback()
block: move around bio flagging helpers

+450 -91
+1 -1
Documentation/admin-guide/blockdev/zoned_loop.rst
··· 134 134 135 135 $ modprobe zloop 136 136 $ mkdir -p /var/local/zloop/0 137 - $ echo "add capacity_mb=2048,zone_size_mb=64,zone_capacity=63MB" > /dev/zloop-control 137 + $ echo "add capacity_mb=2048,zone_size_mb=64,zone_capacity_mb=63" > /dev/zloop-control 138 138 139 139 For the device created (/dev/zloop0), the zone backing files are all created 140 140 under the default base directory (/var/local/zloop)::
+1 -1
block/bfq-iosched.c
··· 7181 7181 7182 7182 blk_stat_disable_accounting(bfqd->queue); 7183 7183 blk_queue_flag_clear(QUEUE_FLAG_DISABLE_WBT_DEF, bfqd->queue); 7184 - set_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT, &e->flags); 7184 + wbt_enable_default(bfqd->queue->disk); 7185 7185 7186 7186 kfree(bfqd); 7187 7187 }
+9 -5
block/blk-settings.c
··· 161 161 return -EINVAL; 162 162 } 163 163 164 - if (bi->pi_tuple_size > bi->metadata_size) { 165 - pr_warn("pi_tuple_size (%u) exceeds metadata_size (%u)\n", 166 - bi->pi_tuple_size, 167 - bi->metadata_size); 164 + if (bi->pi_offset + bi->pi_tuple_size > bi->metadata_size) { 165 + pr_warn("pi_offset (%u) + pi_tuple_size (%u) exceeds metadata_size (%u)\n", 166 + bi->pi_offset, bi->pi_tuple_size, bi->metadata_size); 168 167 return -EINVAL; 169 168 } 170 169 ··· 193 194 break; 194 195 } 195 196 196 - if (!bi->interval_exp) 197 + if (!bi->interval_exp) { 197 198 bi->interval_exp = ilog2(lim->logical_block_size); 199 + } else if (bi->interval_exp < SECTOR_SHIFT || 200 + bi->interval_exp > ilog2(lim->logical_block_size)) { 201 + pr_warn("invalid interval_exp %u\n", bi->interval_exp); 202 + return -EINVAL; 203 + } 198 204 199 205 /* 200 206 * The PI generation / validation helpers do not expect intervals to
+1 -1
block/blk-sysfs.c
··· 932 932 elevator_set_default(q); 933 933 934 934 blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); 935 - wbt_enable_default(disk); 935 + wbt_init_enable_default(disk); 936 936 937 937 /* Now everything is ready and send out KOBJ_ADD uevent */ 938 938 kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
+16 -4
block/blk-wbt.c
··· 699 699 /* 700 700 * Enable wbt if defaults are configured that way 701 701 */ 702 - void wbt_enable_default(struct gendisk *disk) 702 + static bool __wbt_enable_default(struct gendisk *disk) 703 703 { 704 704 struct request_queue *q = disk->queue; 705 705 struct rq_qos *rqos; ··· 716 716 if (enable && RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT) 717 717 RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT; 718 718 mutex_unlock(&disk->rqos_state_mutex); 719 - return; 719 + return false; 720 720 } 721 721 mutex_unlock(&disk->rqos_state_mutex); 722 722 723 723 /* Queue not registered? Maybe shutting down... */ 724 724 if (!blk_queue_registered(q)) 725 - return; 725 + return false; 726 726 727 727 if (queue_is_mq(q) && enable) 728 - wbt_init(disk); 728 + return true; 729 + return false; 730 + } 731 + 732 + void wbt_enable_default(struct gendisk *disk) 733 + { 734 + __wbt_enable_default(disk); 729 735 } 730 736 EXPORT_SYMBOL_GPL(wbt_enable_default); 737 + 738 + void wbt_init_enable_default(struct gendisk *disk) 739 + { 740 + if (__wbt_enable_default(disk)) 741 + WARN_ON_ONCE(wbt_init(disk)); 742 + } 731 743 732 744 u64 wbt_default_latency_nsec(struct request_queue *q) 733 745 {
+5
block/blk-wbt.h
··· 5 5 #ifdef CONFIG_BLK_WBT 6 6 7 7 int wbt_init(struct gendisk *disk); 8 + void wbt_init_enable_default(struct gendisk *disk); 8 9 void wbt_disable_default(struct gendisk *disk); 9 10 void wbt_enable_default(struct gendisk *disk); 10 11 ··· 16 15 u64 wbt_default_latency_nsec(struct request_queue *); 17 16 18 17 #else 18 + 19 + static inline void wbt_init_enable_default(struct gendisk *disk) 20 + { 21 + } 19 22 20 23 static inline void wbt_disable_default(struct gendisk *disk) 21 24 {
-4
block/elevator.c
··· 633 633 .et = ctx->old->et, 634 634 .data = ctx->old->elevator_data 635 635 }; 636 - bool enable_wbt = test_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT, 637 - &ctx->old->flags); 638 636 639 637 elv_unregister_queue(q, ctx->old); 640 638 blk_mq_free_sched_res(&res, ctx->old->type, q->tag_set); 641 639 kobject_put(&ctx->old->kobj); 642 - if (enable_wbt) 643 - wbt_enable_default(q->disk); 644 640 } 645 641 if (ctx->new) { 646 642 ret = elv_register_queue(q, ctx->new, !ctx->no_uevent);
-1
block/elevator.h
··· 156 156 157 157 #define ELEVATOR_FLAG_REGISTERED 0 158 158 #define ELEVATOR_FLAG_DYING 1 159 - #define ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT 2 160 159 161 160 /* 162 161 * block elevator interface
+5 -4
block/ioctl.c
··· 442 442 if (copy_from_user(&read_keys, arg, sizeof(read_keys))) 443 443 return -EFAULT; 444 444 445 - keys_info_len = struct_size(keys_info, keys, read_keys.num_keys); 446 - if (keys_info_len == SIZE_MAX) 445 + if (read_keys.num_keys > PR_KEYS_MAX) 447 446 return -EINVAL; 448 447 449 - keys_info = kzalloc(keys_info_len, GFP_KERNEL); 448 + keys_info_len = struct_size(keys_info, keys, read_keys.num_keys); 449 + 450 + keys_info = kvzalloc(keys_info_len, GFP_KERNEL); 450 451 if (!keys_info) 451 452 return -ENOMEM; 452 453 ··· 474 473 if (copy_to_user(arg, &read_keys, sizeof(read_keys))) 475 474 ret = -EFAULT; 476 475 out: 477 - kfree(keys_info); 476 + kvfree(keys_info); 478 477 return ret; 479 478 } 480 479
+13 -9
drivers/block/loop.c
··· 1082 1082 /* Order wrt reading lo_state in loop_validate_file(). */ 1083 1083 wmb(); 1084 1084 1085 - lo->lo_state = Lo_bound; 1085 + WRITE_ONCE(lo->lo_state, Lo_bound); 1086 1086 if (part_shift) 1087 1087 lo->lo_flags |= LO_FLAGS_PARTSCAN; 1088 1088 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; ··· 1179 1179 if (!part_shift) 1180 1180 set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); 1181 1181 mutex_lock(&lo->lo_mutex); 1182 - lo->lo_state = Lo_unbound; 1182 + WRITE_ONCE(lo->lo_state, Lo_unbound); 1183 1183 mutex_unlock(&lo->lo_mutex); 1184 1184 1185 1185 /* ··· 1218 1218 1219 1219 lo->lo_flags |= LO_FLAGS_AUTOCLEAR; 1220 1220 if (disk_openers(lo->lo_disk) == 1) 1221 - lo->lo_state = Lo_rundown; 1221 + WRITE_ONCE(lo->lo_state, Lo_rundown); 1222 1222 loop_global_unlock(lo, true); 1223 1223 1224 1224 return 0; ··· 1743 1743 1744 1744 mutex_lock(&lo->lo_mutex); 1745 1745 if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) 1746 - lo->lo_state = Lo_rundown; 1746 + WRITE_ONCE(lo->lo_state, Lo_rundown); 1747 1747 1748 1748 need_clear = (lo->lo_state == Lo_rundown); 1749 1749 mutex_unlock(&lo->lo_mutex); ··· 1858 1858 1859 1859 blk_mq_start_request(rq); 1860 1860 1861 - if (lo->lo_state != Lo_bound) 1861 + if (data_race(READ_ONCE(lo->lo_state)) != Lo_bound) 1862 1862 return BLK_STS_IOERR; 1863 1863 1864 1864 switch (req_op(rq)) { ··· 2016 2016 lo->worker_tree = RB_ROOT; 2017 2017 INIT_LIST_HEAD(&lo->idle_worker_list); 2018 2018 timer_setup(&lo->timer, loop_free_idle_workers_timer, TIMER_DEFERRABLE); 2019 - lo->lo_state = Lo_unbound; 2019 + WRITE_ONCE(lo->lo_state, Lo_unbound); 2020 2020 2021 2021 err = mutex_lock_killable(&loop_ctl_mutex); 2022 2022 if (err) ··· 2174 2174 goto mark_visible; 2175 2175 } 2176 2176 /* Mark this loop device as no more bound, but not quite unbound yet */ 2177 - lo->lo_state = Lo_deleting; 2177 + WRITE_ONCE(lo->lo_state, Lo_deleting); 2178 2178 mutex_unlock(&lo->lo_mutex); 2179 2179 2180 2180 loop_remove(lo); ··· 2197 
2197 if (ret) 2198 2198 return ret; 2199 2199 idr_for_each_entry(&loop_index_idr, lo, id) { 2200 - /* Hitting a race results in creating a new loop device which is harmless. */ 2201 - if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound) 2200 + /* 2201 + * Hitting a race results in creating a new loop device 2202 + * which is harmless. 2203 + */ 2204 + if (lo->idr_visible && 2205 + data_race(READ_ONCE(lo->lo_state)) == Lo_unbound) 2202 2206 goto found; 2203 2207 } 2204 2208 mutex_unlock(&loop_ctl_mutex);
+8 -5
drivers/block/rnbd/rnbd-clt.c
··· 1423 1423 goto out_alloc; 1424 1424 } 1425 1425 1426 - ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1, 1427 - GFP_KERNEL); 1428 - if (ret < 0) { 1426 + dev->clt_device_id = ida_alloc_max(&index_ida, 1427 + (1 << (MINORBITS - RNBD_PART_BITS)) - 1, 1428 + GFP_KERNEL); 1429 + if (dev->clt_device_id < 0) { 1430 + ret = dev->clt_device_id; 1429 1431 pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n", 1430 1432 pathname, sess->sessname, ret); 1431 1433 goto out_queues; ··· 1436 1434 dev->pathname = kstrdup(pathname, GFP_KERNEL); 1437 1435 if (!dev->pathname) { 1438 1436 ret = -ENOMEM; 1439 - goto out_queues; 1437 + goto out_ida; 1440 1438 } 1441 1439 1442 - dev->clt_device_id = ret; 1443 1440 dev->sess = sess; 1444 1441 dev->access_mode = access_mode; 1445 1442 dev->nr_poll_queues = nr_poll_queues; ··· 1454 1453 1455 1454 return dev; 1456 1455 1456 + out_ida: 1457 + ida_free(&index_ida, dev->clt_device_id); 1457 1458 out_queues: 1458 1459 kfree(dev->hw_queues); 1459 1460 out_alloc:
+28 -4
drivers/block/ublk_drv.c
··· 1080 1080 return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu); 1081 1081 } 1082 1082 1083 + static void ublk_end_request(struct request *req, blk_status_t error) 1084 + { 1085 + local_bh_disable(); 1086 + blk_mq_end_request(req, error); 1087 + local_bh_enable(); 1088 + } 1089 + 1083 1090 /* todo: handle partial completion */ 1084 1091 static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io, 1085 1092 bool need_map) 1086 1093 { 1087 1094 unsigned int unmapped_bytes; 1088 1095 blk_status_t res = BLK_STS_OK; 1096 + bool requeue; 1089 1097 1090 1098 /* failed read IO if nothing is read */ 1091 1099 if (!io->res && req_op(req) == REQ_OP_READ) ··· 1125 1117 if (unlikely(unmapped_bytes < io->res)) 1126 1118 io->res = unmapped_bytes; 1127 1119 1128 - if (blk_update_request(req, BLK_STS_OK, io->res)) 1120 + /* 1121 + * Run bio->bi_end_io() with softirqs disabled. If the final fput 1122 + * happens off this path, then that will prevent ublk's blkdev_release() 1123 + * from being called on current's task work, see fput() implementation. 1124 + * 1125 + * Otherwise, ublk server may not provide forward progress in case of 1126 + * reading the partition table from bdev_open() with disk->open_mutex 1127 + * held, and causes dead lock as we could already be holding 1128 + * disk->open_mutex here. 1129 + * 1130 + * Preferably we would not be doing IO with a mutex held that is also 1131 + * used for release, but this work-around will suffice for now. 
1132 + */ 1133 + local_bh_disable(); 1134 + requeue = blk_update_request(req, BLK_STS_OK, io->res); 1135 + local_bh_enable(); 1136 + if (requeue) 1129 1137 blk_mq_requeue_request(req, true); 1130 1138 else if (likely(!blk_should_fake_timeout(req->q))) 1131 1139 __blk_mq_end_request(req, BLK_STS_OK); 1132 1140 1133 1141 return; 1134 1142 exit: 1135 - blk_mq_end_request(req, res); 1143 + ublk_end_request(req, res); 1136 1144 } 1137 1145 1138 1146 static struct io_uring_cmd *__ublk_prep_compl_io_cmd(struct ublk_io *io, ··· 1188 1164 if (ublk_nosrv_dev_should_queue_io(ubq->dev)) 1189 1165 blk_mq_requeue_request(rq, false); 1190 1166 else 1191 - blk_mq_end_request(rq, BLK_STS_IOERR); 1167 + ublk_end_request(rq, BLK_STS_IOERR); 1192 1168 } 1193 1169 1194 1170 static void ··· 1233 1209 ublk_auto_buf_reg_fallback(ubq, req->tag); 1234 1210 return AUTO_BUF_REG_FALLBACK; 1235 1211 } 1236 - blk_mq_end_request(req, BLK_STS_IOERR); 1212 + ublk_end_request(req, BLK_STS_IOERR); 1237 1213 return AUTO_BUF_REG_FAIL; 1238 1214 } 1239 1215
+4 -4
drivers/block/zloop.c
··· 697 697 struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq); 698 698 struct zloop_device *zlo = rq->q->queuedata; 699 699 700 - if (zlo->state == Zlo_deleting) 700 + if (data_race(READ_ONCE(zlo->state)) == Zlo_deleting) 701 701 return BLK_STS_IOERR; 702 702 703 703 /* ··· 1002 1002 ret = -ENOMEM; 1003 1003 goto out; 1004 1004 } 1005 - zlo->state = Zlo_creating; 1005 + WRITE_ONCE(zlo->state, Zlo_creating); 1006 1006 1007 1007 ret = mutex_lock_killable(&zloop_ctl_mutex); 1008 1008 if (ret) ··· 1113 1113 } 1114 1114 1115 1115 mutex_lock(&zloop_ctl_mutex); 1116 - zlo->state = Zlo_live; 1116 + WRITE_ONCE(zlo->state, Zlo_live); 1117 1117 mutex_unlock(&zloop_ctl_mutex); 1118 1118 1119 1119 pr_info("zloop: device %d, %u zones of %llu MiB, %u B block size\n", ··· 1177 1177 ret = -EINVAL; 1178 1178 } else { 1179 1179 idr_remove(&zloop_index_idr, zlo->id); 1180 - zlo->state = Zlo_deleting; 1180 + WRITE_ONCE(zlo->state, Zlo_deleting); 1181 1181 } 1182 1182 1183 1183 mutex_unlock(&zloop_ctl_mutex);
+15 -15
include/linux/bio.h
··· 46 46 #define bio_data_dir(bio) \ 47 47 (op_is_write(bio_op(bio)) ? WRITE : READ) 48 48 49 + static inline bool bio_flagged(const struct bio *bio, unsigned int bit) 50 + { 51 + return bio->bi_flags & (1U << bit); 52 + } 53 + 54 + static inline void bio_set_flag(struct bio *bio, unsigned int bit) 55 + { 56 + bio->bi_flags |= (1U << bit); 57 + } 58 + 59 + static inline void bio_clear_flag(struct bio *bio, unsigned int bit) 60 + { 61 + bio->bi_flags &= ~(1U << bit); 62 + } 63 + 49 64 /* 50 65 * Check whether this bio carries any data or not. A NULL bio is allowed. 51 66 */ ··· 238 223 smp_mb(); 239 224 } 240 225 atomic_set(&bio->__bi_cnt, count); 241 - } 242 - 243 - static inline bool bio_flagged(struct bio *bio, unsigned int bit) 244 - { 245 - return bio->bi_flags & (1U << bit); 246 - } 247 - 248 - static inline void bio_set_flag(struct bio *bio, unsigned int bit) 249 - { 250 - bio->bi_flags |= (1U << bit); 251 - } 252 - 253 - static inline void bio_clear_flag(struct bio *bio, unsigned int bit) 254 - { 255 - bio->bi_flags &= ~(1U << bit); 256 226 } 257 227 258 228 static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
+2
include/uapi/linux/pr.h
··· 79 79 #define IOC_PR_READ_KEYS _IOWR('p', 206, struct pr_read_keys) 80 80 #define IOC_PR_READ_RESERVATION _IOR('p', 207, struct pr_read_reservation) 81 81 82 + #define PR_KEYS_MAX (1u << 16) 83 + 82 84 #endif /* _UAPI_PR_H */
+8
tools/testing/selftests/ublk/Makefile
··· 21 21 TEST_PROGS += test_generic_11.sh 22 22 TEST_PROGS += test_generic_12.sh 23 23 TEST_PROGS += test_generic_13.sh 24 + TEST_PROGS += test_generic_14.sh 24 25 25 26 TEST_PROGS += test_null_01.sh 26 27 TEST_PROGS += test_null_02.sh 28 + TEST_PROGS += test_null_03.sh 27 29 TEST_PROGS += test_loop_01.sh 28 30 TEST_PROGS += test_loop_02.sh 29 31 TEST_PROGS += test_loop_03.sh 30 32 TEST_PROGS += test_loop_04.sh 31 33 TEST_PROGS += test_loop_05.sh 34 + TEST_PROGS += test_loop_06.sh 35 + TEST_PROGS += test_loop_07.sh 32 36 TEST_PROGS += test_stripe_01.sh 33 37 TEST_PROGS += test_stripe_02.sh 34 38 TEST_PROGS += test_stripe_03.sh 35 39 TEST_PROGS += test_stripe_04.sh 40 + TEST_PROGS += test_stripe_05.sh 41 + TEST_PROGS += test_stripe_06.sh 36 42 37 43 TEST_PROGS += test_stress_01.sh 38 44 TEST_PROGS += test_stress_02.sh 39 45 TEST_PROGS += test_stress_03.sh 40 46 TEST_PROGS += test_stress_04.sh 41 47 TEST_PROGS += test_stress_05.sh 48 + TEST_PROGS += test_stress_06.sh 49 + TEST_PROGS += test_stress_07.sh 42 50 43 51 TEST_GEN_PROGS_EXTENDED = kublk 44 52
+4 -3
tools/testing/selftests/ublk/file_backed.c
··· 34 34 unsigned zc = ublk_queue_use_zc(q); 35 35 unsigned auto_zc = ublk_queue_use_auto_zc(q); 36 36 enum io_uring_op op = ublk_to_uring_op(iod, zc | auto_zc); 37 + struct ublk_io *io = ublk_get_io(q, tag); 37 38 struct io_uring_sqe *sqe[3]; 38 - void *addr = (zc | auto_zc) ? NULL : (void *)iod->addr; 39 + void *addr = io->buf_addr; 39 40 40 41 if (!zc || auto_zc) { 41 42 ublk_io_alloc_sqes(t, sqe, 1); ··· 57 56 58 57 ublk_io_alloc_sqes(t, sqe, 3); 59 58 60 - io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index); 59 + io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, io->buf_index); 61 60 sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK; 62 61 sqe[0]->user_data = build_user_data(tag, 63 62 ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1); ··· 69 68 sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK; 70 69 sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1); 71 70 72 - io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index); 71 + io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, io->buf_index); 73 72 sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1); 74 73 75 74 return 2;
+58 -6
tools/testing/selftests/ublk/kublk.c
··· 596 596 sqe->addr = ublk_auto_buf_reg_to_sqe_addr(&buf); 597 597 } 598 598 599 + /* Copy in pieces to test the buffer offset logic */ 600 + #define UBLK_USER_COPY_LEN 2048 601 + 602 + static void ublk_user_copy(const struct ublk_io *io, __u8 match_ublk_op) 603 + { 604 + const struct ublk_queue *q = ublk_io_to_queue(io); 605 + const struct ublksrv_io_desc *iod = ublk_get_iod(q, io->tag); 606 + __u64 off = ublk_user_copy_offset(q->q_id, io->tag); 607 + __u8 ublk_op = ublksrv_get_op(iod); 608 + __u32 len = iod->nr_sectors << 9; 609 + void *addr = io->buf_addr; 610 + 611 + if (ublk_op != match_ublk_op) 612 + return; 613 + 614 + while (len) { 615 + __u32 copy_len = min(len, UBLK_USER_COPY_LEN); 616 + ssize_t copied; 617 + 618 + if (ublk_op == UBLK_IO_OP_WRITE) 619 + copied = pread(q->ublk_fd, addr, copy_len, off); 620 + else if (ublk_op == UBLK_IO_OP_READ) 621 + copied = pwrite(q->ublk_fd, addr, copy_len, off); 622 + else 623 + assert(0); 624 + assert(copied == (ssize_t)copy_len); 625 + addr += copy_len; 626 + off += copy_len; 627 + len -= copy_len; 628 + } 629 + } 630 + 599 631 int ublk_queue_io_cmd(struct ublk_thread *t, struct ublk_io *io) 600 632 { 601 633 struct ublk_queue *q = ublk_io_to_queue(io); ··· 650 618 651 619 if (io->flags & UBLKS_IO_NEED_GET_DATA) 652 620 cmd_op = UBLK_U_IO_NEED_GET_DATA; 653 - else if (io->flags & UBLKS_IO_NEED_COMMIT_RQ_COMP) 621 + else if (io->flags & UBLKS_IO_NEED_COMMIT_RQ_COMP) { 622 + if (ublk_queue_use_user_copy(q)) 623 + ublk_user_copy(io, UBLK_IO_OP_READ); 624 + 654 625 cmd_op = UBLK_U_IO_COMMIT_AND_FETCH_REQ; 655 - else if (io->flags & UBLKS_IO_NEED_FETCH_RQ) 626 + } else if (io->flags & UBLKS_IO_NEED_FETCH_RQ) 656 627 cmd_op = UBLK_U_IO_FETCH_REQ; 657 628 658 629 if (io_uring_sq_space_left(&t->ring) < 1) ··· 684 649 sqe[0]->rw_flags = 0; 685 650 cmd->tag = io->tag; 686 651 cmd->q_id = q->q_id; 687 - if (!ublk_queue_no_buf(q)) 652 + if (!ublk_queue_no_buf(q) && !ublk_queue_use_user_copy(q)) 688 653 cmd->addr = (__u64) 
(uintptr_t) io->buf_addr; 689 654 else 690 655 cmd->addr = 0; ··· 786 751 787 752 if (cqe->res == UBLK_IO_RES_OK) { 788 753 assert(tag < q->q_depth); 754 + 755 + if (ublk_queue_use_user_copy(q)) 756 + ublk_user_copy(io, UBLK_IO_OP_WRITE); 757 + 789 758 if (q->tgt_ops->queue_io) 790 759 q->tgt_ops->queue_io(t, q, tag); 791 760 } else if (cqe->res == UBLK_IO_RES_NEED_GET_DATA) { ··· 1546 1507 1547 1508 printf("%s %s -t [null|loop|stripe|fault_inject] [-q nr_queues] [-d depth] [-n dev_id]\n", 1548 1509 exe, recovery ? "recover" : "add"); 1549 - printf("\t[--foreground] [--quiet] [-z] [--auto_zc] [--auto_zc_fallback] [--debug_mask mask] [-r 0|1 ] [-g]\n"); 1510 + printf("\t[--foreground] [--quiet] [-z] [--auto_zc] [--auto_zc_fallback] [--debug_mask mask] [-r 0|1] [-g] [-u]\n"); 1550 1511 printf("\t[-e 0|1 ] [-i 0|1] [--no_ublk_fixed_fd]\n"); 1551 1512 printf("\t[--nthreads threads] [--per_io_tasks]\n"); 1552 1513 printf("\t[target options] [backfile1] [backfile2] ...\n"); ··· 1607 1568 { "get_data", 1, NULL, 'g'}, 1608 1569 { "auto_zc", 0, NULL, 0 }, 1609 1570 { "auto_zc_fallback", 0, NULL, 0 }, 1571 + { "user_copy", 0, NULL, 'u'}, 1610 1572 { "size", 1, NULL, 's'}, 1611 1573 { "nthreads", 1, NULL, 0 }, 1612 1574 { "per_io_tasks", 0, NULL, 0 }, ··· 1633 1593 1634 1594 opterr = 0; 1635 1595 optind = 2; 1636 - while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:s:gaz", 1596 + while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:s:gazu", 1637 1597 longopts, &option_idx)) != -1) { 1638 1598 switch (opt) { 1639 1599 case 'a': ··· 1653 1613 ctx.queue_depth = strtol(optarg, NULL, 10); 1654 1614 break; 1655 1615 case 'z': 1656 - ctx.flags |= UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_USER_COPY; 1616 + ctx.flags |= UBLK_F_SUPPORT_ZERO_COPY; 1657 1617 break; 1658 1618 case 'r': 1659 1619 value = strtol(optarg, NULL, 10); ··· 1672 1632 break; 1673 1633 case 'g': 1674 1634 ctx.flags |= UBLK_F_NEED_GET_DATA; 1635 + break; 1636 + case 'u': 1637 + ctx.flags |= UBLK_F_USER_COPY; 1675 1638 
break; 1676 1639 case 's': 1677 1640 ctx.size = strtoull(optarg, NULL, 10); ··· 1726 1683 ublk_err("%s: auto_zc_fallback is set but neither " 1727 1684 "F_AUTO_BUF_REG nor F_SUPPORT_ZERO_COPY is enabled\n", 1728 1685 __func__); 1686 + return -EINVAL; 1687 + } 1688 + 1689 + if (!!(ctx.flags & UBLK_F_NEED_GET_DATA) + 1690 + !!(ctx.flags & UBLK_F_USER_COPY) + 1691 + (ctx.flags & UBLK_F_SUPPORT_ZERO_COPY && !ctx.auto_zc_fallback) + 1692 + (ctx.flags & UBLK_F_AUTO_BUF_REG && !ctx.auto_zc_fallback) + 1693 + ctx.auto_zc_fallback > 1) { 1694 + fprintf(stderr, "too many data copy modes specified\n"); 1729 1695 return -EINVAL; 1730 1696 } 1731 1697
+17 -6
tools/testing/selftests/ublk/kublk.h
··· 208 208 return !!(iod->op_flags & UBLK_IO_F_NEED_REG_BUF); 209 209 } 210 210 211 + static inline __u64 ublk_user_copy_offset(unsigned q_id, unsigned tag) 212 + { 213 + return UBLKSRV_IO_BUF_OFFSET + 214 + ((__u64)q_id << UBLK_QID_OFF | (__u64)tag << UBLK_TAG_OFF); 215 + } 216 + 211 217 static inline int is_target_io(__u64 user_data) 212 218 { 213 219 return (user_data & (1ULL << 63)) != 0; ··· 396 390 return --io->tgt_ios == 0; 397 391 } 398 392 399 - static inline int ublk_queue_use_zc(const struct ublk_queue *q) 393 + static inline bool ublk_queue_use_zc(const struct ublk_queue *q) 400 394 { 401 - return q->flags & UBLK_F_SUPPORT_ZERO_COPY; 395 + return !!(q->flags & UBLK_F_SUPPORT_ZERO_COPY); 402 396 } 403 397 404 - static inline int ublk_queue_use_auto_zc(const struct ublk_queue *q) 398 + static inline bool ublk_queue_use_auto_zc(const struct ublk_queue *q) 405 399 { 406 - return q->flags & UBLK_F_AUTO_BUF_REG; 400 + return !!(q->flags & UBLK_F_AUTO_BUF_REG); 407 401 } 408 402 409 - static inline int ublk_queue_auto_zc_fallback(const struct ublk_queue *q) 403 + static inline bool ublk_queue_auto_zc_fallback(const struct ublk_queue *q) 410 404 { 411 - return q->flags & UBLKS_Q_AUTO_BUF_REG_FALLBACK; 405 + return !!(q->flags & UBLKS_Q_AUTO_BUF_REG_FALLBACK); 406 + } 407 + 408 + static inline bool ublk_queue_use_user_copy(const struct ublk_queue *q) 409 + { 410 + return !!(q->flags & UBLK_F_USER_COPY); 412 411 } 413 412 414 413 static inline int ublk_queue_no_buf(const struct ublk_queue *q)
+1 -1
tools/testing/selftests/ublk/stripe.c
··· 134 134 struct stripe_array *s = alloc_stripe_array(conf, iod); 135 135 struct ublk_io *io = ublk_get_io(q, tag); 136 136 int i, extra = zc ? 2 : 0; 137 - void *base = (zc | auto_zc) ? NULL : (void *)iod->addr; 137 + void *base = io->buf_addr; 138 138 139 139 io->private_data = s; 140 140 calculate_stripe_array(conf, iod, s, base);
+3 -2
tools/testing/selftests/ublk/test_common.sh
··· 333 333 334 334 run_io_and_recover() 335 335 { 336 - local action=$1 336 + local size=$1 337 + local action=$2 337 338 local state 338 339 local dev_id 339 340 340 - shift 1 341 + shift 2 341 342 dev_id=$(_add_ublk_dev "$@") 342 343 _check_add_dev "$TID" $? 343 344
+1 -1
tools/testing/selftests/ublk/test_generic_04.sh
··· 8 8 9 9 ublk_run_recover_test() 10 10 { 11 - run_io_and_recover "kill_daemon" "$@" 11 + run_io_and_recover 256M "kill_daemon" "$@" 12 12 ERR_CODE=$? 13 13 if [ ${ERR_CODE} -ne 0 ]; then 14 14 echo "$TID failure: $*"
+1 -1
tools/testing/selftests/ublk/test_generic_05.sh
··· 8 8 9 9 ublk_run_recover_test() 10 10 { 11 - run_io_and_recover "kill_daemon" "$@" 11 + run_io_and_recover 256M "kill_daemon" "$@" 12 12 ERR_CODE=$? 13 13 if [ ${ERR_CODE} -ne 0 ]; then 14 14 echo "$TID failure: $*"
+1 -1
tools/testing/selftests/ublk/test_generic_11.sh
··· 8 8 9 9 ublk_run_quiesce_recover() 10 10 { 11 - run_io_and_recover "quiesce_dev" "$@" 11 + run_io_and_recover 256M "quiesce_dev" "$@" 12 12 ERR_CODE=$? 13 13 if [ ${ERR_CODE} -ne 0 ]; then 14 14 echo "$TID failure: $*"
+40
tools/testing/selftests/ublk/test_generic_14.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + . "$(cd "$(dirname "$0")" && pwd)"/test_common.sh 5 + 6 + TID="generic_14" 7 + ERR_CODE=0 8 + 9 + ublk_run_recover_test() 10 + { 11 + run_io_and_recover 256M "kill_daemon" "$@" 12 + ERR_CODE=$? 13 + if [ ${ERR_CODE} -ne 0 ]; then 14 + echo "$TID failure: $*" 15 + _show_result $TID $ERR_CODE 16 + fi 17 + } 18 + 19 + if ! _have_program fio; then 20 + exit "$UBLK_SKIP_CODE" 21 + fi 22 + 23 + _prep_test "recover" "basic recover function verification (user copy)" 24 + 25 + _create_backfile 0 256M 26 + _create_backfile 1 128M 27 + _create_backfile 2 128M 28 + 29 + ublk_run_recover_test -t null -q 2 -r 1 -u & 30 + ublk_run_recover_test -t loop -q 2 -r 1 -u "${UBLK_BACKFILES[0]}" & 31 + ublk_run_recover_test -t stripe -q 2 -r 1 -u "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 32 + wait 33 + 34 + ublk_run_recover_test -t null -q 2 -r 1 -u -i 1 & 35 + ublk_run_recover_test -t loop -q 2 -r 1 -u -i 1 "${UBLK_BACKFILES[0]}" & 36 + ublk_run_recover_test -t stripe -q 2 -r 1 -u -i 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 37 + wait 38 + 39 + _cleanup_test "recover" 40 + _show_result $TID $ERR_CODE
+25
tools/testing/selftests/ublk/test_loop_06.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + . "$(cd "$(dirname "$0")" && pwd)"/test_common.sh 5 + 6 + TID="loop_06" 7 + ERR_CODE=0 8 + 9 + if ! _have_program fio; then 10 + exit "$UBLK_SKIP_CODE" 11 + fi 12 + 13 + _prep_test "loop" "write and verify over user copy" 14 + 15 + _create_backfile 0 256M 16 + dev_id=$(_add_ublk_dev -t loop -u "${UBLK_BACKFILES[0]}") 17 + _check_add_dev $TID $? 18 + 19 + # run fio over the ublk disk 20 + _run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M 21 + ERR_CODE=$? 22 + 23 + _cleanup_test "loop" 24 + 25 + _show_result $TID $ERR_CODE
+21
tools/testing/selftests/ublk/test_loop_07.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + . "$(cd "$(dirname "$0")" && pwd)"/test_common.sh 5 + 6 + TID="loop_07" 7 + ERR_CODE=0 8 + 9 + _prep_test "loop" "mkfs & mount & umount with user copy" 10 + 11 + _create_backfile 0 256M 12 + 13 + dev_id=$(_add_ublk_dev -t loop -u "${UBLK_BACKFILES[0]}") 14 + _check_add_dev $TID $? 15 + 16 + _mkfs_mount_test /dev/ublkb"${dev_id}" 17 + ERR_CODE=$? 18 + 19 + _cleanup_test "loop" 20 + 21 + _show_result $TID $ERR_CODE
+24
tools/testing/selftests/ublk/test_null_03.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + . "$(cd "$(dirname "$0")" && pwd)"/test_common.sh 5 + 6 + TID="null_03" 7 + ERR_CODE=0 8 + 9 + if ! _have_program fio; then 10 + exit "$UBLK_SKIP_CODE" 11 + fi 12 + 13 + _prep_test "null" "basic IO test with user copy" 14 + 15 + dev_id=$(_add_ublk_dev -t null -u) 16 + _check_add_dev $TID $? 17 + 18 + # run fio over the two disks 19 + fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio --rw=readwrite --iodepth=32 --size=256M > /dev/null 2>&1 20 + ERR_CODE=$? 21 + 22 + _cleanup_test "null" 23 + 24 + _show_result $TID $ERR_CODE
+7 -5
tools/testing/selftests/ublk/test_stress_04.sh
··· 31 31 ublk_io_and_kill_daemon 8G -t null -q 4 -z --no_ublk_fixed_fd & 32 32 ublk_io_and_kill_daemon 256M -t loop -q 4 -z --no_ublk_fixed_fd "${UBLK_BACKFILES[0]}" & 33 33 ublk_io_and_kill_daemon 256M -t stripe -q 4 -z "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 34 + wait 34 35 35 36 if _have_feature "AUTO_BUF_REG"; then 36 37 ublk_io_and_kill_daemon 8G -t null -q 4 --auto_zc & 37 38 ublk_io_and_kill_daemon 256M -t loop -q 4 --auto_zc "${UBLK_BACKFILES[0]}" & 38 39 ublk_io_and_kill_daemon 256M -t stripe -q 4 --auto_zc --no_ublk_fixed_fd "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 39 40 ublk_io_and_kill_daemon 8G -t null -q 4 -z --auto_zc --auto_zc_fallback & 41 + wait 40 42 fi 41 43 42 44 if _have_feature "PER_IO_DAEMON"; then 43 - ublk_io_and_kill_daemon 8G -t null -q 4 --nthreads 8 --per_io_tasks & 44 - ublk_io_and_kill_daemon 256M -t loop -q 4 --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[0]}" & 45 - ublk_io_and_kill_daemon 256M -t stripe -q 4 --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 46 - ublk_io_and_kill_daemon 8G -t null -q 4 --nthreads 8 --per_io_tasks & 45 + ublk_io_and_kill_daemon 8G -t null -q 4 --auto_zc --nthreads 8 --per_io_tasks & 46 + ublk_io_and_kill_daemon 256M -t loop -q 4 --auto_zc --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[0]}" & 47 + ublk_io_and_kill_daemon 256M -t stripe -q 4 --auto_zc --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 48 + ublk_io_and_kill_daemon 8G -t null -q 4 -z --auto_zc --auto_zc_fallback --nthreads 8 --per_io_tasks & 49 + wait 47 50 fi 48 - wait 49 51 50 52 _cleanup_test "stress" 51 53 _show_result $TID $ERR_CODE
+5 -5
tools/testing/selftests/ublk/test_stress_05.sh
··· 58 58 59 59 if _have_feature "ZERO_COPY"; then 60 60 for reissue in $(seq 0 1); do 61 - ublk_io_and_remove 8G -t null -q 4 -g -z -r 1 -i "$reissue" & 62 - ublk_io_and_remove 256M -t loop -q 4 -g -z -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" & 61 + ublk_io_and_remove 8G -t null -q 4 -z -r 1 -i "$reissue" & 62 + ublk_io_and_remove 256M -t loop -q 4 -z -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" & 63 63 wait 64 64 done 65 65 fi 66 66 67 67 if _have_feature "AUTO_BUF_REG"; then 68 68 for reissue in $(seq 0 1); do 69 - ublk_io_and_remove 8G -t null -q 4 -g --auto_zc -r 1 -i "$reissue" & 70 - ublk_io_and_remove 256M -t loop -q 4 -g --auto_zc -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" & 71 - ublk_io_and_remove 8G -t null -q 4 -g -z --auto_zc --auto_zc_fallback -r 1 -i "$reissue" & 69 + ublk_io_and_remove 8G -t null -q 4 --auto_zc -r 1 -i "$reissue" & 70 + ublk_io_and_remove 256M -t loop -q 4 --auto_zc -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" & 71 + ublk_io_and_remove 8G -t null -q 4 -z --auto_zc --auto_zc_fallback -r 1 -i "$reissue" & 72 72 wait 73 73 done 74 74 fi
+39
tools/testing/selftests/ublk/test_stress_06.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + . "$(cd "$(dirname "$0")" && pwd)"/test_common.sh 5 + TID="stress_06" 6 + ERR_CODE=0 7 + 8 + ublk_io_and_remove() 9 + { 10 + run_io_and_remove "$@" 11 + ERR_CODE=$? 12 + if [ ${ERR_CODE} -ne 0 ]; then 13 + echo "$TID failure: $*" 14 + _show_result $TID $ERR_CODE 15 + fi 16 + } 17 + 18 + if ! _have_program fio; then 19 + exit "$UBLK_SKIP_CODE" 20 + fi 21 + 22 + _prep_test "stress" "run IO and remove device (user copy)" 23 + 24 + _create_backfile 0 256M 25 + _create_backfile 1 128M 26 + _create_backfile 2 128M 27 + 28 + ublk_io_and_remove 8G -t null -q 4 -u & 29 + ublk_io_and_remove 256M -t loop -q 4 -u "${UBLK_BACKFILES[0]}" & 30 + ublk_io_and_remove 256M -t stripe -q 4 -u "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 31 + wait 32 + 33 + ublk_io_and_remove 8G -t null -q 4 -u --nthreads 8 --per_io_tasks & 34 + ublk_io_and_remove 256M -t loop -q 4 -u --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[0]}" & 35 + ublk_io_and_remove 256M -t stripe -q 4 -u --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 36 + wait 37 + 38 + _cleanup_test "stress" 39 + _show_result $TID $ERR_CODE
+39
tools/testing/selftests/ublk/test_stress_07.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + . "$(cd "$(dirname "$0")" && pwd)"/test_common.sh 5 + TID="stress_07" 6 + ERR_CODE=0 7 + 8 + ublk_io_and_kill_daemon() 9 + { 10 + run_io_and_kill_daemon "$@" 11 + ERR_CODE=$? 12 + if [ ${ERR_CODE} -ne 0 ]; then 13 + echo "$TID failure: $*" 14 + _show_result $TID $ERR_CODE 15 + fi 16 + } 17 + 18 + if ! _have_program fio; then 19 + exit "$UBLK_SKIP_CODE" 20 + fi 21 + 22 + _prep_test "stress" "run IO and kill ublk server (user copy)" 23 + 24 + _create_backfile 0 256M 25 + _create_backfile 1 128M 26 + _create_backfile 2 128M 27 + 28 + ublk_io_and_kill_daemon 8G -t null -q 4 -u --no_ublk_fixed_fd & 29 + ublk_io_and_kill_daemon 256M -t loop -q 4 -u --no_ublk_fixed_fd "${UBLK_BACKFILES[0]}" & 30 + ublk_io_and_kill_daemon 256M -t stripe -q 4 -u "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 31 + wait 32 + 33 + ublk_io_and_kill_daemon 8G -t null -q 4 -u --nthreads 8 --per_io_tasks & 34 + ublk_io_and_kill_daemon 256M -t loop -q 4 -u --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[0]}" & 35 + ublk_io_and_kill_daemon 256M -t stripe -q 4 -u --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 36 + wait 37 + 38 + _cleanup_test "stress" 39 + _show_result $TID $ERR_CODE
+26
tools/testing/selftests/ublk/test_stripe_05.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + . "$(cd "$(dirname "$0")" && pwd)"/test_common.sh 5 + 6 + TID="stripe_05" 7 + ERR_CODE=0 8 + 9 + if ! _have_program fio; then 10 + exit "$UBLK_SKIP_CODE" 11 + fi 12 + 13 + _prep_test "stripe" "write and verify test on user copy" 14 + 15 + _create_backfile 0 256M 16 + _create_backfile 1 256M 17 + 18 + dev_id=$(_add_ublk_dev -t stripe -q 2 -u "${UBLK_BACKFILES[0]}" "${UBLK_BACKFILES[1]}") 19 + _check_add_dev $TID $? 20 + 21 + # run fio over the ublk disk 22 + _run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=512M 23 + ERR_CODE=$? 24 + 25 + _cleanup_test "stripe" 26 + _show_result $TID $ERR_CODE
+21
tools/testing/selftests/ublk/test_stripe_06.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + . "$(cd "$(dirname "$0")" && pwd)"/test_common.sh 5 + 6 + TID="stripe_06" 7 + ERR_CODE=0 8 + 9 + _prep_test "stripe" "mkfs & mount & umount on user copy" 10 + 11 + _create_backfile 0 256M 12 + _create_backfile 1 256M 13 + 14 + dev_id=$(_add_ublk_dev -t stripe -u -q 2 "${UBLK_BACKFILES[0]}" "${UBLK_BACKFILES[1]}") 15 + _check_add_dev $TID $? 16 + 17 + _mkfs_mount_test /dev/ublkb"${dev_id}" 18 + ERR_CODE=$? 19 + 20 + _cleanup_test "stripe" 21 + _show_result $TID $ERR_CODE
+1 -2
tools/testing/selftests/ublk/trace/seq_io.bt
··· 4 4 $3: strlen($2) 5 5 */ 6 6 BEGIN { 7 - @last_rw[$1, str($2)] = 0; 7 + @last_rw[$1, str($2)] = (uint64)0; 8 8 } 9 9 tracepoint:block:block_rq_complete 10 10 { ··· 17 17 } 18 18 @last_rw[$dev, str($2)] = (args.sector + args.nr_sector); 19 19 } 20 - @ios = count(); 21 20 } 22 21 23 22 END {