Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: target: tcmu: Userspace must not complete queued commands

When tcmu queues a new command - no matter whether in the command ring or in
the qfull_queue - a cmd_id from the IDR udev->commands is assigned to the command.

If userspace sends a wrong command completion containing the cmd_id of a
command on the qfull_queue, tcmu_handle_completions() finds the command in
the IDR and calls tcmu_handle_completion() for it. This might do some nasty
things because commands in qfull_queue do not have a valid dbi list.

To fix this bug, we no longer add queued commands to the idr. Instead the
cmd_id is assigned when a command is written to the command ring.

Due to this change, I had to adapt the source code at several places that,
up to now, had iterated the commands with idr_for_each.

[mkp: fix checkpatch warnings]

Link: https://lore.kernel.org/r/20200518164833.12775-1-bstroesser@ts.fujitsu.com
Acked-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Bodo Stroesser <bstroesser@ts.fujitsu.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

Bodo Stroesser and committed by
Martin K. Petersen
61fb2482 5482d56b

+71 -83
+71 -83
drivers/target/target_core_user.c
··· 882 882 return command_size; 883 883 } 884 884 885 - static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, 886 - struct timer_list *timer) 885 + static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, 886 + struct timer_list *timer) 887 887 { 888 - struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 889 - int cmd_id; 890 - 891 - if (tcmu_cmd->cmd_id) 892 - goto setup_timer; 893 - 894 - cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); 895 - if (cmd_id < 0) { 896 - pr_err("tcmu: Could not allocate cmd id.\n"); 897 - return cmd_id; 898 - } 899 - tcmu_cmd->cmd_id = cmd_id; 900 - 901 - pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id, 902 - udev->name, tmo / MSEC_PER_SEC); 903 - 904 - setup_timer: 905 888 if (!tmo) 906 - return 0; 889 + return; 907 890 908 891 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); 909 892 if (!timer_pending(timer)) 910 893 mod_timer(timer, tcmu_cmd->deadline); 911 894 912 - return 0; 895 + pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd, 896 + tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC); 913 897 } 914 898 915 899 static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) 916 900 { 917 901 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 918 902 unsigned int tmo; 919 - int ret; 920 903 921 904 /* 922 905 * For backwards compat if qfull_time_out is not set use ··· 914 931 else 915 932 tmo = TCMU_TIME_OUT; 916 933 917 - ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); 918 - if (ret) 919 - return ret; 934 + tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); 920 935 921 936 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); 922 - pr_debug("adding cmd %u on dev %s to ring space wait queue\n", 923 - tcmu_cmd->cmd_id, udev->name); 937 + pr_debug("adding cmd %p on dev %s to ring space wait queue\n", 938 + tcmu_cmd, udev->name); 924 939 return 0; 925 940 } 926 941 ··· 940 959 struct tcmu_mailbox *mb; 941 
960 struct tcmu_cmd_entry *entry; 942 961 struct iovec *iov; 943 - int iov_cnt, ret; 962 + int iov_cnt, cmd_id; 944 963 uint32_t cmd_head; 945 964 uint64_t cdb_off; 946 965 bool copy_to_data_area; ··· 1041 1060 } 1042 1061 entry->req.iov_bidi_cnt = iov_cnt; 1043 1062 1044 - ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, 1045 - &udev->cmd_timer); 1046 - if (ret) { 1047 - tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 1063 + cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); 1064 + if (cmd_id < 0) { 1065 + pr_err("tcmu: Could not allocate cmd id.\n"); 1048 1066 1067 + tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 1049 1068 *scsi_err = TCM_OUT_OF_RESOURCES; 1050 1069 return -1; 1051 1070 } 1071 + tcmu_cmd->cmd_id = cmd_id; 1072 + 1073 + pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id, 1074 + tcmu_cmd, udev->name); 1075 + 1076 + tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); 1077 + 1052 1078 entry->hdr.cmd_id = tcmu_cmd->cmd_id; 1053 1079 1054 1080 /* ··· 1267 1279 return handled; 1268 1280 } 1269 1281 1270 - static int tcmu_check_expired_cmd(int id, void *p, void *data) 1282 + static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) 1271 1283 { 1272 - struct tcmu_cmd *cmd = p; 1273 - struct tcmu_dev *udev = cmd->tcmu_dev; 1274 - u8 scsi_status; 1275 1284 struct se_cmd *se_cmd; 1276 - bool is_running; 1277 - 1278 - if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 1279 - return 0; 1280 1285 1281 1286 if (!time_after(jiffies, cmd->deadline)) 1282 - return 0; 1287 + return; 1283 1288 1284 - is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags); 1289 + set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); 1290 + list_del_init(&cmd->queue_entry); 1285 1291 se_cmd = cmd->se_cmd; 1292 + cmd->se_cmd = NULL; 1286 1293 1287 - if (is_running) { 1288 - /* 1289 - * If cmd_time_out is disabled but qfull is set deadline 1290 - * will only reflect the qfull timeout. Ignore it. 
1291 - */ 1292 - if (!udev->cmd_time_out) 1293 - return 0; 1294 + pr_debug("Timing out inflight cmd %u on dev %s.\n", 1295 + cmd->cmd_id, cmd->tcmu_dev->name); 1294 1296 1295 - set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); 1296 - /* 1297 - * target_complete_cmd will translate this to LUN COMM FAILURE 1298 - */ 1299 - scsi_status = SAM_STAT_CHECK_CONDITION; 1300 - list_del_init(&cmd->queue_entry); 1301 - cmd->se_cmd = NULL; 1302 - } else { 1303 - list_del_init(&cmd->queue_entry); 1304 - idr_remove(&udev->commands, id); 1305 - tcmu_free_cmd(cmd); 1306 - scsi_status = SAM_STAT_TASK_SET_FULL; 1307 - } 1297 + target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION); 1298 + } 1308 1299 1309 - pr_debug("Timing out cmd %u on dev %s that is %s.\n", 1310 - id, udev->name, is_running ? "inflight" : "queued"); 1300 + static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd) 1301 + { 1302 + struct se_cmd *se_cmd; 1311 1303 1312 - target_complete_cmd(se_cmd, scsi_status); 1313 - return 0; 1304 + if (!time_after(jiffies, cmd->deadline)) 1305 + return; 1306 + 1307 + list_del_init(&cmd->queue_entry); 1308 + se_cmd = cmd->se_cmd; 1309 + tcmu_free_cmd(cmd); 1310 + 1311 + pr_debug("Timing out queued cmd %p on dev %s.\n", 1312 + cmd, cmd->tcmu_dev->name); 1313 + 1314 + target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL); 1314 1315 } 1315 1316 1316 1317 static void tcmu_device_timedout(struct tcmu_dev *udev) ··· 1384 1407 return &udev->se_dev; 1385 1408 } 1386 1409 1387 - static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) 1410 + static void run_qfull_queue(struct tcmu_dev *udev, bool fail) 1388 1411 { 1389 1412 struct tcmu_cmd *tcmu_cmd, *tmp_cmd; 1390 1413 LIST_HEAD(cmds); 1391 - bool drained = true; 1392 1414 sense_reason_t scsi_ret; 1393 1415 int ret; 1394 1416 1395 1417 if (list_empty(&udev->qfull_queue)) 1396 - return true; 1418 + return; 1397 1419 1398 1420 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); 1399 1421 ··· 1401 1425 
list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { 1402 1426 list_del_init(&tcmu_cmd->queue_entry); 1403 1427 1404 - pr_debug("removing cmd %u on dev %s from queue\n", 1405 - tcmu_cmd->cmd_id, udev->name); 1428 + pr_debug("removing cmd %p on dev %s from queue\n", 1429 + tcmu_cmd, udev->name); 1406 1430 1407 1431 if (fail) { 1408 - idr_remove(&udev->commands, tcmu_cmd->cmd_id); 1409 1432 /* 1410 1433 * We were not able to even start the command, so 1411 1434 * fail with busy to allow a retry in case runner ··· 1419 1444 1420 1445 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); 1421 1446 if (ret < 0) { 1422 - pr_debug("cmd %u on dev %s failed with %u\n", 1423 - tcmu_cmd->cmd_id, udev->name, scsi_ret); 1424 - 1425 - idr_remove(&udev->commands, tcmu_cmd->cmd_id); 1447 + pr_debug("cmd %p on dev %s failed with %u\n", 1448 + tcmu_cmd, udev->name, scsi_ret); 1426 1449 /* 1427 1450 * Ignore scsi_ret for now. target_complete_cmd 1428 1451 * drops it. ··· 1435 1462 * the queue 1436 1463 */ 1437 1464 list_splice_tail(&cmds, &udev->qfull_queue); 1438 - drained = false; 1439 1465 break; 1440 1466 } 1441 1467 } 1442 1468 1443 1469 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 1444 - return drained; 1445 1470 } 1446 1471 1447 1472 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) ··· 1623 1652 if (tcmu_check_and_free_pending_cmd(cmd) != 0) 1624 1653 all_expired = false; 1625 1654 } 1655 + if (!list_empty(&udev->qfull_queue)) 1656 + all_expired = false; 1626 1657 idr_destroy(&udev->commands); 1627 1658 WARN_ON(!all_expired); 1628 1659 ··· 2010 2037 mutex_lock(&udev->cmdr_lock); 2011 2038 2012 2039 idr_for_each_entry(&udev->commands, cmd, i) { 2013 - if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags)) 2014 - continue; 2015 - 2016 2040 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", 2017 2041 cmd->cmd_id, udev->name, 2018 2042 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)); ··· 2045 2075 tcmu_flush_dcache_range(mb, sizeof(*mb)); 
2046 2076 2047 2077 del_timer(&udev->cmd_timer); 2078 + 2079 + run_qfull_queue(udev, false); 2048 2080 2049 2081 mutex_unlock(&udev->cmdr_lock); 2050 2082 } ··· 2671 2699 static void check_timedout_devices(void) 2672 2700 { 2673 2701 struct tcmu_dev *udev, *tmp_dev; 2702 + struct tcmu_cmd *cmd, *tmp_cmd; 2674 2703 LIST_HEAD(devs); 2675 2704 2676 2705 spin_lock_bh(&timed_out_udevs_lock); ··· 2682 2709 spin_unlock_bh(&timed_out_udevs_lock); 2683 2710 2684 2711 mutex_lock(&udev->cmdr_lock); 2685 - idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); 2686 2712 2687 - tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); 2713 + /* 2714 + * If cmd_time_out is disabled but qfull is set deadline 2715 + * will only reflect the qfull timeout. Ignore it. 2716 + */ 2717 + if (udev->cmd_time_out) { 2718 + list_for_each_entry_safe(cmd, tmp_cmd, 2719 + &udev->inflight_queue, 2720 + queue_entry) { 2721 + tcmu_check_expired_ring_cmd(cmd); 2722 + } 2723 + tcmu_set_next_deadline(&udev->inflight_queue, 2724 + &udev->cmd_timer); 2725 + } 2726 + list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue, 2727 + queue_entry) { 2728 + tcmu_check_expired_queue_cmd(cmd); 2729 + } 2688 2730 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 2689 2731 2690 2732 mutex_unlock(&udev->cmdr_lock);