Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

target: add a parse_cdb method to the backend drivers

Instead of trying to handle all SCSI command sets in one function
(transport_generic_cmd_sequencer) call out to the backend driver to perform
this functionality. For pSCSI a copy of the existing code is used, but for
all virtual backends we can use a new sbc_parse_cdb helper to
provide a simple SBC emulation.

For now this setup means a fair amount of duplication between pSCSI and the
SBC library, but patches later in this series will sort out that problem.

(nab: Fix up build failure in target_core_pscsi.c)

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>

authored by

Christoph Hellwig and committed by
Nicholas Bellinger
d6e0175c 88455ec4

+936 -733
+1
drivers/target/Makefile
··· 10 10 target_core_tpg.o \ 11 11 target_core_transport.o \ 12 12 target_core_cdb.o \ 13 + target_core_sbc.o \ 13 14 target_core_spc.o \ 14 15 target_core_ua.o \ 15 16 target_core_rd.o \
+1
drivers/target/target_core_file.c
··· 561 561 .allocate_virtdevice = fd_allocate_virtdevice, 562 562 .create_virtdevice = fd_create_virtdevice, 563 563 .free_device = fd_free_device, 564 + .parse_cdb = sbc_parse_cdb, 564 565 .execute_cmd = fd_execute_cmd, 565 566 .do_sync_cache = fd_emulate_sync_cache, 566 567 .check_configfs_dev_params = fd_check_configfs_dev_params,
+1
drivers/target/target_core_iblock.c
··· 653 653 .allocate_virtdevice = iblock_allocate_virtdevice, 654 654 .create_virtdevice = iblock_create_virtdevice, 655 655 .free_device = iblock_free_device, 656 + .parse_cdb = sbc_parse_cdb, 656 657 .execute_cmd = iblock_execute_cmd, 657 658 .do_discard = iblock_do_discard, 658 659 .do_sync_cache = iblock_emulate_sync_cache,
-3
drivers/target/target_core_internal.h
··· 96 96 struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun); 97 97 int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); 98 98 99 - /* target_core_spc.c */ 100 - int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size, bool passthrough); 101 - 102 99 /* target_core_transport.c */ 103 100 extern struct kmem_cache *se_tmr_req_cache; 104 101
+469 -1
drivers/target/target_core_pscsi.c
··· 35 35 #include <linux/spinlock.h> 36 36 #include <linux/genhd.h> 37 37 #include <linux/cdrom.h> 38 - #include <linux/file.h> 38 + #include <linux/ratelimit.h> 39 39 #include <linux/module.h> 40 + #include <asm/unaligned.h> 41 + 40 42 #include <scsi/scsi.h> 41 43 #include <scsi/scsi_device.h> 42 44 #include <scsi/scsi_cmnd.h> ··· 48 46 #include <target/target_core_base.h> 49 47 #include <target/target_core_backend.h> 50 48 49 + #include "target_core_alua.h" 51 50 #include "target_core_pscsi.h" 52 51 53 52 #define ISPRINT(a) ((a >= ' ') && (a <= '~')) ··· 1022 1019 return -ENOMEM; 1023 1020 } 1024 1021 1022 + static inline u32 pscsi_get_sectors_6( 1023 + unsigned char *cdb, 1024 + struct se_cmd *cmd, 1025 + int *ret) 1026 + { 1027 + struct se_device *dev = cmd->se_dev; 1028 + 1029 + /* 1030 + * Assume TYPE_DISK for non struct se_device objects. 1031 + * Use 8-bit sector value. 1032 + */ 1033 + if (!dev) 1034 + goto type_disk; 1035 + 1036 + /* 1037 + * Use 24-bit allocation length for TYPE_TAPE. 1038 + */ 1039 + if (dev->transport->get_device_type(dev) == TYPE_TAPE) 1040 + return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; 1041 + 1042 + /* 1043 + * Everything else assume TYPE_DISK Sector CDB location. 1044 + * Use 8-bit sector value. SBC-3 says: 1045 + * 1046 + * A TRANSFER LENGTH field set to zero specifies that 256 1047 + * logical blocks shall be written. Any other value 1048 + * specifies the number of logical blocks that shall be 1049 + * written. 1050 + */ 1051 + type_disk: 1052 + return cdb[4] ? : 256; 1053 + } 1054 + 1055 + static inline u32 pscsi_get_sectors_10( 1056 + unsigned char *cdb, 1057 + struct se_cmd *cmd, 1058 + int *ret) 1059 + { 1060 + struct se_device *dev = cmd->se_dev; 1061 + 1062 + /* 1063 + * Assume TYPE_DISK for non struct se_device objects. 1064 + * Use 16-bit sector value. 
1065 + */ 1066 + if (!dev) 1067 + goto type_disk; 1068 + 1069 + /* 1070 + * XXX_10 is not defined in SSC, throw an exception 1071 + */ 1072 + if (dev->transport->get_device_type(dev) == TYPE_TAPE) { 1073 + *ret = -EINVAL; 1074 + return 0; 1075 + } 1076 + 1077 + /* 1078 + * Everything else assume TYPE_DISK Sector CDB location. 1079 + * Use 16-bit sector value. 1080 + */ 1081 + type_disk: 1082 + return (u32)(cdb[7] << 8) + cdb[8]; 1083 + } 1084 + 1085 + static inline u32 pscsi_get_sectors_12( 1086 + unsigned char *cdb, 1087 + struct se_cmd *cmd, 1088 + int *ret) 1089 + { 1090 + struct se_device *dev = cmd->se_dev; 1091 + 1092 + /* 1093 + * Assume TYPE_DISK for non struct se_device objects. 1094 + * Use 32-bit sector value. 1095 + */ 1096 + if (!dev) 1097 + goto type_disk; 1098 + 1099 + /* 1100 + * XXX_12 is not defined in SSC, throw an exception 1101 + */ 1102 + if (dev->transport->get_device_type(dev) == TYPE_TAPE) { 1103 + *ret = -EINVAL; 1104 + return 0; 1105 + } 1106 + 1107 + /* 1108 + * Everything else assume TYPE_DISK Sector CDB location. 1109 + * Use 32-bit sector value. 1110 + */ 1111 + type_disk: 1112 + return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; 1113 + } 1114 + 1115 + static inline u32 pscsi_get_sectors_16( 1116 + unsigned char *cdb, 1117 + struct se_cmd *cmd, 1118 + int *ret) 1119 + { 1120 + struct se_device *dev = cmd->se_dev; 1121 + 1122 + /* 1123 + * Assume TYPE_DISK for non struct se_device objects. 1124 + * Use 32-bit sector value. 1125 + */ 1126 + if (!dev) 1127 + goto type_disk; 1128 + 1129 + /* 1130 + * Use 24-bit allocation length for TYPE_TAPE. 
1131 + */ 1132 + if (dev->transport->get_device_type(dev) == TYPE_TAPE) 1133 + return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; 1134 + 1135 + type_disk: 1136 + return (u32)(cdb[10] << 24) + (cdb[11] << 16) + 1137 + (cdb[12] << 8) + cdb[13]; 1138 + } 1139 + 1140 + /* 1141 + * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants 1142 + */ 1143 + static inline u32 pscsi_get_sectors_32( 1144 + unsigned char *cdb, 1145 + struct se_cmd *cmd, 1146 + int *ret) 1147 + { 1148 + /* 1149 + * Assume TYPE_DISK for non struct se_device objects. 1150 + * Use 32-bit sector value. 1151 + */ 1152 + return (u32)(cdb[28] << 24) + (cdb[29] << 16) + 1153 + (cdb[30] << 8) + cdb[31]; 1154 + 1155 + } 1156 + 1157 + static inline u32 pscsi_get_lba_21(unsigned char *cdb) 1158 + { 1159 + return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 1160 + } 1161 + 1162 + static inline u32 pscsi_get_lba_32(unsigned char *cdb) 1163 + { 1164 + return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 1165 + } 1166 + 1167 + static inline unsigned long long pscsi_get_lba_64(unsigned char *cdb) 1168 + { 1169 + unsigned int __v1, __v2; 1170 + 1171 + __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 1172 + __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1173 + 1174 + return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 1175 + } 1176 + 1177 + /* 1178 + * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs 1179 + */ 1180 + static inline unsigned long long pscsi_get_lba_64_ext(unsigned char *cdb) 1181 + { 1182 + unsigned int __v1, __v2; 1183 + 1184 + __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; 1185 + __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; 1186 + 1187 + return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 1188 + } 1189 + 1190 + 1191 + static inline u32 pscsi_get_size( 1192 + u32 sectors, 1193 + unsigned char *cdb, 1194 + struct se_cmd *cmd) 1195 + { 1196 + struct se_device *dev = 
cmd->se_dev; 1197 + 1198 + if (dev->transport->get_device_type(dev) == TYPE_TAPE) { 1199 + if (cdb[1] & 1) { /* sectors */ 1200 + return dev->se_sub_dev->se_dev_attrib.block_size * sectors; 1201 + } else /* bytes */ 1202 + return sectors; 1203 + } 1204 + 1205 + pr_debug("Returning block_size: %u, sectors: %u == %u for" 1206 + " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, 1207 + sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors, 1208 + dev->transport->name); 1209 + 1210 + return dev->se_sub_dev->se_dev_attrib.block_size * sectors; 1211 + } 1212 + 1213 + static int pscsi_parse_cdb(struct se_cmd *cmd, unsigned int *size) 1214 + { 1215 + struct se_device *dev = cmd->se_dev; 1216 + struct se_subsystem_dev *su_dev = dev->se_sub_dev; 1217 + unsigned char *cdb = cmd->t_task_cdb; 1218 + int sector_ret = 0; 1219 + u32 sectors = 0; 1220 + u16 service_action; 1221 + int ret; 1222 + 1223 + if (cmd->se_cmd_flags & SCF_BIDI) 1224 + goto out_unsupported_cdb; 1225 + 1226 + switch (cdb[0]) { 1227 + case READ_6: 1228 + sectors = pscsi_get_sectors_6(cdb, cmd, &sector_ret); 1229 + if (sector_ret) 1230 + goto out_unsupported_cdb; 1231 + *size = pscsi_get_size(sectors, cdb, cmd); 1232 + cmd->t_task_lba = pscsi_get_lba_21(cdb); 1233 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1234 + break; 1235 + case READ_10: 1236 + sectors = pscsi_get_sectors_10(cdb, cmd, &sector_ret); 1237 + if (sector_ret) 1238 + goto out_unsupported_cdb; 1239 + *size = pscsi_get_size(sectors, cdb, cmd); 1240 + cmd->t_task_lba = pscsi_get_lba_32(cdb); 1241 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1242 + break; 1243 + case READ_12: 1244 + sectors = pscsi_get_sectors_12(cdb, cmd, &sector_ret); 1245 + if (sector_ret) 1246 + goto out_unsupported_cdb; 1247 + *size = pscsi_get_size(sectors, cdb, cmd); 1248 + cmd->t_task_lba = pscsi_get_lba_32(cdb); 1249 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1250 + break; 1251 + case READ_16: 1252 + sectors = pscsi_get_sectors_16(cdb, cmd, &sector_ret); 1253 + if 
(sector_ret) 1254 + goto out_unsupported_cdb; 1255 + *size = pscsi_get_size(sectors, cdb, cmd); 1256 + cmd->t_task_lba = pscsi_get_lba_64(cdb); 1257 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1258 + break; 1259 + case WRITE_6: 1260 + sectors = pscsi_get_sectors_6(cdb, cmd, &sector_ret); 1261 + if (sector_ret) 1262 + goto out_unsupported_cdb; 1263 + *size = pscsi_get_size(sectors, cdb, cmd); 1264 + cmd->t_task_lba = pscsi_get_lba_21(cdb); 1265 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1266 + break; 1267 + case WRITE_10: 1268 + case WRITE_VERIFY: 1269 + sectors = pscsi_get_sectors_10(cdb, cmd, &sector_ret); 1270 + if (sector_ret) 1271 + goto out_unsupported_cdb; 1272 + *size = pscsi_get_size(sectors, cdb, cmd); 1273 + cmd->t_task_lba = pscsi_get_lba_32(cdb); 1274 + if (cdb[1] & 0x8) 1275 + cmd->se_cmd_flags |= SCF_FUA; 1276 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1277 + break; 1278 + case WRITE_12: 1279 + sectors = pscsi_get_sectors_12(cdb, cmd, &sector_ret); 1280 + if (sector_ret) 1281 + goto out_unsupported_cdb; 1282 + *size = pscsi_get_size(sectors, cdb, cmd); 1283 + cmd->t_task_lba = pscsi_get_lba_32(cdb); 1284 + if (cdb[1] & 0x8) 1285 + cmd->se_cmd_flags |= SCF_FUA; 1286 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1287 + break; 1288 + case WRITE_16: 1289 + sectors = pscsi_get_sectors_16(cdb, cmd, &sector_ret); 1290 + if (sector_ret) 1291 + goto out_unsupported_cdb; 1292 + *size = pscsi_get_size(sectors, cdb, cmd); 1293 + cmd->t_task_lba = pscsi_get_lba_64(cdb); 1294 + if (cdb[1] & 0x8) 1295 + cmd->se_cmd_flags |= SCF_FUA; 1296 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1297 + break; 1298 + case VARIABLE_LENGTH_CMD: 1299 + service_action = get_unaligned_be16(&cdb[8]); 1300 + switch (service_action) { 1301 + case WRITE_SAME_32: 1302 + sectors = pscsi_get_sectors_32(cdb, cmd, &sector_ret); 1303 + if (sector_ret) 1304 + goto out_unsupported_cdb; 1305 + 1306 + if (!sectors) { 1307 + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 1308 + " supported\n"); 1309 + goto 
out_invalid_cdb_field; 1310 + } 1311 + 1312 + *size = pscsi_get_size(1, cdb, cmd); 1313 + cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 1314 + break; 1315 + default: 1316 + pr_err("VARIABLE_LENGTH_CMD service action" 1317 + " 0x%04x not supported\n", service_action); 1318 + goto out_unsupported_cdb; 1319 + } 1320 + break; 1321 + case GPCMD_READ_BUFFER_CAPACITY: 1322 + case GPCMD_SEND_OPC: 1323 + *size = (cdb[7] << 8) + cdb[8]; 1324 + break; 1325 + case READ_BLOCK_LIMITS: 1326 + *size = READ_BLOCK_LEN; 1327 + break; 1328 + case GPCMD_GET_CONFIGURATION: 1329 + case GPCMD_READ_FORMAT_CAPACITIES: 1330 + case GPCMD_READ_DISC_INFO: 1331 + case GPCMD_READ_TRACK_RZONE_INFO: 1332 + *size = (cdb[7] << 8) + cdb[8]; 1333 + break; 1334 + case GPCMD_MECHANISM_STATUS: 1335 + case GPCMD_READ_DVD_STRUCTURE: 1336 + *size = (cdb[8] << 8) + cdb[9]; 1337 + break; 1338 + case READ_POSITION: 1339 + *size = READ_POSITION_LEN; 1340 + break; 1341 + case READ_BUFFER: 1342 + *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 1343 + break; 1344 + case READ_CAPACITY: 1345 + *size = READ_CAP_LEN; 1346 + break; 1347 + case READ_MEDIA_SERIAL_NUMBER: 1348 + case SERVICE_ACTION_IN: 1349 + case ACCESS_CONTROL_IN: 1350 + case ACCESS_CONTROL_OUT: 1351 + *size = (cdb[10] << 24) | (cdb[11] << 16) | 1352 + (cdb[12] << 8) | cdb[13]; 1353 + break; 1354 + case READ_TOC: 1355 + *size = cdb[8]; 1356 + break; 1357 + case READ_ELEMENT_STATUS: 1358 + *size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; 1359 + break; 1360 + case SYNCHRONIZE_CACHE: 1361 + case SYNCHRONIZE_CACHE_16: 1362 + /* 1363 + * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE 1364 + */ 1365 + if (cdb[0] == SYNCHRONIZE_CACHE) { 1366 + sectors = pscsi_get_sectors_10(cdb, cmd, &sector_ret); 1367 + cmd->t_task_lba = pscsi_get_lba_32(cdb); 1368 + } else { 1369 + sectors = pscsi_get_sectors_16(cdb, cmd, &sector_ret); 1370 + cmd->t_task_lba = pscsi_get_lba_64(cdb); 1371 + } 1372 + if (sector_ret) 1373 + goto out_unsupported_cdb; 1374 + 
1375 + *size = pscsi_get_size(sectors, cdb, cmd); 1376 + break; 1377 + case UNMAP: 1378 + *size = get_unaligned_be16(&cdb[7]); 1379 + break; 1380 + case WRITE_SAME_16: 1381 + sectors = pscsi_get_sectors_16(cdb, cmd, &sector_ret); 1382 + if (sector_ret) 1383 + goto out_unsupported_cdb; 1384 + 1385 + if (!sectors) { 1386 + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 1387 + goto out_invalid_cdb_field; 1388 + } 1389 + 1390 + *size = pscsi_get_size(1, cdb, cmd); 1391 + cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 1392 + break; 1393 + case WRITE_SAME: 1394 + sectors = pscsi_get_sectors_10(cdb, cmd, &sector_ret); 1395 + if (sector_ret) 1396 + goto out_unsupported_cdb; 1397 + 1398 + if (!sectors) { 1399 + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 1400 + goto out_invalid_cdb_field; 1401 + } 1402 + 1403 + *size = pscsi_get_size(1, cdb, cmd); 1404 + cmd->t_task_lba = get_unaligned_be32(&cdb[2]); 1405 + break; 1406 + case ALLOW_MEDIUM_REMOVAL: 1407 + case ERASE: 1408 + case REZERO_UNIT: 1409 + case SEEK_10: 1410 + case SPACE: 1411 + case START_STOP: 1412 + case VERIFY: 1413 + case WRITE_FILEMARKS: 1414 + case GPCMD_CLOSE_TRACK: 1415 + case INITIALIZE_ELEMENT_STATUS: 1416 + case GPCMD_LOAD_UNLOAD: 1417 + case GPCMD_SET_SPEED: 1418 + case MOVE_MEDIUM: 1419 + *size = 0; 1420 + break; 1421 + case GET_EVENT_STATUS_NOTIFICATION: 1422 + *size = (cdb[7] << 8) | cdb[8]; 1423 + break; 1424 + case ATA_16: 1425 + switch (cdb[2] & 0x3) { /* T_LENGTH */ 1426 + case 0x0: 1427 + sectors = 0; 1428 + break; 1429 + case 0x1: 1430 + sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4]; 1431 + break; 1432 + case 0x2: 1433 + sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6]; 1434 + break; 1435 + case 0x3: 1436 + pr_err("T_LENGTH=0x3 not supported for ATA_16\n"); 1437 + goto out_invalid_cdb_field; 1438 + } 1439 + 1440 + /* BYTE_BLOCK */ 1441 + if (cdb[2] & 0x4) { 1442 + /* BLOCK T_TYPE: 512 or sector */ 1443 + *size = sectors * ((cdb[2] & 0x10) ? 
1444 + dev->se_sub_dev->se_dev_attrib.block_size : 512); 1445 + } else { 1446 + /* BYTE */ 1447 + *size = sectors; 1448 + } 1449 + break; 1450 + default: 1451 + ret = spc_parse_cdb(cmd, size, true); 1452 + if (ret) 1453 + return ret; 1454 + } 1455 + 1456 + if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1457 + if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) { 1458 + printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" 1459 + " big sectors %u exceeds fabric_max_sectors:" 1460 + " %u\n", cdb[0], sectors, 1461 + su_dev->se_dev_attrib.fabric_max_sectors); 1462 + goto out_invalid_cdb_field; 1463 + } 1464 + if (sectors > su_dev->se_dev_attrib.hw_max_sectors) { 1465 + printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" 1466 + " big sectors %u exceeds backend hw_max_sectors:" 1467 + " %u\n", cdb[0], sectors, 1468 + su_dev->se_dev_attrib.hw_max_sectors); 1469 + goto out_invalid_cdb_field; 1470 + } 1471 + } 1472 + 1473 + return 0; 1474 + 1475 + out_unsupported_cdb: 1476 + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1477 + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1478 + return -EINVAL; 1479 + out_invalid_cdb_field: 1480 + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1481 + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 1482 + return -EINVAL; 1483 + } 1484 + 1485 + 1025 1486 static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1026 1487 u32 sgl_nents, enum dma_data_direction data_direction) 1027 1488 { ··· 1655 1188 .create_virtdevice = pscsi_create_virtdevice, 1656 1189 .free_device = pscsi_free_device, 1657 1190 .transport_complete = pscsi_transport_complete, 1191 + .parse_cdb = pscsi_parse_cdb, 1658 1192 .execute_cmd = pscsi_execute_cmd, 1659 1193 .check_configfs_dev_params = pscsi_check_configfs_dev_params, 1660 1194 .set_configfs_dev_params = pscsi_set_configfs_dev_params,
+1
drivers/target/target_core_rd.c
··· 468 468 .allocate_virtdevice = rd_allocate_virtdevice, 469 469 .create_virtdevice = rd_create_virtdevice, 470 470 .free_device = rd_free_device, 471 + .parse_cdb = sbc_parse_cdb, 471 472 .execute_cmd = rd_execute_cmd, 472 473 .check_configfs_dev_params = rd_check_configfs_dev_params, 473 474 .set_configfs_dev_params = rd_set_configfs_dev_params,
+450
drivers/target/target_core_sbc.c
··· 1 + /* 2 + * SCSI Block Commands (SBC) parsing and emulation. 3 + * 4 + * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. 5 + * Copyright (c) 2005, 2006, 2007 SBE, Inc. 6 + * Copyright (c) 2007-2010 Rising Tide Systems 7 + * Copyright (c) 2008-2010 Linux-iSCSI.org 8 + * 9 + * Nicholas A. Bellinger <nab@kernel.org> 10 + * 11 + * This program is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License as published by 13 + * the Free Software Foundation; either version 2 of the License, or 14 + * (at your option) any later version. 15 + * 16 + * This program is distributed in the hope that it will be useful, 17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 + * GNU General Public License for more details. 20 + * 21 + * You should have received a copy of the GNU General Public License 22 + * along with this program; if not, write to the Free Software 23 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
24 + */ 25 + 26 + #include <linux/kernel.h> 27 + #include <linux/module.h> 28 + #include <linux/ratelimit.h> 29 + #include <asm/unaligned.h> 30 + #include <scsi/scsi.h> 31 + 32 + #include <target/target_core_base.h> 33 + #include <target/target_core_backend.h> 34 + #include <target/target_core_fabric.h> 35 + 36 + #include "target_core_internal.h" 37 + #include "target_core_ua.h" 38 + 39 + 40 + static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) 41 + { 42 + return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors; 43 + } 44 + 45 + static int sbc_check_valid_sectors(struct se_cmd *cmd) 46 + { 47 + struct se_device *dev = cmd->se_dev; 48 + unsigned long long end_lba; 49 + u32 sectors; 50 + 51 + sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size; 52 + end_lba = dev->transport->get_blocks(dev) + 1; 53 + 54 + if (cmd->t_task_lba + sectors > end_lba) { 55 + pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n", 56 + cmd->t_task_lba, sectors, end_lba); 57 + return -EINVAL; 58 + } 59 + 60 + return 0; 61 + } 62 + 63 + static inline u32 transport_get_sectors_6(unsigned char *cdb) 64 + { 65 + /* 66 + * Use 8-bit sector value. SBC-3 says: 67 + * 68 + * A TRANSFER LENGTH field set to zero specifies that 256 69 + * logical blocks shall be written. Any other value 70 + * specifies the number of logical blocks that shall be 71 + * written. 72 + */ 73 + return cdb[4] ? 
: 256; 74 + } 75 + 76 + static inline u32 transport_get_sectors_10(unsigned char *cdb) 77 + { 78 + return (u32)(cdb[7] << 8) + cdb[8]; 79 + } 80 + 81 + static inline u32 transport_get_sectors_12(unsigned char *cdb) 82 + { 83 + return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; 84 + } 85 + 86 + static inline u32 transport_get_sectors_16(unsigned char *cdb) 87 + { 88 + return (u32)(cdb[10] << 24) + (cdb[11] << 16) + 89 + (cdb[12] << 8) + cdb[13]; 90 + } 91 + 92 + /* 93 + * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants 94 + */ 95 + static inline u32 transport_get_sectors_32(unsigned char *cdb) 96 + { 97 + return (u32)(cdb[28] << 24) + (cdb[29] << 16) + 98 + (cdb[30] << 8) + cdb[31]; 99 + 100 + } 101 + 102 + static inline u32 transport_lba_21(unsigned char *cdb) 103 + { 104 + return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 105 + } 106 + 107 + static inline u32 transport_lba_32(unsigned char *cdb) 108 + { 109 + return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 110 + } 111 + 112 + static inline unsigned long long transport_lba_64(unsigned char *cdb) 113 + { 114 + unsigned int __v1, __v2; 115 + 116 + __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 117 + __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 118 + 119 + return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 120 + } 121 + 122 + /* 123 + * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs 124 + */ 125 + static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) 126 + { 127 + unsigned int __v1, __v2; 128 + 129 + __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; 130 + __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; 131 + 132 + return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 133 + } 134 + 135 + static int sbc_write_same_supported(struct se_device *dev, 136 + unsigned char *flags) 137 + { 138 + if ((flags[0] & 0x04) || (flags[0] & 0x02)) { 139 + 
pr_err("WRITE_SAME PBDATA and LBDATA" 140 + " bits not supported for Block Discard" 141 + " Emulation\n"); 142 + return -ENOSYS; 143 + } 144 + 145 + /* 146 + * Currently for the emulated case we only accept 147 + * tpws with the UNMAP=1 bit set. 148 + */ 149 + if (!(flags[0] & 0x08)) { 150 + pr_err("WRITE_SAME w/o UNMAP bit not" 151 + " supported for Block Discard Emulation\n"); 152 + return -ENOSYS; 153 + } 154 + 155 + return 0; 156 + } 157 + 158 + static void xdreadwrite_callback(struct se_cmd *cmd) 159 + { 160 + unsigned char *buf, *addr; 161 + struct scatterlist *sg; 162 + unsigned int offset; 163 + int i; 164 + int count; 165 + /* 166 + * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command 167 + * 168 + * 1) read the specified logical block(s); 169 + * 2) transfer logical blocks from the data-out buffer; 170 + * 3) XOR the logical blocks transferred from the data-out buffer with 171 + * the logical blocks read, storing the resulting XOR data in a buffer; 172 + * 4) if the DISABLE WRITE bit is set to zero, then write the logical 173 + * blocks transferred from the data-out buffer; and 174 + * 5) transfer the resulting XOR data to the data-in buffer. 
175 + */ 176 + buf = kmalloc(cmd->data_length, GFP_KERNEL); 177 + if (!buf) { 178 + pr_err("Unable to allocate xor_callback buf\n"); 179 + return; 180 + } 181 + /* 182 + * Copy the scatterlist WRITE buffer located at cmd->t_data_sg 183 + * into the locally allocated *buf 184 + */ 185 + sg_copy_to_buffer(cmd->t_data_sg, 186 + cmd->t_data_nents, 187 + buf, 188 + cmd->data_length); 189 + 190 + /* 191 + * Now perform the XOR against the BIDI read memory located at 192 + * cmd->t_mem_bidi_list 193 + */ 194 + 195 + offset = 0; 196 + for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 197 + addr = kmap_atomic(sg_page(sg)); 198 + if (!addr) 199 + goto out; 200 + 201 + for (i = 0; i < sg->length; i++) 202 + *(addr + sg->offset + i) ^= *(buf + offset + i); 203 + 204 + offset += sg->length; 205 + kunmap_atomic(addr); 206 + } 207 + 208 + out: 209 + kfree(buf); 210 + } 211 + 212 + int sbc_parse_cdb(struct se_cmd *cmd, unsigned int *size) 213 + { 214 + struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; 215 + struct se_device *dev = cmd->se_dev; 216 + unsigned char *cdb = cmd->t_task_cdb; 217 + u32 sectors = 0; 218 + int ret; 219 + 220 + switch (cdb[0]) { 221 + case READ_6: 222 + sectors = transport_get_sectors_6(cdb); 223 + cmd->t_task_lba = transport_lba_21(cdb); 224 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 225 + break; 226 + case READ_10: 227 + sectors = transport_get_sectors_10(cdb); 228 + cmd->t_task_lba = transport_lba_32(cdb); 229 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 230 + break; 231 + case READ_12: 232 + sectors = transport_get_sectors_12(cdb); 233 + cmd->t_task_lba = transport_lba_32(cdb); 234 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 235 + break; 236 + case READ_16: 237 + sectors = transport_get_sectors_16(cdb); 238 + cmd->t_task_lba = transport_lba_64(cdb); 239 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 240 + break; 241 + case WRITE_6: 242 + sectors = transport_get_sectors_6(cdb); 243 + cmd->t_task_lba = transport_lba_21(cdb); 244 + 
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 245 + break; 246 + case WRITE_10: 247 + case WRITE_VERIFY: 248 + sectors = transport_get_sectors_10(cdb); 249 + cmd->t_task_lba = transport_lba_32(cdb); 250 + if (cdb[1] & 0x8) 251 + cmd->se_cmd_flags |= SCF_FUA; 252 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 253 + break; 254 + case WRITE_12: 255 + sectors = transport_get_sectors_12(cdb); 256 + cmd->t_task_lba = transport_lba_32(cdb); 257 + if (cdb[1] & 0x8) 258 + cmd->se_cmd_flags |= SCF_FUA; 259 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 260 + break; 261 + case WRITE_16: 262 + sectors = transport_get_sectors_16(cdb); 263 + cmd->t_task_lba = transport_lba_64(cdb); 264 + if (cdb[1] & 0x8) 265 + cmd->se_cmd_flags |= SCF_FUA; 266 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 267 + break; 268 + case XDWRITEREAD_10: 269 + if ((cmd->data_direction != DMA_TO_DEVICE) || 270 + !(cmd->se_cmd_flags & SCF_BIDI)) 271 + goto out_invalid_cdb_field; 272 + sectors = transport_get_sectors_10(cdb); 273 + 274 + cmd->t_task_lba = transport_lba_32(cdb); 275 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 276 + 277 + /* 278 + * Setup BIDI XOR callback to be run after I/O completion. 279 + */ 280 + cmd->transport_complete_callback = &xdreadwrite_callback; 281 + if (cdb[1] & 0x8) 282 + cmd->se_cmd_flags |= SCF_FUA; 283 + break; 284 + case VARIABLE_LENGTH_CMD: 285 + { 286 + u16 service_action = get_unaligned_be16(&cdb[8]); 287 + switch (service_action) { 288 + case XDWRITEREAD_32: 289 + sectors = transport_get_sectors_32(cdb); 290 + 291 + /* 292 + * Use WRITE_32 and READ_32 opcodes for the emulated 293 + * XDWRITE_READ_32 logic. 294 + */ 295 + cmd->t_task_lba = transport_lba_64_ext(cdb); 296 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 297 + 298 + /* 299 + * Setup BIDI XOR callback to be run during after I/O 300 + * completion. 
301 + */ 302 + cmd->transport_complete_callback = &xdreadwrite_callback; 303 + if (cdb[1] & 0x8) 304 + cmd->se_cmd_flags |= SCF_FUA; 305 + break; 306 + case WRITE_SAME_32: 307 + sectors = transport_get_sectors_32(cdb); 308 + if (!sectors) { 309 + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 310 + " supported\n"); 311 + goto out_invalid_cdb_field; 312 + } 313 + 314 + *size = sbc_get_size(cmd, 1); 315 + cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 316 + 317 + if (sbc_write_same_supported(dev, &cdb[10]) < 0) 318 + goto out_unsupported_cdb; 319 + cmd->execute_cmd = target_emulate_write_same; 320 + break; 321 + default: 322 + pr_err("VARIABLE_LENGTH_CMD service action" 323 + " 0x%04x not supported\n", service_action); 324 + goto out_unsupported_cdb; 325 + } 326 + break; 327 + } 328 + case READ_CAPACITY: 329 + *size = READ_CAP_LEN; 330 + cmd->execute_cmd = target_emulate_readcapacity; 331 + break; 332 + case SERVICE_ACTION_IN: 333 + switch (cmd->t_task_cdb[1] & 0x1f) { 334 + case SAI_READ_CAPACITY_16: 335 + cmd->execute_cmd = target_emulate_readcapacity_16; 336 + break; 337 + default: 338 + pr_err("Unsupported SA: 0x%02x\n", 339 + cmd->t_task_cdb[1] & 0x1f); 340 + goto out_invalid_cdb_field; 341 + } 342 + *size = (cdb[10] << 24) | (cdb[11] << 16) | 343 + (cdb[12] << 8) | cdb[13]; 344 + break; 345 + case SYNCHRONIZE_CACHE: 346 + case SYNCHRONIZE_CACHE_16: 347 + /* 348 + * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE 349 + */ 350 + if (cdb[0] == SYNCHRONIZE_CACHE) { 351 + sectors = transport_get_sectors_10(cdb); 352 + cmd->t_task_lba = transport_lba_32(cdb); 353 + } else { 354 + sectors = transport_get_sectors_16(cdb); 355 + cmd->t_task_lba = transport_lba_64(cdb); 356 + } 357 + 358 + *size = sbc_get_size(cmd, sectors); 359 + 360 + /* 361 + * Check to ensure that LBA + Range does not exceed past end of 362 + * device for IBLOCK and FILEIO ->do_sync_cache() backend calls 363 + */ 364 + if (cmd->t_task_lba || sectors) { 365 + if 
(sbc_check_valid_sectors(cmd) < 0) 366 + goto out_invalid_cdb_field; 367 + } 368 + cmd->execute_cmd = target_emulate_synchronize_cache; 369 + break; 370 + case UNMAP: 371 + *size = get_unaligned_be16(&cdb[7]); 372 + cmd->execute_cmd = target_emulate_unmap; 373 + break; 374 + case WRITE_SAME_16: 375 + sectors = transport_get_sectors_16(cdb); 376 + if (!sectors) { 377 + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 378 + goto out_invalid_cdb_field; 379 + } 380 + 381 + *size = sbc_get_size(cmd, 1); 382 + cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 383 + 384 + if (sbc_write_same_supported(dev, &cdb[1]) < 0) 385 + goto out_unsupported_cdb; 386 + cmd->execute_cmd = target_emulate_write_same; 387 + break; 388 + case WRITE_SAME: 389 + sectors = transport_get_sectors_10(cdb); 390 + if (!sectors) { 391 + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 392 + goto out_invalid_cdb_field; 393 + } 394 + 395 + *size = sbc_get_size(cmd, 1); 396 + cmd->t_task_lba = get_unaligned_be32(&cdb[2]); 397 + 398 + /* 399 + * Follow sbcr26 with WRITE_SAME (10) and check for the existence 400 + * of byte 1 bit 3 UNMAP instead of original reserved field 401 + */ 402 + if (sbc_write_same_supported(dev, &cdb[1]) < 0) 403 + goto out_unsupported_cdb; 404 + cmd->execute_cmd = target_emulate_write_same; 405 + break; 406 + case VERIFY: 407 + *size = 0; 408 + cmd->execute_cmd = target_emulate_noop; 409 + break; 410 + default: 411 + ret = spc_parse_cdb(cmd, size, false); 412 + if (ret) 413 + return ret; 414 + } 415 + 416 + /* reject any command that we don't have a handler for */ 417 + if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd) 418 + goto out_unsupported_cdb; 419 + 420 + if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 421 + if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) { 422 + printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" 423 + " big sectors %u exceeds fabric_max_sectors:" 424 + " %u\n", cdb[0], sectors, 425 + 
su_dev->se_dev_attrib.fabric_max_sectors); 426 + goto out_invalid_cdb_field; 427 + } 428 + if (sectors > su_dev->se_dev_attrib.hw_max_sectors) { 429 + printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" 430 + " big sectors %u exceeds backend hw_max_sectors:" 431 + " %u\n", cdb[0], sectors, 432 + su_dev->se_dev_attrib.hw_max_sectors); 433 + goto out_invalid_cdb_field; 434 + } 435 + 436 + *size = sbc_get_size(cmd, sectors); 437 + } 438 + 439 + return 0; 440 + 441 + out_unsupported_cdb: 442 + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 443 + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 444 + return -EINVAL; 445 + out_invalid_cdb_field: 446 + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 447 + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 448 + return -EINVAL; 449 + } 450 + EXPORT_SYMBOL(sbc_parse_cdb);
+1
drivers/target/target_core_spc.c
··· 152 152 cmd->sam_task_attr = MSG_HEAD_TAG; 153 153 break; 154 154 case TEST_UNIT_READY: 155 + *size = 0; 155 156 if (!passthrough) 156 157 cmd->execute_cmd = target_emulate_noop; 157 158 break;
+7 -729
drivers/target/target_core_transport.c
··· 1343 1343 } 1344 1344 } 1345 1345 1346 - static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); 1347 - 1348 1346 static int target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1349 1347 { 1350 1348 struct se_device *dev = cmd->se_dev; ··· 1469 1471 u32 pr_reg_type = 0; 1470 1472 u8 alua_ascq = 0; 1471 1473 unsigned long flags; 1474 + unsigned int size; 1472 1475 int ret; 1473 1476 1474 1477 transport_generic_prepare_cdb(cdb); ··· 1561 1562 */ 1562 1563 } 1563 1564 1564 - /* 1565 - * Setup the received CDB based on SCSI defined opcodes and 1566 - * perform unit attention, persistent reservations and ALUA 1567 - * checks for virtual device backends. The cmd->t_task_cdb 1568 - * pointer is expected to be setup before we reach this point. 1569 - */ 1570 - ret = transport_generic_cmd_sequencer(cmd, cdb); 1565 + ret = cmd->se_dev->transport->parse_cdb(cmd, &size); 1566 + if (ret < 0) 1567 + return ret; 1568 + 1569 + ret = target_cmd_size_check(cmd, size); 1571 1570 if (ret < 0) 1572 1571 return ret; 1573 1572 ··· 1691 1694 target_put_sess_cmd(se_sess, se_cmd); 1692 1695 return; 1693 1696 } 1694 - /* 1695 - * Sanitize CDBs via transport_generic_cmd_sequencer() and 1696 - * allocate the necessary tasks to complete the received CDB+data 1697 - */ 1697 + 1698 1698 rc = target_setup_cmd_from_cdb(se_cmd, cdb); 1699 1699 if (rc != 0) { 1700 1700 transport_generic_request_failure(se_cmd); ··· 1960 1966 } 1961 1967 EXPORT_SYMBOL(transport_generic_request_failure); 1962 1968 1963 - static inline u32 transport_lba_21(unsigned char *cdb) 1964 - { 1965 - return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 1966 - } 1967 - 1968 - static inline u32 transport_lba_32(unsigned char *cdb) 1969 - { 1970 - return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 1971 - } 1972 - 1973 - static inline unsigned long long transport_lba_64(unsigned char *cdb) 1974 - { 1975 - unsigned int __v1, __v2; 1976 - 1977 - __v1 = (cdb[2] << 24) | (cdb[3] << 
16) | (cdb[4] << 8) | cdb[5]; 1978 - __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1979 - 1980 - return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 1981 - } 1982 - 1983 - /* 1984 - * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs 1985 - */ 1986 - static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) 1987 - { 1988 - unsigned int __v1, __v2; 1989 - 1990 - __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; 1991 - __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; 1992 - 1993 - return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 1994 - } 1995 - 1996 1969 /* 1997 1970 * Called from Fabric Module context from transport_execute_tasks() 1998 1971 * ··· 2108 2147 return 0; 2109 2148 } 2110 2149 2111 - static inline u32 transport_get_sectors_6( 2112 - unsigned char *cdb, 2113 - struct se_cmd *cmd, 2114 - int *ret) 2115 - { 2116 - struct se_device *dev = cmd->se_dev; 2117 - 2118 - /* 2119 - * Assume TYPE_DISK for non struct se_device objects. 2120 - * Use 8-bit sector value. 2121 - */ 2122 - if (!dev) 2123 - goto type_disk; 2124 - 2125 - /* 2126 - * Use 24-bit allocation length for TYPE_TAPE. 2127 - */ 2128 - if (dev->transport->get_device_type(dev) == TYPE_TAPE) 2129 - return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; 2130 - 2131 - /* 2132 - * Everything else assume TYPE_DISK Sector CDB location. 2133 - * Use 8-bit sector value. SBC-3 says: 2134 - * 2135 - * A TRANSFER LENGTH field set to zero specifies that 256 2136 - * logical blocks shall be written. Any other value 2137 - * specifies the number of logical blocks that shall be 2138 - * written. 2139 - */ 2140 - type_disk: 2141 - return cdb[4] ? 
: 256; 2142 - } 2143 - 2144 - static inline u32 transport_get_sectors_10( 2145 - unsigned char *cdb, 2146 - struct se_cmd *cmd, 2147 - int *ret) 2148 - { 2149 - struct se_device *dev = cmd->se_dev; 2150 - 2151 - /* 2152 - * Assume TYPE_DISK for non struct se_device objects. 2153 - * Use 16-bit sector value. 2154 - */ 2155 - if (!dev) 2156 - goto type_disk; 2157 - 2158 - /* 2159 - * XXX_10 is not defined in SSC, throw an exception 2160 - */ 2161 - if (dev->transport->get_device_type(dev) == TYPE_TAPE) { 2162 - *ret = -EINVAL; 2163 - return 0; 2164 - } 2165 - 2166 - /* 2167 - * Everything else assume TYPE_DISK Sector CDB location. 2168 - * Use 16-bit sector value. 2169 - */ 2170 - type_disk: 2171 - return (u32)(cdb[7] << 8) + cdb[8]; 2172 - } 2173 - 2174 - static inline u32 transport_get_sectors_12( 2175 - unsigned char *cdb, 2176 - struct se_cmd *cmd, 2177 - int *ret) 2178 - { 2179 - struct se_device *dev = cmd->se_dev; 2180 - 2181 - /* 2182 - * Assume TYPE_DISK for non struct se_device objects. 2183 - * Use 32-bit sector value. 2184 - */ 2185 - if (!dev) 2186 - goto type_disk; 2187 - 2188 - /* 2189 - * XXX_12 is not defined in SSC, throw an exception 2190 - */ 2191 - if (dev->transport->get_device_type(dev) == TYPE_TAPE) { 2192 - *ret = -EINVAL; 2193 - return 0; 2194 - } 2195 - 2196 - /* 2197 - * Everything else assume TYPE_DISK Sector CDB location. 2198 - * Use 32-bit sector value. 2199 - */ 2200 - type_disk: 2201 - return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; 2202 - } 2203 - 2204 - static inline u32 transport_get_sectors_16( 2205 - unsigned char *cdb, 2206 - struct se_cmd *cmd, 2207 - int *ret) 2208 - { 2209 - struct se_device *dev = cmd->se_dev; 2210 - 2211 - /* 2212 - * Assume TYPE_DISK for non struct se_device objects. 2213 - * Use 32-bit sector value. 2214 - */ 2215 - if (!dev) 2216 - goto type_disk; 2217 - 2218 - /* 2219 - * Use 24-bit allocation length for TYPE_TAPE. 
2220 - */ 2221 - if (dev->transport->get_device_type(dev) == TYPE_TAPE) 2222 - return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; 2223 - 2224 - type_disk: 2225 - return (u32)(cdb[10] << 24) + (cdb[11] << 16) + 2226 - (cdb[12] << 8) + cdb[13]; 2227 - } 2228 - 2229 - /* 2230 - * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants 2231 - */ 2232 - static inline u32 transport_get_sectors_32( 2233 - unsigned char *cdb, 2234 - struct se_cmd *cmd, 2235 - int *ret) 2236 - { 2237 - /* 2238 - * Assume TYPE_DISK for non struct se_device objects. 2239 - * Use 32-bit sector value. 2240 - */ 2241 - return (u32)(cdb[28] << 24) + (cdb[29] << 16) + 2242 - (cdb[30] << 8) + cdb[31]; 2243 - 2244 - } 2245 - 2246 - static inline u32 transport_get_size( 2247 - u32 sectors, 2248 - unsigned char *cdb, 2249 - struct se_cmd *cmd) 2250 - { 2251 - struct se_device *dev = cmd->se_dev; 2252 - 2253 - if (dev->transport->get_device_type(dev) == TYPE_TAPE) { 2254 - if (cdb[1] & 1) { /* sectors */ 2255 - return dev->se_sub_dev->se_dev_attrib.block_size * sectors; 2256 - } else /* bytes */ 2257 - return sectors; 2258 - } 2259 - 2260 - pr_debug("Returning block_size: %u, sectors: %u == %u for" 2261 - " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, 2262 - sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors, 2263 - dev->transport->name); 2264 - 2265 - return dev->se_sub_dev->se_dev_attrib.block_size * sectors; 2266 - } 2267 - 2268 - static void transport_xor_callback(struct se_cmd *cmd) 2269 - { 2270 - unsigned char *buf, *addr; 2271 - struct scatterlist *sg; 2272 - unsigned int offset; 2273 - int i; 2274 - int count; 2275 - /* 2276 - * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command 2277 - * 2278 - * 1) read the specified logical block(s); 2279 - * 2) transfer logical blocks from the data-out buffer; 2280 - * 3) XOR the logical blocks transferred from the data-out buffer with 2281 - * the logical blocks read, storing the resulting XOR data in a buffer; 2282 - * 4) 
if the DISABLE WRITE bit is set to zero, then write the logical 2283 - * blocks transferred from the data-out buffer; and 2284 - * 5) transfer the resulting XOR data to the data-in buffer. 2285 - */ 2286 - buf = kmalloc(cmd->data_length, GFP_KERNEL); 2287 - if (!buf) { 2288 - pr_err("Unable to allocate xor_callback buf\n"); 2289 - return; 2290 - } 2291 - /* 2292 - * Copy the scatterlist WRITE buffer located at cmd->t_data_sg 2293 - * into the locally allocated *buf 2294 - */ 2295 - sg_copy_to_buffer(cmd->t_data_sg, 2296 - cmd->t_data_nents, 2297 - buf, 2298 - cmd->data_length); 2299 - 2300 - /* 2301 - * Now perform the XOR against the BIDI read memory located at 2302 - * cmd->t_mem_bidi_list 2303 - */ 2304 - 2305 - offset = 0; 2306 - for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 2307 - addr = kmap_atomic(sg_page(sg)); 2308 - if (!addr) 2309 - goto out; 2310 - 2311 - for (i = 0; i < sg->length; i++) 2312 - *(addr + sg->offset + i) ^= *(buf + offset + i); 2313 - 2314 - offset += sg->length; 2315 - kunmap_atomic(addr); 2316 - } 2317 - 2318 - out: 2319 - kfree(buf); 2320 - } 2321 - 2322 2150 /* 2323 2151 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd 2324 2152 */ ··· 2187 2437 } 2188 2438 2189 2439 return 0; 2190 - } 2191 - 2192 - static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) 2193 - { 2194 - /* 2195 - * Determine if the received WRITE_SAME is used to for direct 2196 - * passthrough into Linux/SCSI with struct request via TCM/pSCSI 2197 - * or we are signaling the use of internal WRITE_SAME + UNMAP=1 2198 - * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code. 
2199 - */ 2200 - int passthrough = (dev->transport->transport_type == 2201 - TRANSPORT_PLUGIN_PHBA_PDEV); 2202 - 2203 - if (!passthrough) { 2204 - if ((flags[0] & 0x04) || (flags[0] & 0x02)) { 2205 - pr_err("WRITE_SAME PBDATA and LBDATA" 2206 - " bits not supported for Block Discard" 2207 - " Emulation\n"); 2208 - return -ENOSYS; 2209 - } 2210 - /* 2211 - * Currently for the emulated case we only accept 2212 - * tpws with the UNMAP=1 bit set. 2213 - */ 2214 - if (!(flags[0] & 0x08)) { 2215 - pr_err("WRITE_SAME w/o UNMAP bit not" 2216 - " supported for Block Discard Emulation\n"); 2217 - return -ENOSYS; 2218 - } 2219 - } 2220 - 2221 - return 0; 2222 - } 2223 - 2224 - static int transport_generic_cmd_sequencer( 2225 - struct se_cmd *cmd, 2226 - unsigned char *cdb) 2227 - { 2228 - struct se_device *dev = cmd->se_dev; 2229 - struct se_subsystem_dev *su_dev = dev->se_sub_dev; 2230 - int sector_ret = 0, passthrough; 2231 - u32 sectors = 0, size = 0; 2232 - u16 service_action; 2233 - int ret; 2234 - 2235 - /* 2236 - * If we operate in passthrough mode we skip most CDB emulation and 2237 - * instead hand the commands down to the physical SCSI device. 
2238 - */ 2239 - passthrough = 2240 - (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); 2241 - 2242 - switch (cdb[0]) { 2243 - case READ_6: 2244 - sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); 2245 - if (sector_ret) 2246 - goto out_unsupported_cdb; 2247 - size = transport_get_size(sectors, cdb, cmd); 2248 - cmd->t_task_lba = transport_lba_21(cdb); 2249 - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 2250 - break; 2251 - case READ_10: 2252 - sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2253 - if (sector_ret) 2254 - goto out_unsupported_cdb; 2255 - size = transport_get_size(sectors, cdb, cmd); 2256 - cmd->t_task_lba = transport_lba_32(cdb); 2257 - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 2258 - break; 2259 - case READ_12: 2260 - sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); 2261 - if (sector_ret) 2262 - goto out_unsupported_cdb; 2263 - size = transport_get_size(sectors, cdb, cmd); 2264 - cmd->t_task_lba = transport_lba_32(cdb); 2265 - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 2266 - break; 2267 - case READ_16: 2268 - sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); 2269 - if (sector_ret) 2270 - goto out_unsupported_cdb; 2271 - size = transport_get_size(sectors, cdb, cmd); 2272 - cmd->t_task_lba = transport_lba_64(cdb); 2273 - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 2274 - break; 2275 - case WRITE_6: 2276 - sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); 2277 - if (sector_ret) 2278 - goto out_unsupported_cdb; 2279 - size = transport_get_size(sectors, cdb, cmd); 2280 - cmd->t_task_lba = transport_lba_21(cdb); 2281 - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 2282 - break; 2283 - case WRITE_10: 2284 - case WRITE_VERIFY: 2285 - sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2286 - if (sector_ret) 2287 - goto out_unsupported_cdb; 2288 - size = transport_get_size(sectors, cdb, cmd); 2289 - cmd->t_task_lba = transport_lba_32(cdb); 2290 - if (cdb[1] & 0x8) 2291 - cmd->se_cmd_flags |= SCF_FUA; 2292 - 
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 2293 - break; 2294 - case WRITE_12: 2295 - sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); 2296 - if (sector_ret) 2297 - goto out_unsupported_cdb; 2298 - size = transport_get_size(sectors, cdb, cmd); 2299 - cmd->t_task_lba = transport_lba_32(cdb); 2300 - if (cdb[1] & 0x8) 2301 - cmd->se_cmd_flags |= SCF_FUA; 2302 - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 2303 - break; 2304 - case WRITE_16: 2305 - sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); 2306 - if (sector_ret) 2307 - goto out_unsupported_cdb; 2308 - size = transport_get_size(sectors, cdb, cmd); 2309 - cmd->t_task_lba = transport_lba_64(cdb); 2310 - if (cdb[1] & 0x8) 2311 - cmd->se_cmd_flags |= SCF_FUA; 2312 - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 2313 - break; 2314 - case XDWRITEREAD_10: 2315 - if ((cmd->data_direction != DMA_TO_DEVICE) || 2316 - !(cmd->se_cmd_flags & SCF_BIDI)) 2317 - goto out_invalid_cdb_field; 2318 - sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2319 - if (sector_ret) 2320 - goto out_unsupported_cdb; 2321 - size = transport_get_size(sectors, cdb, cmd); 2322 - cmd->t_task_lba = transport_lba_32(cdb); 2323 - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 2324 - 2325 - /* 2326 - * Do now allow BIDI commands for passthrough mode. 2327 - */ 2328 - if (passthrough) 2329 - goto out_unsupported_cdb; 2330 - 2331 - /* 2332 - * Setup BIDI XOR callback to be run after I/O completion. 
2333 - */ 2334 - cmd->transport_complete_callback = &transport_xor_callback; 2335 - if (cdb[1] & 0x8) 2336 - cmd->se_cmd_flags |= SCF_FUA; 2337 - break; 2338 - case VARIABLE_LENGTH_CMD: 2339 - service_action = get_unaligned_be16(&cdb[8]); 2340 - switch (service_action) { 2341 - case XDWRITEREAD_32: 2342 - sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); 2343 - if (sector_ret) 2344 - goto out_unsupported_cdb; 2345 - size = transport_get_size(sectors, cdb, cmd); 2346 - /* 2347 - * Use WRITE_32 and READ_32 opcodes for the emulated 2348 - * XDWRITE_READ_32 logic. 2349 - */ 2350 - cmd->t_task_lba = transport_lba_64_ext(cdb); 2351 - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 2352 - 2353 - /* 2354 - * Do now allow BIDI commands for passthrough mode. 2355 - */ 2356 - if (passthrough) 2357 - goto out_unsupported_cdb; 2358 - 2359 - /* 2360 - * Setup BIDI XOR callback to be run during after I/O 2361 - * completion. 2362 - */ 2363 - cmd->transport_complete_callback = &transport_xor_callback; 2364 - if (cdb[1] & 0x8) 2365 - cmd->se_cmd_flags |= SCF_FUA; 2366 - break; 2367 - case WRITE_SAME_32: 2368 - sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); 2369 - if (sector_ret) 2370 - goto out_unsupported_cdb; 2371 - 2372 - if (sectors) 2373 - size = transport_get_size(1, cdb, cmd); 2374 - else { 2375 - pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 2376 - " supported\n"); 2377 - goto out_invalid_cdb_field; 2378 - } 2379 - 2380 - cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 2381 - 2382 - if (target_check_write_same_discard(&cdb[10], dev) < 0) 2383 - goto out_unsupported_cdb; 2384 - if (!passthrough) 2385 - cmd->execute_cmd = target_emulate_write_same; 2386 - break; 2387 - default: 2388 - pr_err("VARIABLE_LENGTH_CMD service action" 2389 - " 0x%04x not supported\n", service_action); 2390 - goto out_unsupported_cdb; 2391 - } 2392 - break; 2393 - case MAINTENANCE_IN: 2394 - if (dev->transport->get_device_type(dev) != TYPE_ROM) { 2395 - /* MAINTENANCE_IN from SCC-2 */ 2396 
- /* 2397 - * Check for emulated MI_REPORT_TARGET_PGS. 2398 - */ 2399 - if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS && 2400 - su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 2401 - cmd->execute_cmd = 2402 - target_emulate_report_target_port_groups; 2403 - } 2404 - size = (cdb[6] << 24) | (cdb[7] << 16) | 2405 - (cdb[8] << 8) | cdb[9]; 2406 - } else { 2407 - /* GPCMD_SEND_KEY from multi media commands */ 2408 - size = (cdb[8] << 8) + cdb[9]; 2409 - } 2410 - break; 2411 - case GPCMD_READ_BUFFER_CAPACITY: 2412 - case GPCMD_SEND_OPC: 2413 - size = (cdb[7] << 8) + cdb[8]; 2414 - break; 2415 - case READ_BLOCK_LIMITS: 2416 - size = READ_BLOCK_LEN; 2417 - break; 2418 - case GPCMD_GET_CONFIGURATION: 2419 - case GPCMD_READ_FORMAT_CAPACITIES: 2420 - case GPCMD_READ_DISC_INFO: 2421 - case GPCMD_READ_TRACK_RZONE_INFO: 2422 - size = (cdb[7] << 8) + cdb[8]; 2423 - break; 2424 - case GPCMD_MECHANISM_STATUS: 2425 - case GPCMD_READ_DVD_STRUCTURE: 2426 - size = (cdb[8] << 8) + cdb[9]; 2427 - break; 2428 - case READ_POSITION: 2429 - size = READ_POSITION_LEN; 2430 - break; 2431 - case MAINTENANCE_OUT: 2432 - if (dev->transport->get_device_type(dev) != TYPE_ROM) { 2433 - /* MAINTENANCE_OUT from SCC-2 2434 - * 2435 - * Check for emulated MO_SET_TARGET_PGS. 
2436 - */ 2437 - if (cdb[1] == MO_SET_TARGET_PGS && 2438 - su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 2439 - cmd->execute_cmd = 2440 - target_emulate_set_target_port_groups; 2441 - } 2442 - 2443 - size = (cdb[6] << 24) | (cdb[7] << 16) | 2444 - (cdb[8] << 8) | cdb[9]; 2445 - } else { 2446 - /* GPCMD_REPORT_KEY from multi media commands */ 2447 - size = (cdb[8] << 8) + cdb[9]; 2448 - } 2449 - break; 2450 - case READ_BUFFER: 2451 - size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 2452 - break; 2453 - case READ_CAPACITY: 2454 - size = READ_CAP_LEN; 2455 - if (!passthrough) 2456 - cmd->execute_cmd = target_emulate_readcapacity; 2457 - break; 2458 - case READ_MEDIA_SERIAL_NUMBER: 2459 - case SERVICE_ACTION_IN: 2460 - switch (cmd->t_task_cdb[1] & 0x1f) { 2461 - case SAI_READ_CAPACITY_16: 2462 - if (!passthrough) 2463 - cmd->execute_cmd = 2464 - target_emulate_readcapacity_16; 2465 - break; 2466 - default: 2467 - if (passthrough) 2468 - break; 2469 - 2470 - pr_err("Unsupported SA: 0x%02x\n", 2471 - cmd->t_task_cdb[1] & 0x1f); 2472 - goto out_invalid_cdb_field; 2473 - } 2474 - /*FALLTHROUGH*/ 2475 - case ACCESS_CONTROL_IN: 2476 - case ACCESS_CONTROL_OUT: 2477 - size = (cdb[10] << 24) | (cdb[11] << 16) | 2478 - (cdb[12] << 8) | cdb[13]; 2479 - break; 2480 - /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. 
*/ 2481 - #if 0 2482 - case GPCMD_READ_CD: 2483 - sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 2484 - size = (2336 * sectors); 2485 - break; 2486 - #endif 2487 - case READ_TOC: 2488 - size = cdb[8]; 2489 - break; 2490 - case READ_ELEMENT_STATUS: 2491 - size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; 2492 - break; 2493 - case SYNCHRONIZE_CACHE: 2494 - case SYNCHRONIZE_CACHE_16: 2495 - /* 2496 - * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE 2497 - */ 2498 - if (cdb[0] == SYNCHRONIZE_CACHE) { 2499 - sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2500 - cmd->t_task_lba = transport_lba_32(cdb); 2501 - } else { 2502 - sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); 2503 - cmd->t_task_lba = transport_lba_64(cdb); 2504 - } 2505 - if (sector_ret) 2506 - goto out_unsupported_cdb; 2507 - 2508 - size = transport_get_size(sectors, cdb, cmd); 2509 - 2510 - if (passthrough) 2511 - break; 2512 - 2513 - /* 2514 - * Check to ensure that LBA + Range does not exceed past end of 2515 - * device for IBLOCK and FILEIO ->do_sync_cache() backend calls 2516 - */ 2517 - if ((cmd->t_task_lba != 0) || (sectors != 0)) { 2518 - if (transport_cmd_get_valid_sectors(cmd) < 0) 2519 - goto out_invalid_cdb_field; 2520 - } 2521 - cmd->execute_cmd = target_emulate_synchronize_cache; 2522 - break; 2523 - case UNMAP: 2524 - size = get_unaligned_be16(&cdb[7]); 2525 - if (!passthrough) 2526 - cmd->execute_cmd = target_emulate_unmap; 2527 - break; 2528 - case WRITE_SAME_16: 2529 - sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); 2530 - if (sector_ret) 2531 - goto out_unsupported_cdb; 2532 - 2533 - if (sectors) 2534 - size = transport_get_size(1, cdb, cmd); 2535 - else { 2536 - pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 2537 - goto out_invalid_cdb_field; 2538 - } 2539 - 2540 - cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 2541 - 2542 - if (target_check_write_same_discard(&cdb[1], dev) < 0) 2543 - goto out_unsupported_cdb; 2544 - if 
(!passthrough) 2545 - cmd->execute_cmd = target_emulate_write_same; 2546 - break; 2547 - case WRITE_SAME: 2548 - sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2549 - if (sector_ret) 2550 - goto out_unsupported_cdb; 2551 - 2552 - if (sectors) 2553 - size = transport_get_size(1, cdb, cmd); 2554 - else { 2555 - pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 2556 - goto out_invalid_cdb_field; 2557 - } 2558 - 2559 - cmd->t_task_lba = get_unaligned_be32(&cdb[2]); 2560 - /* 2561 - * Follow sbcr26 with WRITE_SAME (10) and check for the existence 2562 - * of byte 1 bit 3 UNMAP instead of original reserved field 2563 - */ 2564 - if (target_check_write_same_discard(&cdb[1], dev) < 0) 2565 - goto out_unsupported_cdb; 2566 - if (!passthrough) 2567 - cmd->execute_cmd = target_emulate_write_same; 2568 - break; 2569 - case ALLOW_MEDIUM_REMOVAL: 2570 - case ERASE: 2571 - case REZERO_UNIT: 2572 - case SEEK_10: 2573 - case SPACE: 2574 - case START_STOP: 2575 - case VERIFY: 2576 - case WRITE_FILEMARKS: 2577 - if (!passthrough) 2578 - cmd->execute_cmd = target_emulate_noop; 2579 - break; 2580 - case GPCMD_CLOSE_TRACK: 2581 - case INITIALIZE_ELEMENT_STATUS: 2582 - case GPCMD_LOAD_UNLOAD: 2583 - case GPCMD_SET_SPEED: 2584 - case MOVE_MEDIUM: 2585 - break; 2586 - case GET_EVENT_STATUS_NOTIFICATION: 2587 - size = (cdb[7] << 8) | cdb[8]; 2588 - break; 2589 - case ATA_16: 2590 - /* Only support ATA passthrough to pSCSI backends.. */ 2591 - if (!passthrough) 2592 - goto out_unsupported_cdb; 2593 - 2594 - /* T_LENGTH */ 2595 - switch (cdb[2] & 0x3) { 2596 - case 0x0: 2597 - sectors = 0; 2598 - break; 2599 - case 0x1: 2600 - sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4]; 2601 - break; 2602 - case 0x2: 2603 - sectors = (((cdb[1] & 0x1) ? 
cdb[5] : 0) << 8) | cdb[6]; 2604 - break; 2605 - case 0x3: 2606 - pr_err("T_LENGTH=0x3 not supported for ATA_16\n"); 2607 - goto out_invalid_cdb_field; 2608 - } 2609 - 2610 - /* BYTE_BLOCK */ 2611 - if (cdb[2] & 0x4) { 2612 - /* BLOCK T_TYPE: 512 or sector */ 2613 - size = sectors * ((cdb[2] & 0x10) ? 2614 - dev->se_sub_dev->se_dev_attrib.block_size : 512); 2615 - } else { 2616 - /* BYTE */ 2617 - size = sectors; 2618 - } 2619 - break; 2620 - default: 2621 - ret = spc_parse_cdb(cmd, &size, passthrough); 2622 - if (ret) 2623 - return ret; 2624 - } 2625 - 2626 - ret = target_cmd_size_check(cmd, size); 2627 - if (ret) 2628 - return ret; 2629 - 2630 - if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 2631 - if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) { 2632 - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" 2633 - " big sectors %u exceeds fabric_max_sectors:" 2634 - " %u\n", cdb[0], sectors, 2635 - su_dev->se_dev_attrib.fabric_max_sectors); 2636 - goto out_invalid_cdb_field; 2637 - } 2638 - if (sectors > su_dev->se_dev_attrib.hw_max_sectors) { 2639 - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" 2640 - " big sectors %u exceeds backend hw_max_sectors:" 2641 - " %u\n", cdb[0], sectors, 2642 - su_dev->se_dev_attrib.hw_max_sectors); 2643 - goto out_invalid_cdb_field; 2644 - } 2645 - } 2646 - 2647 - /* reject any command that we don't have a handler for */ 2648 - if (!(passthrough || cmd->execute_cmd || 2649 - (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))) 2650 - goto out_unsupported_cdb; 2651 - 2652 - return 0; 2653 - 2654 - out_unsupported_cdb: 2655 - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 2656 - cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 2657 - return -EINVAL; 2658 - out_invalid_cdb_field: 2659 - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 2660 - cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 2661 - return -EINVAL; 2662 2440 } 2663 2441 2664 2442 /*
+5
include/target/target_core_backend.h
··· 24 24 struct se_subsystem_dev *, void *); 25 25 void (*free_device)(void *); 26 26 int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *); 27 + 28 + int (*parse_cdb)(struct se_cmd *cmd, unsigned int *size); 27 29 int (*execute_cmd)(struct se_cmd *, struct scatterlist *, u32, 28 30 enum dma_data_direction); 29 31 int (*do_discard)(struct se_device *, sector_t, u32); ··· 50 48 void *, struct se_dev_limits *, const char *, const char *); 51 49 52 50 void target_complete_cmd(struct se_cmd *, u8); 51 + 52 + int sbc_parse_cdb(struct se_cmd *cmd, unsigned int *size); 53 + int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size, bool passthrough); 53 54 54 55 void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); 55 56 int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);