Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target updates from Nicholas Bellinger:
"Lots of activity again this round for I/O performance optimizations
(per-cpu IDA pre-allocation for vhost + iscsi/target), and the
addition of new fabric independent features to target-core
(COMPARE_AND_WRITE + EXTENDED_COPY).

The main highlights include:

- Support for iscsi-target login multiplexing across individual
network portals
- Generic Per-cpu IDA logic (kent + akpm + clameter)
- Conversion of vhost to use per-cpu IDA pre-allocation for
descriptors, SGLs and userspace page pointer list
- Conversion of iscsi-target + iser-target to use per-cpu IDA
pre-allocation for descriptors
- Add support for generic COMPARE_AND_WRITE (AtomicTestandSet)
emulation for virtual backend drivers
- Add support for generic EXTENDED_COPY (CopyOffload) emulation for
virtual backend drivers.
- Add support for fast memory registration mode to iser-target (Vu)

The patches to add COMPARE_AND_WRITE and EXTENDED_COPY support are of
particular significance, which make us the first and only open source
target to support the full set of VAAI primitives.

Currently Linux clients are lacking upstream support to actually
utilize these primitives. However, with server side support now in
place for folks like MKP + ZAB working on the client, this logic once
reserved for the highest end of storage arrays, can now be run in VMs
on their laptops"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (50 commits)
target/iscsi: Bump versions to v4.1.0
target: Update copyright ownership/year information to 2013
iscsi-target: Bump default TCP listen backlog to 256
target: Fix >= v3.9+ regression in PR APTPL + ALUA metadata write-out
iscsi-target: Bump default CmdSN Depth to 64
iscsi-target: Remove unnecessary wait_for_completion in iscsi_get_thread_set
iscsi-target: Add thread_set->ts_activate_sem + use common deallocate
iscsi-target: Fix race with thread_pre_handler flush_signals + ISCSI_THREAD_SET_DIE
target: remove unused including <linux/version.h>
iser-target: introduce fast memory registration mode (FRWR)
iser-target: generalize rdma memory registration and cleanup
iser-target: move rdma wr processing to a shared function
target: Enable global EXTENDED_COPY setup/release
target: Add Third Party Copy (3PC) bit in INQUIRY response
target: Enable EXTENDED_COPY setup in spc_parse_cdb
target: Add support for EXTENDED_COPY copy offload emulation
target: Avoid non-existent tg_pt_gp_mem in target_alua_state_check
target: Add global device list for EXTENDED_COPY
target: Make helpers non static for EXTENDED_COPY command setup
target: Make spc_parse_naa_6h_vendor_specific non static
...

+3498 -787
+531 -236
drivers/infiniband/ulp/isert/ib_isert.c
··· 1 1 /******************************************************************************* 2 2 * This file contains iSCSI extentions for RDMA (iSER) Verbs 3 3 * 4 - * (c) Copyright 2013 RisingTide Systems LLC. 4 + * (c) Copyright 2013 Datera, Inc. 5 5 * 6 6 * Nicholas A. Bellinger <nab@linux-iscsi.org> 7 7 * ··· 39 39 static LIST_HEAD(device_list); 40 40 static struct workqueue_struct *isert_rx_wq; 41 41 static struct workqueue_struct *isert_comp_wq; 42 - static struct kmem_cache *isert_cmd_cache; 42 + 43 + static void 44 + isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); 45 + static int 46 + isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 47 + struct isert_rdma_wr *wr); 48 + static void 49 + isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); 50 + static int 51 + isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 52 + struct isert_rdma_wr *wr); 43 53 44 54 static void 45 55 isert_qp_event_callback(struct ib_event *e, void *context) ··· 90 80 { 91 81 struct isert_device *device = isert_conn->conn_device; 92 82 struct ib_qp_init_attr attr; 93 - struct ib_device_attr devattr; 94 83 int ret, index, min_index = 0; 95 - 96 - memset(&devattr, 0, sizeof(struct ib_device_attr)); 97 - ret = isert_query_device(cma_id->device, &devattr); 98 - if (ret) 99 - return ret; 100 84 101 85 mutex_lock(&device_list_mutex); 102 86 for (index = 0; index < device->cqs_used; index++) ··· 112 108 * FIXME: Use devattr.max_sge - 2 for max_send_sge as 113 109 * work-around for RDMA_READ.. 
114 110 */ 115 - attr.cap.max_send_sge = devattr.max_sge - 2; 111 + attr.cap.max_send_sge = device->dev_attr.max_sge - 2; 116 112 isert_conn->max_sge = attr.cap.max_send_sge; 117 113 118 114 attr.cap.max_recv_sge = 1; ··· 214 210 { 215 211 struct ib_device *ib_dev = device->ib_device; 216 212 struct isert_cq_desc *cq_desc; 213 + struct ib_device_attr *dev_attr; 217 214 int ret = 0, i, j; 215 + 216 + dev_attr = &device->dev_attr; 217 + ret = isert_query_device(ib_dev, dev_attr); 218 + if (ret) 219 + return ret; 220 + 221 + /* asign function handlers */ 222 + if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { 223 + device->use_frwr = 1; 224 + device->reg_rdma_mem = isert_reg_rdma_frwr; 225 + device->unreg_rdma_mem = isert_unreg_rdma_frwr; 226 + } else { 227 + device->use_frwr = 0; 228 + device->reg_rdma_mem = isert_map_rdma; 229 + device->unreg_rdma_mem = isert_unmap_cmd; 230 + } 218 231 219 232 device->cqs_used = min_t(int, num_online_cpus(), 220 233 device->ib_device->num_comp_vectors); 221 234 device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used); 222 - pr_debug("Using %d CQs, device %s supports %d vectors\n", 235 + pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n", 223 236 device->cqs_used, device->ib_device->name, 224 - device->ib_device->num_comp_vectors); 237 + device->ib_device->num_comp_vectors, device->use_frwr); 225 238 device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) * 226 239 device->cqs_used, GFP_KERNEL); 227 240 if (!device->cq_desc) { ··· 384 363 return device; 385 364 } 386 365 366 + static void 367 + isert_conn_free_frwr_pool(struct isert_conn *isert_conn) 368 + { 369 + struct fast_reg_descriptor *fr_desc, *tmp; 370 + int i = 0; 371 + 372 + if (list_empty(&isert_conn->conn_frwr_pool)) 373 + return; 374 + 375 + pr_debug("Freeing conn %p frwr pool", isert_conn); 376 + 377 + list_for_each_entry_safe(fr_desc, tmp, 378 + &isert_conn->conn_frwr_pool, list) { 379 + list_del(&fr_desc->list); 380 + 
ib_free_fast_reg_page_list(fr_desc->data_frpl); 381 + ib_dereg_mr(fr_desc->data_mr); 382 + kfree(fr_desc); 383 + ++i; 384 + } 385 + 386 + if (i < isert_conn->conn_frwr_pool_size) 387 + pr_warn("Pool still has %d regions registered\n", 388 + isert_conn->conn_frwr_pool_size - i); 389 + } 390 + 391 + static int 392 + isert_conn_create_frwr_pool(struct isert_conn *isert_conn) 393 + { 394 + struct fast_reg_descriptor *fr_desc; 395 + struct isert_device *device = isert_conn->conn_device; 396 + int i, ret; 397 + 398 + INIT_LIST_HEAD(&isert_conn->conn_frwr_pool); 399 + isert_conn->conn_frwr_pool_size = 0; 400 + for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) { 401 + fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); 402 + if (!fr_desc) { 403 + pr_err("Failed to allocate fast_reg descriptor\n"); 404 + ret = -ENOMEM; 405 + goto err; 406 + } 407 + 408 + fr_desc->data_frpl = 409 + ib_alloc_fast_reg_page_list(device->ib_device, 410 + ISCSI_ISER_SG_TABLESIZE); 411 + if (IS_ERR(fr_desc->data_frpl)) { 412 + pr_err("Failed to allocate fr_pg_list err=%ld\n", 413 + PTR_ERR(fr_desc->data_frpl)); 414 + ret = PTR_ERR(fr_desc->data_frpl); 415 + goto err; 416 + } 417 + 418 + fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd, 419 + ISCSI_ISER_SG_TABLESIZE); 420 + if (IS_ERR(fr_desc->data_mr)) { 421 + pr_err("Failed to allocate frmr err=%ld\n", 422 + PTR_ERR(fr_desc->data_mr)); 423 + ret = PTR_ERR(fr_desc->data_mr); 424 + ib_free_fast_reg_page_list(fr_desc->data_frpl); 425 + goto err; 426 + } 427 + pr_debug("Create fr_desc %p page_list %p\n", 428 + fr_desc, fr_desc->data_frpl->page_list); 429 + 430 + fr_desc->valid = true; 431 + list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); 432 + isert_conn->conn_frwr_pool_size++; 433 + } 434 + 435 + pr_debug("Creating conn %p frwr pool size=%d", 436 + isert_conn, isert_conn->conn_frwr_pool_size); 437 + 438 + return 0; 439 + 440 + err: 441 + isert_conn_free_frwr_pool(isert_conn); 442 + return ret; 443 + } 444 + 387 445 static int 388 446 
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 389 447 { ··· 489 389 kref_init(&isert_conn->conn_kref); 490 390 kref_get(&isert_conn->conn_kref); 491 391 mutex_init(&isert_conn->conn_mutex); 392 + spin_lock_init(&isert_conn->conn_lock); 492 393 493 394 cma_id->context = isert_conn; 494 395 isert_conn->conn_cm_id = cma_id; ··· 547 446 isert_conn->conn_pd = device->dev_pd; 548 447 isert_conn->conn_mr = device->dev_mr; 549 448 449 + if (device->use_frwr) { 450 + ret = isert_conn_create_frwr_pool(isert_conn); 451 + if (ret) { 452 + pr_err("Conn: %p failed to create frwr_pool\n", isert_conn); 453 + goto out_frwr; 454 + } 455 + } 456 + 550 457 ret = isert_conn_setup_qp(isert_conn, cma_id); 551 458 if (ret) 552 459 goto out_conn_dev; ··· 568 459 return 0; 569 460 570 461 out_conn_dev: 462 + if (device->use_frwr) 463 + isert_conn_free_frwr_pool(isert_conn); 464 + out_frwr: 571 465 isert_device_try_release(device); 572 466 out_rsp_dma_map: 573 467 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, ··· 593 481 int cq_index; 594 482 595 483 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 484 + 485 + if (device->use_frwr) 486 + isert_conn_free_frwr_pool(isert_conn); 596 487 597 488 if (isert_conn->conn_qp) { 598 489 cq_index = ((struct isert_cq_desc *) ··· 984 869 size, rx_buflen, MAX_KEY_VALUE_PAIRS); 985 870 memcpy(login->req_buf, &rx_desc->data[0], size); 986 871 987 - complete(&isert_conn->conn_login_comp); 988 - } 989 - 990 - static void 991 - isert_release_cmd(struct iscsi_cmd *cmd) 992 - { 993 - struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd, 994 - iscsi_cmd); 995 - 996 - pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd); 997 - 998 - kfree(cmd->buf_ptr); 999 - kfree(cmd->tmr_req); 1000 - 1001 - kmem_cache_free(isert_cmd_cache, isert_cmd); 872 + if (login->first_request) { 873 + complete(&isert_conn->conn_login_comp); 874 + return; 875 + } 876 + 
schedule_delayed_work(&conn->login_work, 0); 1002 877 } 1003 878 1004 879 static struct iscsi_cmd 1005 - *isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp) 880 + *isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp) 1006 881 { 1007 882 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1008 883 struct isert_cmd *isert_cmd; 884 + struct iscsi_cmd *cmd; 1009 885 1010 - isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp); 1011 - if (!isert_cmd) { 1012 - pr_err("Unable to allocate isert_cmd\n"); 886 + cmd = iscsit_allocate_cmd(conn, gfp); 887 + if (!cmd) { 888 + pr_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1013 889 return NULL; 1014 890 } 891 + isert_cmd = iscsit_priv_cmd(cmd); 1015 892 isert_cmd->conn = isert_conn; 1016 - isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd; 893 + isert_cmd->iscsi_cmd = cmd; 1017 894 1018 - return &isert_cmd->iscsi_cmd; 895 + return cmd; 1019 896 } 1020 897 1021 898 static int 1022 899 isert_handle_scsi_cmd(struct isert_conn *isert_conn, 1023 - struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc, 1024 - unsigned char *buf) 900 + struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, 901 + struct iser_rx_desc *rx_desc, unsigned char *buf) 1025 902 { 1026 - struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1027 903 struct iscsi_conn *conn = isert_conn->conn; 1028 904 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1029 905 struct scatterlist *sg; ··· 1121 1015 1122 1016 static int 1123 1017 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1124 - struct iser_rx_desc *rx_desc, unsigned char *buf) 1018 + struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1019 + unsigned char *buf) 1125 1020 { 1126 - struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1127 1021 struct iscsi_conn *conn = isert_conn->conn; 1128 1022 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; 1129 1023 int rc; ··· 1140 1034 1141 1035 static int 1142 1036 isert_handle_text_cmd(struct isert_conn 
*isert_conn, struct isert_cmd *isert_cmd, 1143 - struct iser_rx_desc *rx_desc, struct iscsi_text *hdr) 1037 + struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1038 + struct iscsi_text *hdr) 1144 1039 { 1145 - struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1146 1040 struct iscsi_conn *conn = isert_conn->conn; 1147 1041 u32 payload_length = ntoh24(hdr->dlength); 1148 1042 int rc; ··· 1187 1081 1188 1082 switch (opcode) { 1189 1083 case ISCSI_OP_SCSI_CMD: 1190 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1084 + cmd = isert_allocate_cmd(conn, GFP_KERNEL); 1191 1085 if (!cmd) 1192 1086 break; 1193 1087 1194 - isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd); 1088 + isert_cmd = iscsit_priv_cmd(cmd); 1195 1089 isert_cmd->read_stag = read_stag; 1196 1090 isert_cmd->read_va = read_va; 1197 1091 isert_cmd->write_stag = write_stag; 1198 1092 isert_cmd->write_va = write_va; 1199 1093 1200 - ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, 1094 + ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd, 1201 1095 rx_desc, (unsigned char *)hdr); 1202 1096 break; 1203 1097 case ISCSI_OP_NOOP_OUT: 1204 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1098 + cmd = isert_allocate_cmd(conn, GFP_KERNEL); 1205 1099 if (!cmd) 1206 1100 break; 1207 1101 1208 - isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd); 1209 - ret = isert_handle_nop_out(isert_conn, isert_cmd, 1102 + isert_cmd = iscsit_priv_cmd(cmd); 1103 + ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd, 1210 1104 rx_desc, (unsigned char *)hdr); 1211 1105 break; 1212 1106 case ISCSI_OP_SCSI_DATA_OUT: ··· 1214 1108 (unsigned char *)hdr); 1215 1109 break; 1216 1110 case ISCSI_OP_SCSI_TMFUNC: 1217 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1111 + cmd = isert_allocate_cmd(conn, GFP_KERNEL); 1218 1112 if (!cmd) 1219 1113 break; 1220 1114 ··· 1222 1116 (unsigned char *)hdr); 1223 1117 break; 1224 1118 case ISCSI_OP_LOGOUT: 1225 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1119 + cmd = 
isert_allocate_cmd(conn, GFP_KERNEL); 1226 1120 if (!cmd) 1227 1121 break; 1228 1122 ··· 1233 1127 HZ); 1234 1128 break; 1235 1129 case ISCSI_OP_TEXT: 1236 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1130 + cmd = isert_allocate_cmd(conn, GFP_KERNEL); 1237 1131 if (!cmd) 1238 1132 break; 1239 1133 1240 - isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd); 1241 - ret = isert_handle_text_cmd(isert_conn, isert_cmd, 1134 + isert_cmd = iscsit_priv_cmd(cmd); 1135 + ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, 1242 1136 rx_desc, (struct iscsi_text *)hdr); 1243 1137 break; 1244 1138 default: ··· 1349 1243 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1350 1244 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1351 1245 1352 - pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n"); 1353 - 1246 + pr_debug("isert_unmap_cmd: %p\n", isert_cmd); 1354 1247 if (wr->sge) { 1355 - ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE); 1248 + pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd); 1249 + ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, 1250 + (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 
1251 + DMA_TO_DEVICE : DMA_FROM_DEVICE); 1356 1252 wr->sge = NULL; 1357 1253 } 1358 1254 1359 - kfree(wr->send_wr); 1360 - wr->send_wr = NULL; 1255 + if (wr->send_wr) { 1256 + pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd); 1257 + kfree(wr->send_wr); 1258 + wr->send_wr = NULL; 1259 + } 1361 1260 1362 - kfree(isert_cmd->ib_sge); 1363 - isert_cmd->ib_sge = NULL; 1261 + if (wr->ib_sge) { 1262 + pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd); 1263 + kfree(wr->ib_sge); 1264 + wr->ib_sge = NULL; 1265 + } 1266 + } 1267 + 1268 + static void 1269 + isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1270 + { 1271 + struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1272 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1273 + LIST_HEAD(unmap_list); 1274 + 1275 + pr_debug("unreg_frwr_cmd: %p\n", isert_cmd); 1276 + 1277 + if (wr->fr_desc) { 1278 + pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n", 1279 + isert_cmd, wr->fr_desc); 1280 + spin_lock_bh(&isert_conn->conn_lock); 1281 + list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool); 1282 + spin_unlock_bh(&isert_conn->conn_lock); 1283 + wr->fr_desc = NULL; 1284 + } 1285 + 1286 + if (wr->sge) { 1287 + pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd); 1288 + ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, 1289 + (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 
1290 + DMA_TO_DEVICE : DMA_FROM_DEVICE); 1291 + wr->sge = NULL; 1292 + } 1293 + 1294 + wr->ib_sge = NULL; 1295 + wr->send_wr = NULL; 1364 1296 } 1365 1297 1366 1298 static void 1367 1299 isert_put_cmd(struct isert_cmd *isert_cmd) 1368 1300 { 1369 - struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1301 + struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1370 1302 struct isert_conn *isert_conn = isert_cmd->conn; 1371 1303 struct iscsi_conn *conn = isert_conn->conn; 1304 + struct isert_device *device = isert_conn->conn_device; 1372 1305 1373 1306 pr_debug("Entering isert_put_cmd: %p\n", isert_cmd); 1374 1307 ··· 1421 1276 if (cmd->data_direction == DMA_TO_DEVICE) 1422 1277 iscsit_stop_dataout_timer(cmd); 1423 1278 1424 - isert_unmap_cmd(isert_cmd, isert_conn); 1279 + device->unreg_rdma_mem(isert_cmd, isert_conn); 1425 1280 transport_generic_free_cmd(&cmd->se_cmd, 0); 1426 1281 break; 1427 1282 case ISCSI_OP_SCSI_TMFUNC: ··· 1456 1311 * Fall-through 1457 1312 */ 1458 1313 default: 1459 - isert_release_cmd(cmd); 1314 + iscsit_release_cmd(cmd); 1460 1315 break; 1461 1316 } 1462 1317 } ··· 1492 1347 struct isert_cmd *isert_cmd) 1493 1348 { 1494 1349 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1495 - struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1350 + struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1496 1351 struct se_cmd *se_cmd = &cmd->se_cmd; 1497 - struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device; 1352 + struct isert_conn *isert_conn = isert_cmd->conn; 1353 + struct isert_device *device = isert_conn->conn_device; 1498 1354 1499 1355 iscsit_stop_dataout_timer(cmd); 1356 + device->unreg_rdma_mem(isert_cmd, isert_conn); 1357 + cmd->write_data_done = wr->cur_rdma_length; 1500 1358 1501 - if (wr->sge) { 1502 - pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n"); 1503 - ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE); 1504 - wr->sge = NULL; 1505 - } 1506 - 1507 - if (isert_cmd->ib_sge) { 1508 - pr_debug("isert_do_rdma_read_comp: 
Freeing isert_cmd->ib_sge\n"); 1509 - kfree(isert_cmd->ib_sge); 1510 - isert_cmd->ib_sge = NULL; 1511 - } 1512 - 1513 - cmd->write_data_done = se_cmd->data_length; 1514 - 1515 - pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n"); 1359 + pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1516 1360 spin_lock_bh(&cmd->istate_lock); 1517 1361 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1518 1362 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; ··· 1517 1383 struct isert_cmd, comp_work); 1518 1384 struct isert_conn *isert_conn = isert_cmd->conn; 1519 1385 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1520 - struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1386 + struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1521 1387 1522 1388 switch (cmd->i_state) { 1523 1389 case ISTATE_SEND_TASKMGTRSP: ··· 1563 1429 struct isert_conn *isert_conn, 1564 1430 struct ib_device *ib_dev) 1565 1431 { 1566 - struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1432 + struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1567 1433 1568 1434 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || 1569 1435 cmd->i_state == ISTATE_SEND_LOGOUTRSP || ··· 1755 1621 static int 1756 1622 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1757 1623 { 1758 - struct isert_cmd *isert_cmd = container_of(cmd, 1759 - struct isert_cmd, iscsi_cmd); 1624 + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1760 1625 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1761 1626 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1762 1627 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) ··· 1804 1671 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 1805 1672 bool nopout_response) 1806 1673 { 1807 - struct isert_cmd *isert_cmd = container_of(cmd, 1808 - struct isert_cmd, iscsi_cmd); 1674 + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1809 1675 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1810 1676 struct 
ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1811 1677 ··· 1823 1691 static int 1824 1692 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1825 1693 { 1826 - struct isert_cmd *isert_cmd = container_of(cmd, 1827 - struct isert_cmd, iscsi_cmd); 1694 + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1828 1695 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1829 1696 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1830 1697 ··· 1841 1710 static int 1842 1711 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1843 1712 { 1844 - struct isert_cmd *isert_cmd = container_of(cmd, 1845 - struct isert_cmd, iscsi_cmd); 1713 + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1846 1714 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1847 1715 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1848 1716 ··· 1859 1729 static int 1860 1730 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1861 1731 { 1862 - struct isert_cmd *isert_cmd = container_of(cmd, 1863 - struct isert_cmd, iscsi_cmd); 1732 + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1864 1733 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1865 1734 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1866 1735 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; ··· 1891 1762 static int 1892 1763 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1893 1764 { 1894 - struct isert_cmd *isert_cmd = container_of(cmd, 1895 - struct isert_cmd, iscsi_cmd); 1765 + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1896 1766 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1897 1767 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1898 1768 struct iscsi_text_rsp *hdr = ··· 1933 1805 struct ib_sge *ib_sge, struct ib_send_wr *send_wr, 1934 1806 u32 data_left, u32 offset) 1935 1807 { 1936 - struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 
1808 + struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1937 1809 struct scatterlist *sg_start, *tmp_sg; 1938 1810 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1939 1811 u32 sg_off, page_off; ··· 1960 1832 ib_sg_dma_len(ib_dev, tmp_sg) - page_off); 1961 1833 ib_sge->lkey = isert_conn->conn_mr->lkey; 1962 1834 1963 - pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n", 1964 - ib_sge->addr, ib_sge->length); 1835 + pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n", 1836 + ib_sge->addr, ib_sge->length, ib_sge->lkey); 1965 1837 page_off = 0; 1966 1838 data_left -= ib_sge->length; 1967 1839 ib_sge++; ··· 1975 1847 } 1976 1848 1977 1849 static int 1978 - isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1850 + isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1851 + struct isert_rdma_wr *wr) 1979 1852 { 1980 1853 struct se_cmd *se_cmd = &cmd->se_cmd; 1981 - struct isert_cmd *isert_cmd = container_of(cmd, 1982 - struct isert_cmd, iscsi_cmd); 1983 - struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1854 + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1984 1855 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1985 - struct ib_send_wr *wr_failed, *send_wr; 1986 1856 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1857 + struct ib_send_wr *send_wr; 1987 1858 struct ib_sge *ib_sge; 1988 - struct scatterlist *sg; 1989 - u32 offset = 0, data_len, data_left, rdma_write_max; 1990 - int rc, ret = 0, count, sg_nents, i, ib_sge_cnt; 1859 + struct scatterlist *sg_start; 1860 + u32 sg_off = 0, sg_nents; 1861 + u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0; 1862 + int ret = 0, count, i, ib_sge_cnt; 1991 1863 1992 - pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length); 1864 + if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 1865 + data_left = se_cmd->data_length; 1866 + iscsit_increment_maxcmdsn(cmd, conn->sess); 1867 + cmd->stat_sn = conn->stat_sn++; 1868 + } else { 1869 + 
sg_off = cmd->write_data_done / PAGE_SIZE; 1870 + data_left = se_cmd->data_length - cmd->write_data_done; 1871 + offset = cmd->write_data_done; 1872 + isert_cmd->tx_desc.isert_cmd = isert_cmd; 1873 + } 1993 1874 1994 - sg = &se_cmd->t_data_sg[0]; 1995 - sg_nents = se_cmd->t_data_nents; 1875 + sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1876 + sg_nents = se_cmd->t_data_nents - sg_off; 1996 1877 1997 - count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE); 1878 + count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, 1879 + (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 1880 + DMA_TO_DEVICE : DMA_FROM_DEVICE); 1998 1881 if (unlikely(!count)) { 1999 - pr_err("Unable to map put_datain SGs\n"); 1882 + pr_err("Cmd: %p unrable to map SGs\n", isert_cmd); 2000 1883 return -EINVAL; 2001 1884 } 2002 - wr->sge = sg; 1885 + wr->sge = sg_start; 2003 1886 wr->num_sge = sg_nents; 2004 - pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n", 2005 - count, sg, sg_nents); 1887 + wr->cur_rdma_length = data_left; 1888 + pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", 1889 + isert_cmd, count, sg_start, sg_nents, data_left); 2006 1890 2007 1891 ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL); 2008 1892 if (!ib_sge) { 2009 - pr_warn("Unable to allocate datain ib_sge\n"); 1893 + pr_warn("Unable to allocate ib_sge\n"); 2010 1894 ret = -ENOMEM; 2011 1895 goto unmap_sg; 2012 1896 } 2013 - isert_cmd->ib_sge = ib_sge; 2014 - 2015 - pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n", 2016 - ib_sge, se_cmd->t_data_nents); 1897 + wr->ib_sge = ib_sge; 2017 1898 2018 1899 wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge); 2019 1900 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, 2020 1901 GFP_KERNEL); 2021 1902 if (!wr->send_wr) { 2022 - pr_err("Unable to allocate wr->send_wr\n"); 1903 + pr_debug("Unable to allocate wr->send_wr\n"); 2023 1904 ret = -ENOMEM; 2024 1905 goto unmap_sg; 2025 1906 } 2026 - 
pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n", 2027 - wr->send_wr, wr->send_wr_num); 2028 - 2029 - iscsit_increment_maxcmdsn(cmd, conn->sess); 2030 - cmd->stat_sn = conn->stat_sn++; 2031 1907 2032 1908 wr->isert_cmd = isert_cmd; 2033 1909 rdma_write_max = isert_conn->max_sge * PAGE_SIZE; 2034 - data_left = se_cmd->data_length; 2035 1910 2036 1911 for (i = 0; i < wr->send_wr_num; i++) { 2037 1912 send_wr = &isert_cmd->rdma_wr.send_wr[i]; 2038 1913 data_len = min(data_left, rdma_write_max); 2039 1914 2040 - send_wr->opcode = IB_WR_RDMA_WRITE; 2041 1915 send_wr->send_flags = 0; 2042 - send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset; 2043 - send_wr->wr.rdma.rkey = isert_cmd->read_stag; 1916 + if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 1917 + send_wr->opcode = IB_WR_RDMA_WRITE; 1918 + send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset; 1919 + send_wr->wr.rdma.rkey = isert_cmd->read_stag; 1920 + if (i + 1 == wr->send_wr_num) 1921 + send_wr->next = &isert_cmd->tx_desc.send_wr; 1922 + else 1923 + send_wr->next = &wr->send_wr[i + 1]; 1924 + } else { 1925 + send_wr->opcode = IB_WR_RDMA_READ; 1926 + send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset; 1927 + send_wr->wr.rdma.rkey = isert_cmd->write_stag; 1928 + if (i + 1 == wr->send_wr_num) 1929 + send_wr->send_flags = IB_SEND_SIGNALED; 1930 + else 1931 + send_wr->next = &wr->send_wr[i + 1]; 1932 + } 2044 1933 2045 1934 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, 2046 1935 send_wr, data_len, offset); 2047 1936 ib_sge += ib_sge_cnt; 2048 1937 2049 - if (i + 1 == wr->send_wr_num) 2050 - send_wr->next = &isert_cmd->tx_desc.send_wr; 2051 - else 2052 - send_wr->next = &wr->send_wr[i + 1]; 2053 - 2054 1938 offset += data_len; 1939 + va_offset += data_len; 2055 1940 data_left -= data_len; 2056 1941 } 1942 + 1943 + return 0; 1944 + unmap_sg: 1945 + ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, 1946 + (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 
1947 + DMA_TO_DEVICE : DMA_FROM_DEVICE); 1948 + return ret; 1949 + } 1950 + 1951 + static int 1952 + isert_map_fr_pagelist(struct ib_device *ib_dev, 1953 + struct scatterlist *sg_start, int sg_nents, u64 *fr_pl) 1954 + { 1955 + u64 start_addr, end_addr, page, chunk_start = 0; 1956 + struct scatterlist *tmp_sg; 1957 + int i = 0, new_chunk, last_ent, n_pages; 1958 + 1959 + n_pages = 0; 1960 + new_chunk = 1; 1961 + last_ent = sg_nents - 1; 1962 + for_each_sg(sg_start, tmp_sg, sg_nents, i) { 1963 + start_addr = ib_sg_dma_address(ib_dev, tmp_sg); 1964 + if (new_chunk) 1965 + chunk_start = start_addr; 1966 + end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg); 1967 + 1968 + pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n", 1969 + i, (unsigned long long)tmp_sg->dma_address, 1970 + tmp_sg->length); 1971 + 1972 + if ((end_addr & ~PAGE_MASK) && i < last_ent) { 1973 + new_chunk = 0; 1974 + continue; 1975 + } 1976 + new_chunk = 1; 1977 + 1978 + page = chunk_start & PAGE_MASK; 1979 + do { 1980 + fr_pl[n_pages++] = page; 1981 + pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n", 1982 + n_pages - 1, page); 1983 + page += PAGE_SIZE; 1984 + } while (page < end_addr); 1985 + } 1986 + 1987 + return n_pages; 1988 + } 1989 + 1990 + static int 1991 + isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, 1992 + struct isert_cmd *isert_cmd, struct isert_conn *isert_conn, 1993 + struct ib_sge *ib_sge, u32 offset, unsigned int data_len) 1994 + { 1995 + struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1996 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1997 + struct scatterlist *sg_start; 1998 + u32 sg_off, page_off; 1999 + struct ib_send_wr fr_wr, inv_wr; 2000 + struct ib_send_wr *bad_wr, *wr = NULL; 2001 + u8 key; 2002 + int ret, sg_nents, pagelist_len; 2003 + 2004 + sg_off = offset / PAGE_SIZE; 2005 + sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 2006 + sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off, 2007 + ISCSI_ISER_SG_TABLESIZE); 2008 + page_off = offset 
% PAGE_SIZE; 2009 + 2010 + pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n", 2011 + isert_cmd, fr_desc, sg_nents, sg_off, offset); 2012 + 2013 + pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents, 2014 + &fr_desc->data_frpl->page_list[0]); 2015 + 2016 + if (!fr_desc->valid) { 2017 + memset(&inv_wr, 0, sizeof(inv_wr)); 2018 + inv_wr.opcode = IB_WR_LOCAL_INV; 2019 + inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey; 2020 + wr = &inv_wr; 2021 + /* Bump the key */ 2022 + key = (u8)(fr_desc->data_mr->rkey & 0x000000FF); 2023 + ib_update_fast_reg_key(fr_desc->data_mr, ++key); 2024 + } 2025 + 2026 + /* Prepare FASTREG WR */ 2027 + memset(&fr_wr, 0, sizeof(fr_wr)); 2028 + fr_wr.opcode = IB_WR_FAST_REG_MR; 2029 + fr_wr.wr.fast_reg.iova_start = 2030 + fr_desc->data_frpl->page_list[0] + page_off; 2031 + fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl; 2032 + fr_wr.wr.fast_reg.page_list_len = pagelist_len; 2033 + fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; 2034 + fr_wr.wr.fast_reg.length = data_len; 2035 + fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey; 2036 + fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE; 2037 + 2038 + if (!wr) 2039 + wr = &fr_wr; 2040 + else 2041 + wr->next = &fr_wr; 2042 + 2043 + ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); 2044 + if (ret) { 2045 + pr_err("fast registration failed, ret:%d\n", ret); 2046 + return ret; 2047 + } 2048 + fr_desc->valid = false; 2049 + 2050 + ib_sge->lkey = fr_desc->data_mr->lkey; 2051 + ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off; 2052 + ib_sge->length = data_len; 2053 + 2054 + pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n", 2055 + ib_sge->addr, ib_sge->length, ib_sge->lkey); 2056 + 2057 + return ret; 2058 + } 2059 + 2060 + static int 2061 + isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 2062 + struct isert_rdma_wr *wr) 2063 + { 2064 + struct se_cmd *se_cmd = &cmd->se_cmd; 2065 + struct isert_cmd *isert_cmd = 
iscsit_priv_cmd(cmd); 2066 + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2067 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2068 + struct ib_send_wr *send_wr; 2069 + struct ib_sge *ib_sge; 2070 + struct scatterlist *sg_start; 2071 + struct fast_reg_descriptor *fr_desc; 2072 + u32 sg_off = 0, sg_nents; 2073 + u32 offset = 0, data_len, data_left, rdma_write_max; 2074 + int ret = 0, count; 2075 + unsigned long flags; 2076 + 2077 + if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2078 + data_left = se_cmd->data_length; 2079 + iscsit_increment_maxcmdsn(cmd, conn->sess); 2080 + cmd->stat_sn = conn->stat_sn++; 2081 + } else { 2082 + sg_off = cmd->write_data_done / PAGE_SIZE; 2083 + data_left = se_cmd->data_length - cmd->write_data_done; 2084 + offset = cmd->write_data_done; 2085 + isert_cmd->tx_desc.isert_cmd = isert_cmd; 2086 + } 2087 + 2088 + sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 2089 + sg_nents = se_cmd->t_data_nents - sg_off; 2090 + 2091 + count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, 2092 + (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 
2093 + DMA_TO_DEVICE : DMA_FROM_DEVICE); 2094 + if (unlikely(!count)) { 2095 + pr_err("Cmd: %p unrable to map SGs\n", isert_cmd); 2096 + return -EINVAL; 2097 + } 2098 + wr->sge = sg_start; 2099 + wr->num_sge = sg_nents; 2100 + pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", 2101 + isert_cmd, count, sg_start, sg_nents, data_left); 2102 + 2103 + memset(&wr->s_ib_sge, 0, sizeof(*ib_sge)); 2104 + ib_sge = &wr->s_ib_sge; 2105 + wr->ib_sge = ib_sge; 2106 + 2107 + wr->send_wr_num = 1; 2108 + memset(&wr->s_send_wr, 0, sizeof(*send_wr)); 2109 + wr->send_wr = &wr->s_send_wr; 2110 + 2111 + wr->isert_cmd = isert_cmd; 2112 + rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE; 2113 + 2114 + send_wr = &isert_cmd->rdma_wr.s_send_wr; 2115 + send_wr->sg_list = ib_sge; 2116 + send_wr->num_sge = 1; 2117 + send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; 2118 + if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2119 + send_wr->opcode = IB_WR_RDMA_WRITE; 2120 + send_wr->wr.rdma.remote_addr = isert_cmd->read_va; 2121 + send_wr->wr.rdma.rkey = isert_cmd->read_stag; 2122 + send_wr->send_flags = 0; 2123 + send_wr->next = &isert_cmd->tx_desc.send_wr; 2124 + } else { 2125 + send_wr->opcode = IB_WR_RDMA_READ; 2126 + send_wr->wr.rdma.remote_addr = isert_cmd->write_va; 2127 + send_wr->wr.rdma.rkey = isert_cmd->write_stag; 2128 + send_wr->send_flags = IB_SEND_SIGNALED; 2129 + } 2130 + 2131 + data_len = min(data_left, rdma_write_max); 2132 + wr->cur_rdma_length = data_len; 2133 + 2134 + spin_lock_irqsave(&isert_conn->conn_lock, flags); 2135 + fr_desc = list_first_entry(&isert_conn->conn_frwr_pool, 2136 + struct fast_reg_descriptor, list); 2137 + list_del(&fr_desc->list); 2138 + spin_unlock_irqrestore(&isert_conn->conn_lock, flags); 2139 + wr->fr_desc = fr_desc; 2140 + 2141 + ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn, 2142 + ib_sge, offset, data_len); 2143 + if (ret) { 2144 + list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); 2145 + goto unmap_sg; 2146 + 
} 2147 + 2148 + return 0; 2149 + 2150 + unmap_sg: 2151 + ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, 2152 + (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 2153 + DMA_TO_DEVICE : DMA_FROM_DEVICE); 2154 + return ret; 2155 + } 2156 + 2157 + static int 2158 + isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2159 + { 2160 + struct se_cmd *se_cmd = &cmd->se_cmd; 2161 + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2162 + struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 2163 + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2164 + struct isert_device *device = isert_conn->conn_device; 2165 + struct ib_send_wr *wr_failed; 2166 + int rc; 2167 + 2168 + pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n", 2169 + isert_cmd, se_cmd->data_length); 2170 + wr->iser_ib_op = ISER_IB_RDMA_WRITE; 2171 + rc = device->reg_rdma_mem(conn, cmd, wr); 2172 + if (rc) { 2173 + pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); 2174 + return rc; 2175 + } 2176 + 2057 2177 /* 2058 2178 * Build isert_conn->tx_desc for iSCSI response PDU and attach 2059 2179 */ ··· 2318 1942 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); 2319 1943 atomic_dec(&isert_conn->post_send_buf_count); 2320 1944 } 2321 - pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n"); 2322 - return 1; 1945 + pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n", 1946 + isert_cmd); 2323 1947 2324 - unmap_sg: 2325 - ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE); 2326 - return ret; 1948 + return 1; 2327 1949 } 2328 1950 2329 1951 static int 2330 1952 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2331 1953 { 2332 1954 struct se_cmd *se_cmd = &cmd->se_cmd; 2333 - struct isert_cmd *isert_cmd = container_of(cmd, 2334 - struct isert_cmd, iscsi_cmd); 1955 + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2335 1956 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 2336 1957 struct isert_conn *isert_conn = (struct isert_conn 
*)conn->context; 2337 - struct ib_send_wr *wr_failed, *send_wr; 2338 - struct ib_sge *ib_sge; 2339 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2340 - struct scatterlist *sg_start; 2341 - u32 sg_off, sg_nents, page_off, va_offset = 0; 2342 - u32 offset = 0, data_len, data_left, rdma_write_max; 2343 - int rc, ret = 0, count, i, ib_sge_cnt; 1958 + struct isert_device *device = isert_conn->conn_device; 1959 + struct ib_send_wr *wr_failed; 1960 + int rc; 2344 1961 2345 - pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n", 2346 - se_cmd->data_length, cmd->write_data_done); 2347 - 2348 - sg_off = cmd->write_data_done / PAGE_SIZE; 2349 - sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 2350 - page_off = cmd->write_data_done % PAGE_SIZE; 2351 - 2352 - pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n", 2353 - sg_off, sg_start, page_off); 2354 - 2355 - data_left = se_cmd->data_length - cmd->write_data_done; 2356 - sg_nents = se_cmd->t_data_nents - sg_off; 2357 - 2358 - pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n", 2359 - data_left, sg_nents); 2360 - 2361 - count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE); 2362 - if (unlikely(!count)) { 2363 - pr_err("Unable to map get_dataout SGs\n"); 2364 - return -EINVAL; 2365 - } 2366 - wr->sge = sg_start; 2367 - wr->num_sge = sg_nents; 2368 - pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n", 2369 - count, sg_start, sg_nents); 2370 - 2371 - ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL); 2372 - if (!ib_sge) { 2373 - pr_warn("Unable to allocate dataout ib_sge\n"); 2374 - ret = -ENOMEM; 2375 - goto unmap_sg; 2376 - } 2377 - isert_cmd->ib_sge = ib_sge; 2378 - 2379 - pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n", 2380 - ib_sge, sg_nents); 2381 - 2382 - wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge); 2383 - wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, 2384 - GFP_KERNEL); 2385 - if (!wr->send_wr) { 
2386 - pr_debug("Unable to allocate wr->send_wr\n"); 2387 - ret = -ENOMEM; 2388 - goto unmap_sg; 2389 - } 2390 - pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n", 2391 - wr->send_wr, wr->send_wr_num); 2392 - 2393 - isert_cmd->tx_desc.isert_cmd = isert_cmd; 2394 - 1962 + pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 1963 + isert_cmd, se_cmd->data_length, cmd->write_data_done); 2395 1964 wr->iser_ib_op = ISER_IB_RDMA_READ; 2396 - wr->isert_cmd = isert_cmd; 2397 - rdma_write_max = isert_conn->max_sge * PAGE_SIZE; 2398 - offset = cmd->write_data_done; 2399 - 2400 - for (i = 0; i < wr->send_wr_num; i++) { 2401 - send_wr = &isert_cmd->rdma_wr.send_wr[i]; 2402 - data_len = min(data_left, rdma_write_max); 2403 - 2404 - send_wr->opcode = IB_WR_RDMA_READ; 2405 - send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset; 2406 - send_wr->wr.rdma.rkey = isert_cmd->write_stag; 2407 - 2408 - ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, 2409 - send_wr, data_len, offset); 2410 - ib_sge += ib_sge_cnt; 2411 - 2412 - if (i + 1 == wr->send_wr_num) 2413 - send_wr->send_flags = IB_SEND_SIGNALED; 2414 - else 2415 - send_wr->next = &wr->send_wr[i + 1]; 2416 - 2417 - offset += data_len; 2418 - va_offset += data_len; 2419 - data_left -= data_len; 1965 + rc = device->reg_rdma_mem(conn, cmd, wr); 1966 + if (rc) { 1967 + pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); 1968 + return rc; 2420 1969 } 2421 1970 2422 1971 atomic_inc(&isert_conn->post_send_buf_count); ··· 2351 2050 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); 2352 2051 atomic_dec(&isert_conn->post_send_buf_count); 2353 2052 } 2354 - pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n"); 2355 - return 0; 2053 + pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", 2054 + isert_cmd); 2356 2055 2357 - unmap_sg: 2358 - ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE); 2359 - return ret; 2056 + return 0; 2360 2057 } 2361 2058 2362 
2059 static int ··· 2523 2224 int ret; 2524 2225 2525 2226 pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn); 2227 + /* 2228 + * For login requests after the first PDU, isert_rx_login_req() will 2229 + * kick schedule_delayed_work(&conn->login_work) as the packet is 2230 + * received, which turns this callback from iscsi_target_do_login_rx() 2231 + * into a NOP. 2232 + */ 2233 + if (!login->first_request) 2234 + return 0; 2526 2235 2527 2236 ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); 2528 2237 if (ret) ··· 2700 2393 static struct iscsit_transport iser_target_transport = { 2701 2394 .name = "IB/iSER", 2702 2395 .transport_type = ISCSI_INFINIBAND, 2396 + .priv_size = sizeof(struct isert_cmd), 2703 2397 .owner = THIS_MODULE, 2704 2398 .iscsit_setup_np = isert_setup_np, 2705 2399 .iscsit_accept_np = isert_accept_np, 2706 2400 .iscsit_free_np = isert_free_np, 2707 2401 .iscsit_free_conn = isert_free_conn, 2708 - .iscsit_alloc_cmd = isert_alloc_cmd, 2709 2402 .iscsit_get_login_rx = isert_get_login_rx, 2710 2403 .iscsit_put_login_tx = isert_put_login_tx, 2711 2404 .iscsit_immediate_queue = isert_immediate_queue, ··· 2732 2425 goto destroy_rx_wq; 2733 2426 } 2734 2427 2735 - isert_cmd_cache = kmem_cache_create("isert_cmd_cache", 2736 - sizeof(struct isert_cmd), __alignof__(struct isert_cmd), 2737 - 0, NULL); 2738 - if (!isert_cmd_cache) { 2739 - pr_err("Unable to create isert_cmd_cache\n"); 2740 - ret = -ENOMEM; 2741 - goto destroy_tx_cq; 2742 - } 2743 - 2744 2428 iscsit_register_transport(&iser_target_transport); 2745 2429 pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n"); 2746 2430 return 0; 2747 2431 2748 - destroy_tx_cq: 2749 - destroy_workqueue(isert_comp_wq); 2750 2432 destroy_rx_wq: 2751 2433 destroy_workqueue(isert_rx_wq); 2752 2434 return ret; ··· 2743 2447 2744 2448 static void __exit isert_exit(void) 2745 2449 { 2746 - kmem_cache_destroy(isert_cmd_cache); 2747 2450 destroy_workqueue(isert_comp_wq); 2748 
2451 destroy_workqueue(isert_rx_wq); 2749 2452 iscsit_unregister_transport(&iser_target_transport);
+24 -2
drivers/infiniband/ulp/isert/ib_isert.h
··· 5 5 #include <rdma/rdma_cm.h> 6 6 7 7 #define ISERT_RDMA_LISTEN_BACKLOG 10 8 + #define ISCSI_ISER_SG_TABLESIZE 256 8 9 9 10 enum isert_desc_type { 10 11 ISCSI_TX_CONTROL, ··· 46 45 struct ib_send_wr send_wr; 47 46 } __packed; 48 47 48 + struct fast_reg_descriptor { 49 + struct list_head list; 50 + struct ib_mr *data_mr; 51 + struct ib_fast_reg_page_list *data_frpl; 52 + bool valid; 53 + }; 54 + 49 55 struct isert_rdma_wr { 50 56 struct list_head wr_list; 51 57 struct isert_cmd *isert_cmd; 52 58 enum iser_ib_op_code iser_ib_op; 53 59 struct ib_sge *ib_sge; 60 + struct ib_sge s_ib_sge; 54 61 int num_sge; 55 62 struct scatterlist *sge; 56 63 int send_wr_num; 57 64 struct ib_send_wr *send_wr; 65 + struct ib_send_wr s_send_wr; 66 + u32 cur_rdma_length; 67 + struct fast_reg_descriptor *fr_desc; 58 68 }; 59 69 60 70 struct isert_cmd { ··· 79 67 u32 write_va_off; 80 68 u32 rdma_wr_num; 81 69 struct isert_conn *conn; 82 - struct iscsi_cmd iscsi_cmd; 83 - struct ib_sge *ib_sge; 70 + struct iscsi_cmd *iscsi_cmd; 84 71 struct iser_tx_desc tx_desc; 85 72 struct isert_rdma_wr rdma_wr; 86 73 struct work_struct comp_work; ··· 117 106 wait_queue_head_t conn_wait; 118 107 wait_queue_head_t conn_wait_comp_err; 119 108 struct kref conn_kref; 109 + struct list_head conn_frwr_pool; 110 + int conn_frwr_pool_size; 111 + /* lock to protect frwr_pool */ 112 + spinlock_t conn_lock; 120 113 }; 121 114 122 115 #define ISERT_MAX_CQ 64 ··· 133 118 }; 134 119 135 120 struct isert_device { 121 + int use_frwr; 136 122 int cqs_used; 137 123 int refcount; 138 124 int cq_active_qps[ISERT_MAX_CQ]; ··· 144 128 struct ib_cq *dev_tx_cq[ISERT_MAX_CQ]; 145 129 struct isert_cq_desc *cq_desc; 146 130 struct list_head dev_node; 131 + struct ib_device_attr dev_attr; 132 + int (*reg_rdma_mem)(struct iscsi_conn *conn, 133 + struct iscsi_cmd *cmd, 134 + struct isert_rdma_wr *wr); 135 + void (*unreg_rdma_mem)(struct isert_cmd *isert_cmd, 136 + struct isert_conn *isert_conn); 147 137 }; 148 138 149 139 struct 
isert_np {
+1 -1
drivers/scsi/qla2xxx/qla_target.c
··· 10 10 * 11 11 * Forward port and refactoring to modern qla2xxx and target/configfs 12 12 * 13 - * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org> 13 + * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org> 14 14 * 15 15 * This program is free software; you can redistribute it and/or 16 16 * modify it under the terms of the GNU General Public License
+24 -35
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 2 2 * This file contains tcm implementation using v4 configfs fabric infrastructure 3 3 * for QLogic target mode HBAs 4 4 * 5 - * ?? Copyright 2010-2011 RisingTide Systems LLC. 5 + * (c) Copyright 2010-2013 Datera, Inc. 6 6 * 7 - * Licensed to the Linux Foundation under the General Public License (GPL) 8 - * version 2. 9 - * 10 - * Author: Nicholas A. Bellinger <nab@risingtidesystems.com> 7 + * Author: Nicholas A. Bellinger <nab@daterainc.com> 11 8 * 12 9 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from 13 10 * the TCM_FC / Open-FCoE.org fabric module. ··· 357 360 return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect; 358 361 } 359 362 363 + static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg) 364 + { 365 + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 366 + struct tcm_qla2xxx_tpg, se_tpg); 367 + 368 + return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only; 369 + } 370 + 360 371 static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( 361 372 struct se_portal_group *se_tpg) 362 373 { ··· 494 489 return 0; 495 490 } 496 491 497 - /* 498 - * The LIO target core uses DMA_TO_DEVICE to mean that data is going 499 - * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean 500 - * that data is coming from the target (eg handling a READ). However, 501 - * this is just the opposite of what we have to tell the DMA mapping 502 - * layer -- eg when handling a READ, the HBA will have to DMA the data 503 - * out of memory so it can send it to the initiator, which means we 504 - * need to use DMA_TO_DEVICE when we map the data. 
505 - */ 506 - static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd) 507 - { 508 - if (se_cmd->se_cmd_flags & SCF_BIDI) 509 - return DMA_BIDIRECTIONAL; 510 - 511 - switch (se_cmd->data_direction) { 512 - case DMA_TO_DEVICE: 513 - return DMA_FROM_DEVICE; 514 - case DMA_FROM_DEVICE: 515 - return DMA_TO_DEVICE; 516 - case DMA_NONE: 517 - default: 518 - return DMA_NONE; 519 - } 520 - } 521 - 522 492 static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) 523 493 { 524 494 struct qla_tgt_cmd *cmd = container_of(se_cmd, 525 495 struct qla_tgt_cmd, se_cmd); 526 496 527 497 cmd->bufflen = se_cmd->data_length; 528 - cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); 498 + cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 529 499 530 500 cmd->sg_cnt = se_cmd->t_data_nents; 531 501 cmd->sg = se_cmd->t_data_sg; ··· 636 656 struct qla_tgt_cmd, se_cmd); 637 657 638 658 cmd->bufflen = se_cmd->data_length; 639 - cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); 659 + cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 640 660 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); 641 661 642 662 cmd->sg_cnt = se_cmd->t_data_nents; ··· 660 680 cmd->sg = NULL; 661 681 cmd->sg_cnt = 0; 662 682 cmd->offset = 0; 663 - cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); 683 + cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 664 684 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); 665 685 666 686 if (se_cmd->data_direction == DMA_FROM_DEVICE) { ··· 919 939 DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); 920 940 QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR); 921 941 942 + /* 943 + * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only 944 + */ 945 + DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only); 946 + DEF_QLA_TPG_ATTRIB(demo_mode_login_only); 947 + QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR); 948 + 922 949 static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] 
= { 923 950 &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr, 924 951 &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr, 925 952 &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr, 926 953 &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr, 954 + &tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr, 927 955 NULL, 928 956 }; 929 957 ··· 1030 1042 QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1; 1031 1043 QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1; 1032 1044 QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1; 1045 + QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1; 1033 1046 1034 1047 ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn, 1035 1048 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); ··· 1725 1736 tcm_qla2xxx_check_demo_write_protect, 1726 1737 .tpg_check_prod_mode_write_protect = 1727 1738 tcm_qla2xxx_check_prod_write_protect, 1728 - .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true, 1739 + .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, 1729 1740 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, 1730 1741 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, 1731 1742 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, ··· 1773 1784 .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true, 1774 1785 .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true, 1775 1786 .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false, 1776 - .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true, 1787 + .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, 1777 1788 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, 1778 1789 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, 1779 1790 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
+1
drivers/scsi/qla2xxx/tcm_qla2xxx.h
··· 29 29 int cache_dynamic_acls; 30 30 int demo_mode_write_protect; 31 31 int prod_mode_write_protect; 32 + int demo_mode_login_only; 32 33 }; 33 34 34 35 struct tcm_qla2xxx_tpg {
+2 -1
drivers/target/Makefile
··· 13 13 target_core_spc.o \ 14 14 target_core_ua.o \ 15 15 target_core_rd.o \ 16 - target_core_stat.o 16 + target_core_stat.o \ 17 + target_core_xcopy.o 17 18 18 19 obj-$(CONFIG_TARGET_CORE) += target_core_mod.o 19 20
+34 -49
drivers/target/iscsi/iscsi_target.c
··· 1 1 /******************************************************************************* 2 2 * This file contains main functions related to the iSCSI Target Core Driver. 3 3 * 4 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 5 - * 6 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 4 + * (c) Copyright 2007-2013 Datera, Inc. 7 5 * 8 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 7 * ··· 61 63 62 64 struct iscsit_global *iscsit_global; 63 65 64 - struct kmem_cache *lio_cmd_cache; 65 66 struct kmem_cache *lio_qr_cache; 66 67 struct kmem_cache *lio_dr_cache; 67 68 struct kmem_cache *lio_ooo_cache; ··· 217 220 spin_unlock_bh(&np->np_thread_lock); 218 221 return -1; 219 222 } 220 - if (np->np_login_tpg) { 221 - pr_err("np->np_login_tpg() is not NULL!\n"); 222 - spin_unlock_bh(&np->np_thread_lock); 223 - return -1; 224 - } 225 223 spin_unlock_bh(&np->np_thread_lock); 226 224 /* 227 225 * Determine if the portal group is accepting storage traffic. ··· 231 239 /* 232 240 * Here we serialize access across the TIQN+TPG Tuple. 
233 241 */ 234 - ret = mutex_lock_interruptible(&tpg->np_login_lock); 242 + ret = down_interruptible(&tpg->np_login_sem); 235 243 if ((ret != 0) || signal_pending(current)) 236 244 return -1; 237 245 238 - spin_lock_bh(&np->np_thread_lock); 239 - np->np_login_tpg = tpg; 240 - spin_unlock_bh(&np->np_thread_lock); 246 + spin_lock_bh(&tpg->tpg_state_lock); 247 + if (tpg->tpg_state != TPG_STATE_ACTIVE) { 248 + spin_unlock_bh(&tpg->tpg_state_lock); 249 + up(&tpg->np_login_sem); 250 + return -1; 251 + } 252 + spin_unlock_bh(&tpg->tpg_state_lock); 241 253 242 254 return 0; 243 255 } 244 256 245 - int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) 257 + void iscsit_login_kref_put(struct kref *kref) 258 + { 259 + struct iscsi_tpg_np *tpg_np = container_of(kref, 260 + struct iscsi_tpg_np, tpg_np_kref); 261 + 262 + complete(&tpg_np->tpg_np_comp); 263 + } 264 + 265 + int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg, 266 + struct iscsi_tpg_np *tpg_np) 246 267 { 247 268 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; 248 269 249 - spin_lock_bh(&np->np_thread_lock); 250 - np->np_login_tpg = NULL; 251 - spin_unlock_bh(&np->np_thread_lock); 270 + up(&tpg->np_login_sem); 252 271 253 - mutex_unlock(&tpg->np_login_lock); 272 + if (tpg_np) 273 + kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put); 254 274 255 275 if (tiqn) 256 276 iscsit_put_tiqn_for_login(tiqn); ··· 414 410 int iscsit_reset_np_thread( 415 411 struct iscsi_np *np, 416 412 struct iscsi_tpg_np *tpg_np, 417 - struct iscsi_portal_group *tpg) 413 + struct iscsi_portal_group *tpg, 414 + bool shutdown) 418 415 { 419 416 spin_lock_bh(&np->np_thread_lock); 420 - if (tpg && tpg_np) { 421 - /* 422 - * The reset operation need only be performed when the 423 - * passed struct iscsi_portal_group has a login in progress 424 - * to one of the network portals. 
425 - */ 426 - if (tpg_np->tpg_np->np_login_tpg != tpg) { 427 - spin_unlock_bh(&np->np_thread_lock); 428 - return 0; 429 - } 430 - } 431 417 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) { 432 418 spin_unlock_bh(&np->np_thread_lock); 433 419 return 0; ··· 431 437 spin_lock_bh(&np->np_thread_lock); 432 438 } 433 439 spin_unlock_bh(&np->np_thread_lock); 440 + 441 + if (tpg_np && shutdown) { 442 + kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put); 443 + 444 + wait_for_completion(&tpg_np->tpg_np_comp); 445 + } 434 446 435 447 return 0; 436 448 } ··· 497 497 .iscsit_setup_np = iscsit_setup_np, 498 498 .iscsit_accept_np = iscsit_accept_np, 499 499 .iscsit_free_np = iscsit_free_np, 500 - .iscsit_alloc_cmd = iscsit_alloc_cmd, 501 500 .iscsit_get_login_rx = iscsit_get_login_rx, 502 501 .iscsit_put_login_tx = iscsit_put_login_tx, 503 502 .iscsit_get_dataout = iscsit_build_r2ts_for_cmd, ··· 537 538 goto ts_out1; 538 539 } 539 540 540 - lio_cmd_cache = kmem_cache_create("lio_cmd_cache", 541 - sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd), 542 - 0, NULL); 543 - if (!lio_cmd_cache) { 544 - pr_err("Unable to kmem_cache_create() for" 545 - " lio_cmd_cache\n"); 546 - goto ts_out2; 547 - } 548 - 549 541 lio_qr_cache = kmem_cache_create("lio_qr_cache", 550 542 sizeof(struct iscsi_queue_req), 551 543 __alignof__(struct iscsi_queue_req), 0, NULL); 552 544 if (!lio_qr_cache) { 553 545 pr_err("nable to kmem_cache_create() for" 554 546 " lio_qr_cache\n"); 555 - goto cmd_out; 547 + goto ts_out2; 556 548 } 557 549 558 550 lio_dr_cache = kmem_cache_create("lio_dr_cache", ··· 587 597 kmem_cache_destroy(lio_dr_cache); 588 598 qr_out: 589 599 kmem_cache_destroy(lio_qr_cache); 590 - cmd_out: 591 - kmem_cache_destroy(lio_cmd_cache); 592 600 ts_out2: 593 601 iscsi_deallocate_thread_sets(); 594 602 ts_out1: ··· 604 616 iscsi_thread_set_free(); 605 617 iscsit_release_discovery_tpg(); 606 618 iscsit_unregister_transport(&iscsi_target_transport); 607 - 
kmem_cache_destroy(lio_cmd_cache); 608 619 kmem_cache_destroy(lio_qr_cache); 609 620 kmem_cache_destroy(lio_dr_cache); 610 621 kmem_cache_destroy(lio_ooo_cache); ··· 3434 3447 bool inaddr_any = iscsit_check_inaddr_any(np); 3435 3448 3436 3449 len = sprintf(buf, "TargetAddress=" 3437 - "%s%s%s:%hu,%hu", 3438 - (np->np_sockaddr.ss_family == AF_INET6) ? 3439 - "[" : "", (inaddr_any == false) ? 3450 + "%s:%hu,%hu", 3451 + (inaddr_any == false) ? 3440 3452 np->np_ip : conn->local_ip, 3441 - (np->np_sockaddr.ss_family == AF_INET6) ? 3442 - "]" : "", (inaddr_any == false) ? 3453 + (inaddr_any == false) ? 3443 3454 np->np_port : conn->local_port, 3444 3455 tpg->tpgt); 3445 3456 len += 1;
+4 -3
drivers/target/iscsi/iscsi_target.h
··· 7 7 extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *); 8 8 extern void iscsit_del_tiqn(struct iscsi_tiqn *); 9 9 extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *); 10 - extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *); 10 + extern void iscsit_login_kref_put(struct kref *); 11 + extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *, 12 + struct iscsi_tpg_np *); 11 13 extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *, 12 14 struct iscsi_np *, int); 13 15 extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *, 14 16 char *, int); 15 17 extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *, 16 - struct iscsi_portal_group *); 18 + struct iscsi_portal_group *, bool); 17 19 extern int iscsit_del_np(struct iscsi_np *); 18 20 extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *); 19 21 extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *); ··· 39 37 40 38 extern struct kmem_cache *lio_dr_cache; 41 39 extern struct kmem_cache *lio_ooo_cache; 42 - extern struct kmem_cache *lio_cmd_cache; 43 40 extern struct kmem_cache *lio_qr_cache; 44 41 extern struct kmem_cache *lio_r2t_cache; 45 42
+1 -3
drivers/target/iscsi/iscsi_target_auth.c
··· 1 1 /******************************************************************************* 2 2 * This file houses the main functions for the iSCSI CHAP support 3 3 * 4 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 5 - * 6 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 4 + * (c) Copyright 2007-2013 Datera, Inc. 7 5 * 8 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 7 *
+7 -9
drivers/target/iscsi/iscsi_target_configfs.c
··· 2 2 * This file contains the configfs implementation for iSCSI Target mode 3 3 * from the LIO-Target Project. 4 4 * 5 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 6 - * 7 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 5 + * (c) Copyright 2007-2013 Datera, Inc. 8 6 * 9 7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 10 8 * ··· 263 265 *port_str = '\0'; /* Terminate string for IP */ 264 266 port_str++; /* Skip over ":" */ 265 267 266 - ret = strict_strtoul(port_str, 0, &port); 268 + ret = kstrtoul(port_str, 0, &port); 267 269 if (ret < 0) { 268 - pr_err("strict_strtoul() failed for port_str: %d\n", ret); 270 + pr_err("kstrtoul() failed for port_str: %d\n", ret); 269 271 return ERR_PTR(ret); 270 272 } 271 273 sock_in6 = (struct sockaddr_in6 *)&sockaddr; ··· 288 290 *port_str = '\0'; /* Terminate string for IP */ 289 291 port_str++; /* Skip over ":" */ 290 292 291 - ret = strict_strtoul(port_str, 0, &port); 293 + ret = kstrtoul(port_str, 0, &port); 292 294 if (ret < 0) { 293 - pr_err("strict_strtoul() failed for port_str: %d\n", ret); 295 + pr_err("kstrtoul() failed for port_str: %d\n", ret); 294 296 return ERR_PTR(ret); 295 297 } 296 298 sock_in = (struct sockaddr_in *)&sockaddr; ··· 1479 1481 struct target_fabric_configfs *tf, 1480 1482 char *page) 1481 1483 { 1482 - return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n"); 1484 + return sprintf(page, "Datera Inc. iSCSI Target "ISCSIT_VERSION"\n"); 1483 1485 } 1484 1486 1485 1487 TF_WWN_ATTR_RO(lio_target, lio_version); ··· 1923 1925 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1924 1926 1925 1927 pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd); 1926 - cmd->release_cmd(cmd); 1928 + iscsit_release_cmd(cmd); 1927 1929 } 1928 1930 1929 1931 /* End functions for target_core_fabric_ops */
+21 -5
drivers/target/iscsi/iscsi_target_core.h
··· 9 9 #include <scsi/iscsi_proto.h> 10 10 #include <target/target_core_base.h> 11 11 12 - #define ISCSIT_VERSION "v4.1.0-rc2" 12 + #define ISCSIT_VERSION "v4.1.0" 13 13 #define ISCSI_MAX_DATASN_MISSING_COUNT 16 14 14 #define ISCSI_TX_THREAD_TCP_TIMEOUT 2 15 15 #define ISCSI_RX_THREAD_TCP_TIMEOUT 2 ··· 17 17 #define SECONDS_FOR_ASYNC_TEXT 10 18 18 #define SECONDS_FOR_LOGOUT_COMP 15 19 19 #define WHITE_SPACE " \t\v\f\n\r" 20 + #define ISCSIT_MIN_TAGS 16 21 + #define ISCSIT_EXTRA_TAGS 8 22 + #define ISCSIT_TCP_BACKLOG 256 20 23 21 24 /* struct iscsi_node_attrib sanity values */ 22 25 #define NA_DATAOUT_TIMEOUT 3 ··· 50 47 #define TA_NETIF_TIMEOUT_MAX 15 51 48 #define TA_NETIF_TIMEOUT_MIN 2 52 49 #define TA_GENERATE_NODE_ACLS 0 53 - #define TA_DEFAULT_CMDSN_DEPTH 16 50 + #define TA_DEFAULT_CMDSN_DEPTH 64 54 51 #define TA_DEFAULT_CMDSN_DEPTH_MAX 512 55 52 #define TA_DEFAULT_CMDSN_DEPTH_MIN 1 56 53 #define TA_CACHE_DYNAMIC_ACLS 0 ··· 492 489 u32 first_data_sg_off; 493 490 u32 kmapped_nents; 494 491 sense_reason_t sense_reason; 495 - void (*release_cmd)(struct iscsi_cmd *); 496 492 } ____cacheline_aligned; 497 493 498 494 struct iscsi_tmr_req { ··· 556 554 struct completion rx_half_close_comp; 557 555 /* socket used by this connection */ 558 556 struct socket *sock; 557 + void (*orig_data_ready)(struct sock *, int); 558 + void (*orig_state_change)(struct sock *); 559 + #define LOGIN_FLAGS_READ_ACTIVE 1 560 + #define LOGIN_FLAGS_CLOSED 2 561 + #define LOGIN_FLAGS_READY 4 562 + unsigned long login_flags; 563 + struct delayed_work login_work; 564 + struct delayed_work login_cleanup_work; 565 + struct iscsi_login *login; 559 566 struct timer_list nopin_timer; 560 567 struct timer_list nopin_response_timer; 561 568 struct timer_list transport_timer; 569 + struct task_struct *login_kworker; 562 570 /* Spinlock used for add/deleting cmd's from conn_cmd_list */ 563 571 spinlock_t cmd_lock; 564 572 spinlock_t conn_usage_lock; ··· 596 584 void *context; 597 585 struct 
iscsi_login_thread_s *login_thread; 598 586 struct iscsi_portal_group *tpg; 587 + struct iscsi_tpg_np *tpg_np; 599 588 /* Pointer to parent session */ 600 589 struct iscsi_session *sess; 601 590 /* Pointer to thread_set in use for this conn's threads */ ··· 695 682 u8 version_max; 696 683 u8 login_complete; 697 684 u8 login_failed; 685 + bool zero_tsih; 698 686 char isid[6]; 699 687 u32 cmd_sn; 700 688 itt_t init_task_tag; ··· 708 694 char *req_buf; 709 695 char *rsp_buf; 710 696 struct iscsi_conn *conn; 697 + struct iscsi_np *np; 711 698 } ____cacheline_aligned; 712 699 713 700 struct iscsi_node_attrib { ··· 788 773 struct __kernel_sockaddr_storage np_sockaddr; 789 774 struct task_struct *np_thread; 790 775 struct timer_list np_login_timer; 791 - struct iscsi_portal_group *np_login_tpg; 792 776 void *np_context; 793 777 struct iscsit_transport *np_transport; 794 778 struct list_head np_list; ··· 802 788 struct list_head tpg_np_parent_list; 803 789 struct se_tpg_np se_tpg_np; 804 790 spinlock_t tpg_np_parent_lock; 791 + struct completion tpg_np_comp; 792 + struct kref tpg_np_kref; 805 793 }; 806 794 807 795 struct iscsi_portal_group { ··· 825 809 spinlock_t tpg_state_lock; 826 810 struct se_portal_group tpg_se_tpg; 827 811 struct mutex tpg_access_lock; 828 - struct mutex np_login_lock; 812 + struct semaphore np_login_sem; 829 813 struct iscsi_tpg_attrib tpg_attrib; 830 814 struct iscsi_node_auth tpg_demo_auth; 831 815 /* Pointer to default list of iSCSI parameters for TPG */
+1 -3
drivers/target/iscsi/iscsi_target_datain_values.c
··· 1 1 /******************************************************************************* 2 2 * This file contains the iSCSI Target DataIN value generation functions. 3 3 * 4 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 5 - * 6 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 4 + * (c) Copyright 2007-2013 Datera, Inc. 7 5 * 8 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 7 *
+1 -3
drivers/target/iscsi/iscsi_target_device.c
··· 2 2 * This file contains the iSCSI Virtual Device and Disk Transport 3 3 * agnostic related functions. 4 4 * 5 - \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 6 - * 7 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 5 + * (c) Copyright 2007-2013 Datera, Inc. 8 6 * 9 7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 10 8 *
+1 -3
drivers/target/iscsi/iscsi_target_erl0.c
··· 2 2 * This file contains error recovery level zero functions used by 3 3 * the iSCSI Target driver. 4 4 * 5 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 6 - * 7 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 5 + * (c) Copyright 2007-2013 Datera, Inc. 8 6 * 9 7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 10 8 *
+1 -3
drivers/target/iscsi/iscsi_target_erl1.c
··· 1 1 /******************************************************************************* 2 2 * This file contains error recovery level one used by the iSCSI Target driver. 3 3 * 4 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 5 - * 6 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 4 + * (c) Copyright 2007-2013 Datera, Inc. 7 5 * 8 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 7 *
+1 -3
drivers/target/iscsi/iscsi_target_erl2.c
··· 2 2 * This file contains error recovery level two functions used by 3 3 * the iSCSI Target driver. 4 4 * 5 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 6 - * 7 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 5 + * (c) Copyright 2007-2013 Datera, Inc. 8 6 * 9 7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 10 8 *
+108 -78
drivers/target/iscsi/iscsi_target_login.c
··· 1 1 /******************************************************************************* 2 2 * This file contains the login functions used by the iSCSI Target driver. 3 3 * 4 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 5 - * 6 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 4 + * (c) Copyright 2007-2013 Datera, Inc. 7 5 * 8 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 7 * ··· 48 50 pr_err("Unable to allocate memory for struct iscsi_login.\n"); 49 51 return NULL; 50 52 } 53 + conn->login = login; 51 54 login->conn = conn; 52 55 login->first_request = 1; 53 56 ··· 427 428 ISCSI_LOGIN_STATUS_NO_RESOURCES); 428 429 return -1; 429 430 } 430 - rc = strict_strtoul(param->value, 0, &mrdsl); 431 + rc = kstrtoul(param->value, 0, &mrdsl); 431 432 if (rc < 0) { 432 433 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 433 434 ISCSI_LOGIN_STATUS_NO_RESOURCES); ··· 683 684 iscsit_start_nopin_timer(conn); 684 685 } 685 686 686 - static int iscsi_post_login_handler( 687 + int iscsi_post_login_handler( 687 688 struct iscsi_np *np, 688 689 struct iscsi_conn *conn, 689 690 u8 zero_tsih) ··· 871 872 struct __kernel_sockaddr_storage *sockaddr) 872 873 { 873 874 struct socket *sock = NULL; 874 - int backlog = 5, ret, opt = 0, len; 875 + int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len; 875 876 876 877 switch (np->np_network_transport) { 877 878 case ISCSI_TCP: ··· 1006 1007 rc = conn->sock->ops->getname(conn->sock, 1007 1008 (struct sockaddr *)&sock_in6, &err, 1); 1008 1009 if (!rc) { 1009 - snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", 1010 - &sock_in6.sin6_addr.in6_u); 1010 + if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) 1011 + snprintf(conn->login_ip, sizeof(conn->login_ip), "[%pI6c]", 1012 + &sock_in6.sin6_addr.in6_u); 1013 + else 1014 + snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4", 1015 + &sock_in6.sin6_addr.s6_addr32[3]); 1011 1016 conn->login_port = ntohs(sock_in6.sin6_port); 1012 1017 } 
1013 1018 1014 1019 rc = conn->sock->ops->getname(conn->sock, 1015 1020 (struct sockaddr *)&sock_in6, &err, 0); 1016 1021 if (!rc) { 1017 - snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c", 1018 - &sock_in6.sin6_addr.in6_u); 1022 + if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) 1023 + snprintf(conn->local_ip, sizeof(conn->local_ip), "[%pI6c]", 1024 + &sock_in6.sin6_addr.in6_u); 1025 + else 1026 + snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4", 1027 + &sock_in6.sin6_addr.s6_addr32[3]); 1019 1028 conn->local_port = ntohs(sock_in6.sin6_port); 1020 1029 } 1021 1030 } else { ··· 1123 1116 return 0; 1124 1117 } 1125 1118 1119 + void iscsi_target_login_sess_out(struct iscsi_conn *conn, 1120 + struct iscsi_np *np, bool zero_tsih, bool new_sess) 1121 + { 1122 + if (new_sess == false) 1123 + goto old_sess_out; 1124 + 1125 + pr_err("iSCSI Login negotiation failed.\n"); 1126 + iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 1127 + ISCSI_LOGIN_STATUS_INIT_ERR); 1128 + if (!zero_tsih || !conn->sess) 1129 + goto old_sess_out; 1130 + if (conn->sess->se_sess) 1131 + transport_free_session(conn->sess->se_sess); 1132 + if (conn->sess->session_index != 0) { 1133 + spin_lock_bh(&sess_idr_lock); 1134 + idr_remove(&sess_idr, conn->sess->session_index); 1135 + spin_unlock_bh(&sess_idr_lock); 1136 + } 1137 + kfree(conn->sess->sess_ops); 1138 + kfree(conn->sess); 1139 + 1140 + old_sess_out: 1141 + iscsi_stop_login_thread_timer(np); 1142 + /* 1143 + * If login negotiation fails check if the Time2Retain timer 1144 + * needs to be restarted. 
1145 + */ 1146 + if (!zero_tsih && conn->sess) { 1147 + spin_lock_bh(&conn->sess->conn_lock); 1148 + if (conn->sess->session_state == TARG_SESS_STATE_FAILED) { 1149 + struct se_portal_group *se_tpg = 1150 + &ISCSI_TPG_C(conn)->tpg_se_tpg; 1151 + 1152 + atomic_set(&conn->sess->session_continuation, 0); 1153 + spin_unlock_bh(&conn->sess->conn_lock); 1154 + spin_lock_bh(&se_tpg->session_lock); 1155 + iscsit_start_time2retain_handler(conn->sess); 1156 + spin_unlock_bh(&se_tpg->session_lock); 1157 + } else 1158 + spin_unlock_bh(&conn->sess->conn_lock); 1159 + iscsit_dec_session_usage_count(conn->sess); 1160 + } 1161 + 1162 + if (!IS_ERR(conn->conn_rx_hash.tfm)) 1163 + crypto_free_hash(conn->conn_rx_hash.tfm); 1164 + if (!IS_ERR(conn->conn_tx_hash.tfm)) 1165 + crypto_free_hash(conn->conn_tx_hash.tfm); 1166 + 1167 + if (conn->conn_cpumask) 1168 + free_cpumask_var(conn->conn_cpumask); 1169 + 1170 + kfree(conn->conn_ops); 1171 + 1172 + if (conn->param_list) { 1173 + iscsi_release_param_list(conn->param_list); 1174 + conn->param_list = NULL; 1175 + } 1176 + iscsi_target_nego_release(conn); 1177 + 1178 + if (conn->sock) { 1179 + sock_release(conn->sock); 1180 + conn->sock = NULL; 1181 + } 1182 + 1183 + if (conn->conn_transport->iscsit_free_conn) 1184 + conn->conn_transport->iscsit_free_conn(conn); 1185 + 1186 + iscsit_put_transport(conn->conn_transport); 1187 + kfree(conn); 1188 + } 1189 + 1126 1190 static int __iscsi_target_login_thread(struct iscsi_np *np) 1127 1191 { 1128 1192 u8 *buffer, zero_tsih = 0; ··· 1202 1124 struct iscsi_login *login; 1203 1125 struct iscsi_portal_group *tpg = NULL; 1204 1126 struct iscsi_login_req *pdu; 1127 + struct iscsi_tpg_np *tpg_np; 1128 + bool new_sess = false; 1205 1129 1206 1130 flush_signals(current); 1207 1131 ··· 1344 1264 tpg = conn->tpg; 1345 1265 goto new_sess_out; 1346 1266 } 1267 + login->zero_tsih = zero_tsih; 1347 1268 1348 1269 tpg = conn->tpg; 1349 1270 if (!tpg) { ··· 1360 1279 goto old_sess_out; 1361 1280 } 1362 1281 1363 - 
if (iscsi_target_start_negotiation(login, conn) < 0) 1282 + ret = iscsi_target_start_negotiation(login, conn); 1283 + if (ret < 0) 1364 1284 goto new_sess_out; 1365 1285 1366 1286 if (!conn->sess) { ··· 1374 1292 if (signal_pending(current)) 1375 1293 goto new_sess_out; 1376 1294 1377 - ret = iscsi_post_login_handler(np, conn, zero_tsih); 1295 + if (ret == 1) { 1296 + tpg_np = conn->tpg_np; 1378 1297 1379 - if (ret < 0) 1380 - goto new_sess_out; 1298 + ret = iscsi_post_login_handler(np, conn, zero_tsih); 1299 + if (ret < 0) 1300 + goto new_sess_out; 1381 1301 1382 - iscsit_deaccess_np(np, tpg); 1302 + iscsit_deaccess_np(np, tpg, tpg_np); 1303 + } 1304 + 1383 1305 tpg = NULL; 1306 + tpg_np = NULL; 1384 1307 /* Get another socket */ 1385 1308 return 1; 1386 1309 1387 1310 new_sess_out: 1388 - pr_err("iSCSI Login negotiation failed.\n"); 1389 - iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 1390 - ISCSI_LOGIN_STATUS_INIT_ERR); 1391 - if (!zero_tsih || !conn->sess) 1392 - goto old_sess_out; 1393 - if (conn->sess->se_sess) 1394 - transport_free_session(conn->sess->se_sess); 1395 - if (conn->sess->session_index != 0) { 1396 - spin_lock_bh(&sess_idr_lock); 1397 - idr_remove(&sess_idr, conn->sess->session_index); 1398 - spin_unlock_bh(&sess_idr_lock); 1399 - } 1400 - kfree(conn->sess->sess_ops); 1401 - kfree(conn->sess); 1311 + new_sess = true; 1402 1312 old_sess_out: 1403 - iscsi_stop_login_thread_timer(np); 1404 - /* 1405 - * If login negotiation fails check if the Time2Retain timer 1406 - * needs to be restarted. 
1407 - */ 1408 - if (!zero_tsih && conn->sess) { 1409 - spin_lock_bh(&conn->sess->conn_lock); 1410 - if (conn->sess->session_state == TARG_SESS_STATE_FAILED) { 1411 - struct se_portal_group *se_tpg = 1412 - &ISCSI_TPG_C(conn)->tpg_se_tpg; 1413 - 1414 - atomic_set(&conn->sess->session_continuation, 0); 1415 - spin_unlock_bh(&conn->sess->conn_lock); 1416 - spin_lock_bh(&se_tpg->session_lock); 1417 - iscsit_start_time2retain_handler(conn->sess); 1418 - spin_unlock_bh(&se_tpg->session_lock); 1419 - } else 1420 - spin_unlock_bh(&conn->sess->conn_lock); 1421 - iscsit_dec_session_usage_count(conn->sess); 1422 - } 1423 - 1424 - if (!IS_ERR(conn->conn_rx_hash.tfm)) 1425 - crypto_free_hash(conn->conn_rx_hash.tfm); 1426 - if (!IS_ERR(conn->conn_tx_hash.tfm)) 1427 - crypto_free_hash(conn->conn_tx_hash.tfm); 1428 - 1429 - if (conn->conn_cpumask) 1430 - free_cpumask_var(conn->conn_cpumask); 1431 - 1432 - kfree(conn->conn_ops); 1433 - 1434 - if (conn->param_list) { 1435 - iscsi_release_param_list(conn->param_list); 1436 - conn->param_list = NULL; 1437 - } 1438 - iscsi_target_nego_release(conn); 1439 - 1440 - if (conn->sock) { 1441 - sock_release(conn->sock); 1442 - conn->sock = NULL; 1443 - } 1444 - 1445 - if (conn->conn_transport->iscsit_free_conn) 1446 - conn->conn_transport->iscsit_free_conn(conn); 1447 - 1448 - iscsit_put_transport(conn->conn_transport); 1449 - 1450 - kfree(conn); 1313 + tpg_np = conn->tpg_np; 1314 + iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess); 1315 + new_sess = false; 1451 1316 1452 1317 if (tpg) { 1453 - iscsit_deaccess_np(np, tpg); 1318 + iscsit_deaccess_np(np, tpg, tpg_np); 1454 1319 tpg = NULL; 1320 + tpg_np = NULL; 1455 1321 } 1456 1322 1457 1323 out:
+3
drivers/target/iscsi/iscsi_target_login.h
··· 12 12 extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); 13 13 extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); 14 14 extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); 15 + extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); 16 + extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, 17 + bool, bool); 15 18 extern int iscsi_target_login_thread(void *); 16 19 extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *); 17 20
+349 -20
drivers/target/iscsi/iscsi_target_nego.c
··· 1 1 /******************************************************************************* 2 2 * This file contains main functions related to iSCSI Parameter negotiation. 3 3 * 4 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 5 - * 6 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 4 + * (c) Copyright 2007-2013 Datera, Inc. 7 5 * 8 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 7 * ··· 375 377 return 0; 376 378 } 377 379 378 - static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login) 380 + static void iscsi_target_sk_data_ready(struct sock *sk, int count) 379 381 { 380 - if (iscsi_target_do_tx_login_io(conn, login) < 0) 381 - return -1; 382 + struct iscsi_conn *conn = sk->sk_user_data; 383 + bool rc; 382 384 383 - if (conn->conn_transport->iscsit_get_login_rx(conn, login) < 0) 384 - return -1; 385 + pr_debug("Entering iscsi_target_sk_data_ready: conn: %p\n", conn); 385 386 386 - return 0; 387 + write_lock_bh(&sk->sk_callback_lock); 388 + if (!sk->sk_user_data) { 389 + write_unlock_bh(&sk->sk_callback_lock); 390 + return; 391 + } 392 + if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) { 393 + write_unlock_bh(&sk->sk_callback_lock); 394 + pr_debug("Got LOGIN_FLAGS_READY=0, conn: %p >>>>\n", conn); 395 + return; 396 + } 397 + if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { 398 + write_unlock_bh(&sk->sk_callback_lock); 399 + pr_debug("Got LOGIN_FLAGS_CLOSED=1, conn: %p >>>>\n", conn); 400 + return; 401 + } 402 + if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { 403 + write_unlock_bh(&sk->sk_callback_lock); 404 + pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn); 405 + return; 406 + } 407 + 408 + rc = schedule_delayed_work(&conn->login_work, 0); 409 + if (rc == false) { 410 + pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work" 411 + " got false\n"); 412 + } 413 + write_unlock_bh(&sk->sk_callback_lock); 414 + } 415 + 416 + static 
void iscsi_target_sk_state_change(struct sock *); 417 + 418 + static void iscsi_target_set_sock_callbacks(struct iscsi_conn *conn) 419 + { 420 + struct sock *sk; 421 + 422 + if (!conn->sock) 423 + return; 424 + 425 + sk = conn->sock->sk; 426 + pr_debug("Entering iscsi_target_set_sock_callbacks: conn: %p\n", conn); 427 + 428 + write_lock_bh(&sk->sk_callback_lock); 429 + sk->sk_user_data = conn; 430 + conn->orig_data_ready = sk->sk_data_ready; 431 + conn->orig_state_change = sk->sk_state_change; 432 + sk->sk_data_ready = iscsi_target_sk_data_ready; 433 + sk->sk_state_change = iscsi_target_sk_state_change; 434 + write_unlock_bh(&sk->sk_callback_lock); 435 + 436 + sk->sk_sndtimeo = TA_LOGIN_TIMEOUT * HZ; 437 + sk->sk_rcvtimeo = TA_LOGIN_TIMEOUT * HZ; 438 + } 439 + 440 + static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn) 441 + { 442 + struct sock *sk; 443 + 444 + if (!conn->sock) 445 + return; 446 + 447 + sk = conn->sock->sk; 448 + pr_debug("Entering iscsi_target_restore_sock_callbacks: conn: %p\n", conn); 449 + 450 + write_lock_bh(&sk->sk_callback_lock); 451 + if (!sk->sk_user_data) { 452 + write_unlock_bh(&sk->sk_callback_lock); 453 + return; 454 + } 455 + sk->sk_user_data = NULL; 456 + sk->sk_data_ready = conn->orig_data_ready; 457 + sk->sk_state_change = conn->orig_state_change; 458 + write_unlock_bh(&sk->sk_callback_lock); 459 + 460 + sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; 461 + sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; 462 + } 463 + 464 + static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); 465 + 466 + static bool iscsi_target_sk_state_check(struct sock *sk) 467 + { 468 + if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { 469 + pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE," 470 + "returning FALSE\n"); 471 + return false; 472 + } 473 + return true; 474 + } 475 + 476 + static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) 477 + { 478 + struct iscsi_np 
*np = login->np; 479 + bool zero_tsih = login->zero_tsih; 480 + 481 + iscsi_remove_failed_auth_entry(conn); 482 + iscsi_target_nego_release(conn); 483 + iscsi_target_login_sess_out(conn, np, zero_tsih, true); 484 + } 485 + 486 + static void iscsi_target_login_timeout(unsigned long data) 487 + { 488 + struct iscsi_conn *conn = (struct iscsi_conn *)data; 489 + 490 + pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n"); 491 + 492 + if (conn->login_kworker) { 493 + pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n", 494 + conn->login_kworker->comm, conn->login_kworker->pid); 495 + send_sig(SIGINT, conn->login_kworker, 1); 496 + } 497 + } 498 + 499 + static void iscsi_target_do_login_rx(struct work_struct *work) 500 + { 501 + struct iscsi_conn *conn = container_of(work, 502 + struct iscsi_conn, login_work.work); 503 + struct iscsi_login *login = conn->login; 504 + struct iscsi_np *np = login->np; 505 + struct iscsi_portal_group *tpg = conn->tpg; 506 + struct iscsi_tpg_np *tpg_np = conn->tpg_np; 507 + struct timer_list login_timer; 508 + int rc, zero_tsih = login->zero_tsih; 509 + bool state; 510 + 511 + pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", 512 + conn, current->comm, current->pid); 513 + 514 + spin_lock(&tpg->tpg_state_lock); 515 + state = (tpg->tpg_state == TPG_STATE_ACTIVE); 516 + spin_unlock(&tpg->tpg_state_lock); 517 + 518 + if (state == false) { 519 + pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); 520 + iscsi_target_restore_sock_callbacks(conn); 521 + iscsi_target_login_drop(conn, login); 522 + iscsit_deaccess_np(np, tpg, tpg_np); 523 + return; 524 + } 525 + 526 + if (conn->sock) { 527 + struct sock *sk = conn->sock->sk; 528 + 529 + read_lock_bh(&sk->sk_callback_lock); 530 + state = iscsi_target_sk_state_check(sk); 531 + read_unlock_bh(&sk->sk_callback_lock); 532 + 533 + if (state == false) { 534 + pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); 535 + 
iscsi_target_restore_sock_callbacks(conn); 536 + iscsi_target_login_drop(conn, login); 537 + iscsit_deaccess_np(np, tpg, tpg_np); 538 + return; 539 + } 540 + } 541 + 542 + conn->login_kworker = current; 543 + allow_signal(SIGINT); 544 + 545 + init_timer(&login_timer); 546 + login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ); 547 + login_timer.data = (unsigned long)conn; 548 + login_timer.function = iscsi_target_login_timeout; 549 + add_timer(&login_timer); 550 + pr_debug("Starting login_timer for %s/%d\n", current->comm, current->pid); 551 + 552 + rc = conn->conn_transport->iscsit_get_login_rx(conn, login); 553 + del_timer_sync(&login_timer); 554 + flush_signals(current); 555 + conn->login_kworker = NULL; 556 + 557 + if (rc < 0) { 558 + iscsi_target_restore_sock_callbacks(conn); 559 + iscsi_target_login_drop(conn, login); 560 + iscsit_deaccess_np(np, tpg, tpg_np); 561 + return; 562 + } 563 + 564 + pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", 565 + conn, current->comm, current->pid); 566 + 567 + rc = iscsi_target_do_login(conn, login); 568 + if (rc < 0) { 569 + iscsi_target_restore_sock_callbacks(conn); 570 + iscsi_target_login_drop(conn, login); 571 + iscsit_deaccess_np(np, tpg, tpg_np); 572 + } else if (!rc) { 573 + if (conn->sock) { 574 + struct sock *sk = conn->sock->sk; 575 + 576 + write_lock_bh(&sk->sk_callback_lock); 577 + clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags); 578 + write_unlock_bh(&sk->sk_callback_lock); 579 + } 580 + } else if (rc == 1) { 581 + iscsi_target_nego_release(conn); 582 + iscsi_post_login_handler(np, conn, zero_tsih); 583 + iscsit_deaccess_np(np, tpg, tpg_np); 584 + } 585 + } 586 + 587 + static void iscsi_target_do_cleanup(struct work_struct *work) 588 + { 589 + struct iscsi_conn *conn = container_of(work, 590 + struct iscsi_conn, login_cleanup_work.work); 591 + struct sock *sk = conn->sock->sk; 592 + struct iscsi_login *login = conn->login; 593 + struct iscsi_np *np = login->np; 594 + struct 
iscsi_portal_group *tpg = conn->tpg; 595 + struct iscsi_tpg_np *tpg_np = conn->tpg_np; 596 + 597 + pr_debug("Entering iscsi_target_do_cleanup\n"); 598 + 599 + cancel_delayed_work_sync(&conn->login_work); 600 + conn->orig_state_change(sk); 601 + 602 + iscsi_target_restore_sock_callbacks(conn); 603 + iscsi_target_login_drop(conn, login); 604 + iscsit_deaccess_np(np, tpg, tpg_np); 605 + 606 + pr_debug("iscsi_target_do_cleanup done()\n"); 607 + } 608 + 609 + static void iscsi_target_sk_state_change(struct sock *sk) 610 + { 611 + struct iscsi_conn *conn; 612 + void (*orig_state_change)(struct sock *); 613 + bool state; 614 + 615 + pr_debug("Entering iscsi_target_sk_state_change\n"); 616 + 617 + write_lock_bh(&sk->sk_callback_lock); 618 + conn = sk->sk_user_data; 619 + if (!conn) { 620 + write_unlock_bh(&sk->sk_callback_lock); 621 + return; 622 + } 623 + orig_state_change = conn->orig_state_change; 624 + 625 + if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) { 626 + pr_debug("Got LOGIN_FLAGS_READY=0 sk_state_change conn: %p\n", 627 + conn); 628 + write_unlock_bh(&sk->sk_callback_lock); 629 + orig_state_change(sk); 630 + return; 631 + } 632 + if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { 633 + pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" 634 + " conn: %p\n", conn); 635 + write_unlock_bh(&sk->sk_callback_lock); 636 + orig_state_change(sk); 637 + return; 638 + } 639 + if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { 640 + pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", 641 + conn); 642 + write_unlock_bh(&sk->sk_callback_lock); 643 + orig_state_change(sk); 644 + return; 645 + } 646 + 647 + state = iscsi_target_sk_state_check(sk); 648 + write_unlock_bh(&sk->sk_callback_lock); 649 + 650 + pr_debug("iscsi_target_sk_state_change: state: %d\n", state); 651 + 652 + if (!state) { 653 + pr_debug("iscsi_target_sk_state_change got failed state\n"); 654 + schedule_delayed_work(&conn->login_cleanup_work, 0); 655 + return; 
656 + } 657 + orig_state_change(sk); 387 658 } 388 659 389 660 /* ··· 910 643 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { 911 644 login->tsih = conn->sess->tsih; 912 645 login->login_complete = 1; 646 + iscsi_target_restore_sock_callbacks(conn); 913 647 if (iscsi_target_do_tx_login_io(conn, 914 648 login) < 0) 915 649 return -1; 916 - return 0; 650 + return 1; 917 651 } 918 652 break; 919 653 default: ··· 924 656 break; 925 657 } 926 658 927 - if (iscsi_target_do_login_io(conn, login) < 0) 659 + if (iscsi_target_do_tx_login_io(conn, login) < 0) 928 660 return -1; 929 661 930 662 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { 931 663 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT; 932 664 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK; 665 + } 666 + break; 667 + } 668 + 669 + if (conn->sock) { 670 + struct sock *sk = conn->sock->sk; 671 + bool state; 672 + 673 + read_lock_bh(&sk->sk_callback_lock); 674 + state = iscsi_target_sk_state_check(sk); 675 + read_unlock_bh(&sk->sk_callback_lock); 676 + 677 + if (!state) { 678 + pr_debug("iscsi_target_do_login() failed state for" 679 + " conn: %p\n", conn); 680 + return -1; 933 681 } 934 682 } 935 683 ··· 979 695 char *tmpbuf, *start = NULL, *end = NULL, *key, *value; 980 696 struct iscsi_session *sess = conn->sess; 981 697 struct iscsi_tiqn *tiqn; 698 + struct iscsi_tpg_np *tpg_np = NULL; 982 699 struct iscsi_login_req *login_req; 983 - u32 payload_length; 984 - int sessiontype = 0, ret = 0; 700 + struct se_node_acl *se_nacl; 701 + u32 payload_length, queue_depth = 0; 702 + int sessiontype = 0, ret = 0, tag_num, tag_size; 703 + 704 + INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx); 705 + INIT_DELAYED_WORK(&conn->login_cleanup_work, iscsi_target_do_cleanup); 706 + iscsi_target_set_sock_callbacks(conn); 707 + 708 + login->np = np; 985 709 986 710 login_req = (struct iscsi_login_req *) login->req; 987 711 payload_length = ntoh24(login_req->dlength); ··· 1083 791 goto out; 1084 792 } 1085 793 ret 
= 0; 1086 - goto out; 794 + goto alloc_tags; 1087 795 } 1088 796 1089 797 get_target: ··· 1114 822 /* 1115 823 * Locate Target Portal Group from Storage Node. 1116 824 */ 1117 - conn->tpg = iscsit_get_tpg_from_np(tiqn, np); 825 + conn->tpg = iscsit_get_tpg_from_np(tiqn, np, &tpg_np); 1118 826 if (!conn->tpg) { 1119 827 pr_err("Unable to locate Target Portal Group" 1120 828 " on %s\n", tiqn->tiqn); ··· 1124 832 ret = -1; 1125 833 goto out; 1126 834 } 835 + conn->tpg_np = tpg_np; 1127 836 pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt); 1128 837 /* 1129 838 * Setup crc32c modules from libcrypto 1130 839 */ 1131 840 if (iscsi_login_setup_crypto(conn) < 0) { 1132 841 pr_err("iscsi_login_setup_crypto() failed\n"); 842 + kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put); 843 + iscsit_put_tiqn_for_login(tiqn); 844 + conn->tpg = NULL; 1133 845 ret = -1; 1134 846 goto out; 1135 847 } ··· 1142 846 * process login attempt. 1143 847 */ 1144 848 if (iscsit_access_np(np, conn->tpg) < 0) { 849 + kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put); 1145 850 iscsit_put_tiqn_for_login(tiqn); 1146 851 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1147 852 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); 1148 - ret = -1; 1149 853 conn->tpg = NULL; 854 + ret = -1; 1150 855 goto out; 1151 856 } 1152 857 ··· 1180 883 ret = -1; 1181 884 goto out; 1182 885 } 886 + se_nacl = sess->se_sess->se_node_acl; 887 + queue_depth = se_nacl->queue_depth; 888 + /* 889 + * Setup pre-allocated tags based upon allowed per NodeACL CmdSN 890 + * depth for non immediate commands, plus extra tags for immediate 891 + * commands. 892 + * 893 + * Also enforce a ISCSIT_MIN_TAGS to prevent unnecessary contention 894 + * in per-cpu-ida tag allocation logic + small queue_depth. 
895 + */ 896 + alloc_tags: 897 + tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); 898 + tag_num += ISCSIT_EXTRA_TAGS; 899 + tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; 1183 900 1184 - ret = 0; 901 + ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size); 902 + if (ret < 0) { 903 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 904 + ISCSI_LOGIN_STATUS_NO_RESOURCES); 905 + ret = -1; 906 + } 1185 907 out: 1186 908 kfree(tmpbuf); 1187 909 return ret; ··· 1213 897 int ret; 1214 898 1215 899 ret = iscsi_target_do_login(conn, login); 1216 - if (ret != 0) 1217 - iscsi_remove_failed_auth_entry(conn); 900 + if (!ret) { 901 + if (conn->sock) { 902 + struct sock *sk = conn->sock->sk; 1218 903 1219 - iscsi_target_nego_release(conn); 904 + write_lock_bh(&sk->sk_callback_lock); 905 + set_bit(LOGIN_FLAGS_READY, &conn->login_flags); 906 + write_unlock_bh(&sk->sk_callback_lock); 907 + } 908 + } else if (ret < 0) { 909 + cancel_delayed_work_sync(&conn->login_work); 910 + cancel_delayed_work_sync(&conn->login_cleanup_work); 911 + iscsi_target_restore_sock_callbacks(conn); 912 + iscsi_remove_failed_auth_entry(conn); 913 + } 914 + if (ret != 0) 915 + iscsi_target_nego_release(conn); 916 + 1220 917 return ret; 1221 918 } 1222 919
+1 -3
drivers/target/iscsi/iscsi_target_nodeattrib.c
··· 1 1 /******************************************************************************* 2 2 * This file contains the main functions related to Initiator Node Attributes. 3 3 * 4 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 5 - * 6 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 4 + * (c) Copyright 2007-2013 Datera, Inc. 7 5 * 8 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 7 *
+2 -4
drivers/target/iscsi/iscsi_target_parameters.c
··· 1 1 /******************************************************************************* 2 2 * This file contains main functions related to iSCSI Parameter negotiation. 3 3 * 4 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 5 - * 6 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 4 + * (c) Copyright 2007-2013 Datera, Inc. 7 5 * 8 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 7 * ··· 1180 1182 unsigned long long tmp; 1181 1183 int rc; 1182 1184 1183 - rc = strict_strtoull(param->value, 0, &tmp); 1185 + rc = kstrtoull(param->value, 0, &tmp); 1184 1186 if (rc < 0) 1185 1187 return -1; 1186 1188
+1 -3
drivers/target/iscsi/iscsi_target_seq_pdu_list.c
··· 2 2 * This file contains main functions related to iSCSI DataSequenceInOrder=No 3 3 * and DataPDUInOrder=No. 4 4 * 5 - \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 6 - * 7 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 5 + * (c) Copyright 2007-2013 Datera, Inc. 8 6 * 9 7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 10 8 *
+3 -11
drivers/target/iscsi/iscsi_target_stat.c
··· 2 2 * Modern ConfigFS group context specific iSCSI statistics based on original 3 3 * iscsi_target_mib.c code 4 4 * 5 - * Copyright (c) 2011 Rising Tide Systems 6 - * 7 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 5 + * Copyright (c) 2011-2013 Datera, Inc. 8 6 * 9 7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 10 8 * ··· 175 177 static ssize_t iscsi_stat_instance_show_attr_vendor( 176 178 struct iscsi_wwn_stat_grps *igrps, char *page) 177 179 { 178 - return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n"); 180 + return snprintf(page, PAGE_SIZE, "Datera, Inc. iSCSI-Target\n"); 179 181 } 180 182 ISCSI_STAT_INSTANCE_ATTR_RO(vendor); 181 183 ··· 430 432 int ret; 431 433 432 434 spin_lock(&lstat->lock); 433 - if (lstat->last_intr_fail_ip_family == AF_INET6) { 434 - ret = snprintf(page, PAGE_SIZE, "[%s]\n", 435 - lstat->last_intr_fail_ip_addr); 436 - } else { 437 - ret = snprintf(page, PAGE_SIZE, "%s\n", 438 - lstat->last_intr_fail_ip_addr); 439 - } 435 + ret = snprintf(page, PAGE_SIZE, "%s\n", lstat->last_intr_fail_ip_addr); 440 436 spin_unlock(&lstat->lock); 441 437 442 438 return ret;
+1 -3
drivers/target/iscsi/iscsi_target_tmr.c
··· 1 1 /******************************************************************************* 2 2 * This file contains the iSCSI Target specific Task Management functions. 3 3 * 4 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 5 - * 6 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 4 + * (c) Copyright 2007-2013 Datera, Inc. 7 5 * 8 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 7 *
+18 -11
drivers/target/iscsi/iscsi_target_tpg.c
··· 1 1 /******************************************************************************* 2 2 * This file contains iSCSI Target Portal Group related functions. 3 3 * 4 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 5 - * 6 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 4 + * (c) Copyright 2007-2013 Datera, Inc. 7 5 * 8 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 7 * ··· 47 49 INIT_LIST_HEAD(&tpg->tpg_gnp_list); 48 50 INIT_LIST_HEAD(&tpg->tpg_list); 49 51 mutex_init(&tpg->tpg_access_lock); 50 - mutex_init(&tpg->np_login_lock); 52 + sema_init(&tpg->np_login_sem, 1); 51 53 spin_lock_init(&tpg->tpg_state_lock); 52 54 spin_lock_init(&tpg->tpg_np_lock); 53 55 ··· 127 129 128 130 struct iscsi_portal_group *iscsit_get_tpg_from_np( 129 131 struct iscsi_tiqn *tiqn, 130 - struct iscsi_np *np) 132 + struct iscsi_np *np, 133 + struct iscsi_tpg_np **tpg_np_out) 131 134 { 132 135 struct iscsi_portal_group *tpg = NULL; 133 136 struct iscsi_tpg_np *tpg_np; ··· 146 147 spin_lock(&tpg->tpg_np_lock); 147 148 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) { 148 149 if (tpg_np->tpg_np == np) { 150 + *tpg_np_out = tpg_np; 151 + kref_get(&tpg_np->tpg_np_kref); 149 152 spin_unlock(&tpg->tpg_np_lock); 150 153 spin_unlock(&tiqn->tiqn_tpg_lock); 151 154 return tpg; ··· 176 175 177 176 static void iscsit_clear_tpg_np_login_thread( 178 177 struct iscsi_tpg_np *tpg_np, 179 - struct iscsi_portal_group *tpg) 178 + struct iscsi_portal_group *tpg, 179 + bool shutdown) 180 180 { 181 181 if (!tpg_np->tpg_np) { 182 182 pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n"); 183 183 return; 184 184 } 185 185 186 - iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg); 186 + iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown); 187 187 } 188 188 189 189 void iscsit_clear_tpg_np_login_threads( 190 - struct iscsi_portal_group *tpg) 190 + struct iscsi_portal_group *tpg, 191 + bool shutdown) 191 192 { 192 193 struct iscsi_tpg_np 
*tpg_np; 193 194 ··· 200 197 continue; 201 198 } 202 199 spin_unlock(&tpg->tpg_np_lock); 203 - iscsit_clear_tpg_np_login_thread(tpg_np, tpg); 200 + iscsit_clear_tpg_np_login_thread(tpg_np, tpg, shutdown); 204 201 spin_lock(&tpg->tpg_np_lock); 205 202 } 206 203 spin_unlock(&tpg->tpg_np_lock); ··· 270 267 spin_lock(&tpg->tpg_state_lock); 271 268 tpg->tpg_state = TPG_STATE_INACTIVE; 272 269 spin_unlock(&tpg->tpg_state_lock); 270 + 271 + iscsit_clear_tpg_np_login_threads(tpg, true); 273 272 274 273 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) { 275 274 pr_err("Unable to delete iSCSI Target Portal Group:" ··· 373 368 tpg->tpg_state = TPG_STATE_INACTIVE; 374 369 spin_unlock(&tpg->tpg_state_lock); 375 370 376 - iscsit_clear_tpg_np_login_threads(tpg); 371 + iscsit_clear_tpg_np_login_threads(tpg, false); 377 372 378 373 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) { 379 374 spin_lock(&tpg->tpg_state_lock); ··· 495 490 INIT_LIST_HEAD(&tpg_np->tpg_np_child_list); 496 491 INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list); 497 492 spin_lock_init(&tpg_np->tpg_np_parent_lock); 493 + init_completion(&tpg_np->tpg_np_comp); 494 + kref_init(&tpg_np->tpg_np_kref); 498 495 tpg_np->tpg_np = np; 499 496 tpg_np->tpg = tpg; 500 497 ··· 527 520 struct iscsi_portal_group *tpg, 528 521 struct iscsi_np *np) 529 522 { 530 - iscsit_clear_tpg_np_login_thread(tpg_np, tpg); 523 + iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true); 531 524 532 525 pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n", 533 526 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+2 -2
drivers/target/iscsi/iscsi_target_tpg.h
··· 5 5 extern int iscsit_load_discovery_tpg(void); 6 6 extern void iscsit_release_discovery_tpg(void); 7 7 extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *, 8 - struct iscsi_np *); 8 + struct iscsi_np *, struct iscsi_tpg_np **); 9 9 extern int iscsit_get_tpg(struct iscsi_portal_group *); 10 10 extern void iscsit_put_tpg(struct iscsi_portal_group *); 11 - extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *); 11 + extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *, bool); 12 12 extern void iscsit_tpg_dump_params(struct iscsi_portal_group *); 13 13 extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *); 14 14 extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
+68 -99
drivers/target/iscsi/iscsi_target_tq.c
··· 1 1 /******************************************************************************* 2 2 * This file contains the iSCSI Login Thread and Thread Queue functions. 3 3 * 4 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 5 - * 6 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 4 + * (c) Copyright 2007-2013 Datera, Inc. 7 5 * 8 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 7 * ··· 103 105 ts->status = ISCSI_THREAD_SET_FREE; 104 106 INIT_LIST_HEAD(&ts->ts_list); 105 107 spin_lock_init(&ts->ts_state_lock); 106 - init_completion(&ts->rx_post_start_comp); 107 - init_completion(&ts->tx_post_start_comp); 108 108 init_completion(&ts->rx_restart_comp); 109 109 init_completion(&ts->tx_restart_comp); 110 110 init_completion(&ts->rx_start_comp); 111 111 init_completion(&ts->tx_start_comp); 112 + sema_init(&ts->ts_activate_sem, 0); 112 113 113 114 ts->create_threads = 1; 114 115 ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s", ··· 136 139 return allocated_thread_pair_count; 137 140 } 138 141 142 + static void iscsi_deallocate_thread_one(struct iscsi_thread_set *ts) 143 + { 144 + spin_lock_bh(&ts->ts_state_lock); 145 + ts->status = ISCSI_THREAD_SET_DIE; 146 + 147 + if (ts->rx_thread) { 148 + complete(&ts->rx_start_comp); 149 + spin_unlock_bh(&ts->ts_state_lock); 150 + kthread_stop(ts->rx_thread); 151 + spin_lock_bh(&ts->ts_state_lock); 152 + } 153 + if (ts->tx_thread) { 154 + complete(&ts->tx_start_comp); 155 + spin_unlock_bh(&ts->ts_state_lock); 156 + kthread_stop(ts->tx_thread); 157 + spin_lock_bh(&ts->ts_state_lock); 158 + } 159 + spin_unlock_bh(&ts->ts_state_lock); 160 + /* 161 + * Release this thread_id in the thread_set_bitmap 162 + */ 163 + spin_lock(&ts_bitmap_lock); 164 + bitmap_release_region(iscsit_global->ts_bitmap, 165 + ts->thread_id, get_order(1)); 166 + spin_unlock(&ts_bitmap_lock); 167 + 168 + kfree(ts); 169 + } 170 + 139 171 void iscsi_deallocate_thread_sets(void) 140 172 { 141 - u32 
released_count = 0; 142 173 struct iscsi_thread_set *ts = NULL; 174 + u32 released_count = 0; 143 175 144 176 while ((ts = iscsi_get_ts_from_inactive_list())) { 145 177 146 - spin_lock_bh(&ts->ts_state_lock); 147 - ts->status = ISCSI_THREAD_SET_DIE; 148 - spin_unlock_bh(&ts->ts_state_lock); 149 - 150 - if (ts->rx_thread) { 151 - send_sig(SIGINT, ts->rx_thread, 1); 152 - kthread_stop(ts->rx_thread); 153 - } 154 - if (ts->tx_thread) { 155 - send_sig(SIGINT, ts->tx_thread, 1); 156 - kthread_stop(ts->tx_thread); 157 - } 158 - /* 159 - * Release this thread_id in the thread_set_bitmap 160 - */ 161 - spin_lock(&ts_bitmap_lock); 162 - bitmap_release_region(iscsit_global->ts_bitmap, 163 - ts->thread_id, get_order(1)); 164 - spin_unlock(&ts_bitmap_lock); 165 - 178 + iscsi_deallocate_thread_one(ts); 166 179 released_count++; 167 - kfree(ts); 168 180 } 169 181 170 182 if (released_count) ··· 193 187 if (!ts) 194 188 break; 195 189 196 - spin_lock_bh(&ts->ts_state_lock); 197 - ts->status = ISCSI_THREAD_SET_DIE; 198 - spin_unlock_bh(&ts->ts_state_lock); 199 - 200 - if (ts->rx_thread) { 201 - send_sig(SIGINT, ts->rx_thread, 1); 202 - kthread_stop(ts->rx_thread); 203 - } 204 - if (ts->tx_thread) { 205 - send_sig(SIGINT, ts->tx_thread, 1); 206 - kthread_stop(ts->tx_thread); 207 - } 208 - /* 209 - * Release this thread_id in the thread_set_bitmap 210 - */ 211 - spin_lock(&ts_bitmap_lock); 212 - bitmap_release_region(iscsit_global->ts_bitmap, 213 - ts->thread_id, get_order(1)); 214 - spin_unlock(&ts_bitmap_lock); 215 - 190 + iscsi_deallocate_thread_one(ts); 216 191 released_count++; 217 - kfree(ts); 218 192 } 219 193 220 - if (released_count) { 194 + if (released_count) 221 195 pr_debug("Stopped %d thread set(s) (%d total threads)." 
222 196 "\n", released_count, released_count * 2); 223 - } 224 197 } 225 198 226 199 void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts) ··· 209 224 spin_lock_bh(&ts->ts_state_lock); 210 225 conn->thread_set = ts; 211 226 ts->conn = conn; 227 + ts->status = ISCSI_THREAD_SET_ACTIVE; 212 228 spin_unlock_bh(&ts->ts_state_lock); 213 - /* 214 - * Start up the RX thread and wait on rx_post_start_comp. The RX 215 - * Thread will then do the same for the TX Thread in 216 - * iscsi_rx_thread_pre_handler(). 217 - */ 229 + 218 230 complete(&ts->rx_start_comp); 219 - wait_for_completion(&ts->rx_post_start_comp); 231 + complete(&ts->tx_start_comp); 232 + 233 + down(&ts->ts_activate_sem); 220 234 } 221 235 222 236 struct iscsi_thread_set *iscsi_get_thread_set(void) 223 237 { 224 - int allocate_ts = 0; 225 - struct completion comp; 226 - struct iscsi_thread_set *ts = NULL; 227 - /* 228 - * If no inactive thread set is available on the first call to 229 - * iscsi_get_ts_from_inactive_list(), sleep for a second and 230 - * try again. If still none are available after two attempts, 231 - * allocate a set ourselves. 
232 - */ 238 + struct iscsi_thread_set *ts; 239 + 233 240 get_set: 234 241 ts = iscsi_get_ts_from_inactive_list(); 235 242 if (!ts) { 236 - if (allocate_ts == 2) 237 - iscsi_allocate_thread_sets(1); 238 - 239 - init_completion(&comp); 240 - wait_for_completion_timeout(&comp, 1 * HZ); 241 - 242 - allocate_ts++; 243 + iscsi_allocate_thread_sets(1); 243 244 goto get_set; 244 245 } 245 246 ··· 234 263 ts->thread_count = 2; 235 264 init_completion(&ts->rx_restart_comp); 236 265 init_completion(&ts->tx_restart_comp); 266 + sema_init(&ts->ts_activate_sem, 0); 237 267 238 268 return ts; 239 269 } ··· 372 400 static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts) 373 401 { 374 402 spin_lock_bh(&ts->ts_state_lock); 375 - if ((ts->status == ISCSI_THREAD_SET_DIE) || signal_pending(current)) { 403 + if (ts->status == ISCSI_THREAD_SET_DIE || kthread_should_stop() || 404 + signal_pending(current)) { 376 405 spin_unlock_bh(&ts->ts_state_lock); 377 406 return -1; 378 407 } ··· 392 419 goto sleep; 393 420 } 394 421 395 - flush_signals(current); 422 + if (ts->status != ISCSI_THREAD_SET_DIE) 423 + flush_signals(current); 396 424 397 425 if (ts->delay_inactive && (--ts->thread_count == 0)) { 398 426 spin_unlock_bh(&ts->ts_state_lock); ··· 420 446 if (iscsi_signal_thread_pre_handler(ts) < 0) 421 447 return NULL; 422 448 449 + iscsi_check_to_add_additional_sets(); 450 + 451 + spin_lock_bh(&ts->ts_state_lock); 423 452 if (!ts->conn) { 424 453 pr_err("struct iscsi_thread_set->conn is NULL for" 425 - " thread_id: %d, going back to sleep\n", ts->thread_id); 426 - goto sleep; 454 + " RX thread_id: %s/%d\n", current->comm, current->pid); 455 + spin_unlock_bh(&ts->ts_state_lock); 456 + return NULL; 427 457 } 428 - iscsi_check_to_add_additional_sets(); 429 - /* 430 - * The RX Thread starts up the TX Thread and sleeps. 
431 - */ 432 458 ts->thread_clear |= ISCSI_CLEAR_RX_THREAD; 433 - complete(&ts->tx_start_comp); 434 - wait_for_completion(&ts->tx_post_start_comp); 459 + spin_unlock_bh(&ts->ts_state_lock); 460 + 461 + up(&ts->ts_activate_sem); 435 462 436 463 return ts->conn; 437 464 } ··· 447 472 goto sleep; 448 473 } 449 474 450 - flush_signals(current); 475 + if (ts->status != ISCSI_THREAD_SET_DIE) 476 + flush_signals(current); 451 477 452 478 if (ts->delay_inactive && (--ts->thread_count == 0)) { 453 479 spin_unlock_bh(&ts->ts_state_lock); ··· 474 498 if (iscsi_signal_thread_pre_handler(ts) < 0) 475 499 return NULL; 476 500 477 - if (!ts->conn) { 478 - pr_err("struct iscsi_thread_set->conn is NULL for " 479 - " thread_id: %d, going back to sleep\n", 480 - ts->thread_id); 481 - goto sleep; 482 - } 483 - 484 501 iscsi_check_to_add_additional_sets(); 485 - /* 486 - * From the TX thread, up the tx_post_start_comp that the RX Thread is 487 - * sleeping on in iscsi_rx_thread_pre_handler(), then up the 488 - * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on. 489 - */ 490 - ts->thread_clear |= ISCSI_CLEAR_TX_THREAD; 491 - complete(&ts->tx_post_start_comp); 492 - complete(&ts->rx_post_start_comp); 493 502 494 503 spin_lock_bh(&ts->ts_state_lock); 495 - ts->status = ISCSI_THREAD_SET_ACTIVE; 504 + if (!ts->conn) { 505 + pr_err("struct iscsi_thread_set->conn is NULL for" 506 + " TX thread_id: %s/%d\n", current->comm, current->pid); 507 + spin_unlock_bh(&ts->ts_state_lock); 508 + return NULL; 509 + } 510 + ts->thread_clear |= ISCSI_CLEAR_TX_THREAD; 496 511 spin_unlock_bh(&ts->ts_state_lock); 512 + 513 + up(&ts->ts_activate_sem); 497 514 498 515 return ts->conn; 499 516 }
+1 -4
drivers/target/iscsi/iscsi_target_tq.h
··· 64 64 struct iscsi_conn *conn; 65 65 /* used for controlling ts state accesses */ 66 66 spinlock_t ts_state_lock; 67 - /* Used for rx side post startup */ 68 - struct completion rx_post_start_comp; 69 - /* Used for tx side post startup */ 70 - struct completion tx_post_start_comp; 71 67 /* used for restarting thread queue */ 72 68 struct completion rx_restart_comp; 73 69 /* used for restarting thread queue */ ··· 78 82 struct task_struct *tx_thread; 79 83 /* struct iscsi_thread_set in list list head*/ 80 84 struct list_head ts_list; 85 + struct semaphore ts_activate_sem; 81 86 }; 82 87 83 88 #endif /*** ISCSI_THREAD_QUEUE_H ***/
+23 -22
drivers/target/iscsi/iscsi_target_util.c
··· 1 1 /******************************************************************************* 2 2 * This file contains the iSCSI Target specific utility functions. 3 3 * 4 - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. 5 - * 6 - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 4 + * (c) Copyright 2007-2013 Datera, Inc. 7 5 * 8 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 7 * ··· 17 19 ******************************************************************************/ 18 20 19 21 #include <linux/list.h> 22 + #include <linux/percpu_ida.h> 20 23 #include <scsi/scsi_tcq.h> 21 24 #include <scsi/iscsi_proto.h> 22 25 #include <target/target_core_base.h> ··· 148 149 spin_unlock_bh(&cmd->r2t_lock); 149 150 } 150 151 151 - struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) 152 - { 153 - struct iscsi_cmd *cmd; 154 - 155 - cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask); 156 - if (!cmd) 157 - return NULL; 158 - 159 - cmd->release_cmd = &iscsit_release_cmd; 160 - return cmd; 161 - } 162 - 163 152 /* 164 153 * May be called from software interrupt (timer) context for allocating 165 154 * iSCSI NopINs. 
··· 155 168 struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) 156 169 { 157 170 struct iscsi_cmd *cmd; 171 + struct se_session *se_sess = conn->sess->se_sess; 172 + int size, tag; 158 173 159 - cmd = conn->conn_transport->iscsit_alloc_cmd(conn, gfp_mask); 160 - if (!cmd) { 161 - pr_err("Unable to allocate memory for struct iscsi_cmd.\n"); 162 - return NULL; 163 - } 174 + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask); 175 + size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; 176 + cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size)); 177 + memset(cmd, 0, size); 178 + 179 + cmd->se_cmd.map_tag = tag; 164 180 cmd->conn = conn; 165 181 INIT_LIST_HEAD(&cmd->i_conn_node); 166 182 INIT_LIST_HEAD(&cmd->datain_list); ··· 679 689 680 690 void iscsit_release_cmd(struct iscsi_cmd *cmd) 681 691 { 692 + struct iscsi_session *sess; 693 + struct se_cmd *se_cmd = &cmd->se_cmd; 694 + 695 + if (cmd->conn) 696 + sess = cmd->conn->sess; 697 + else 698 + sess = cmd->sess; 699 + 700 + BUG_ON(!sess || !sess->se_sess); 701 + 682 702 kfree(cmd->buf_ptr); 683 703 kfree(cmd->pdu_list); 684 704 kfree(cmd->seq_list); ··· 696 696 kfree(cmd->iov_data); 697 697 kfree(cmd->text_in_ptr); 698 698 699 - kmem_cache_free(lio_cmd_cache, cmd); 699 + percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag); 700 700 } 701 + EXPORT_SYMBOL(iscsit_release_cmd); 701 702 702 703 static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, 703 704 bool check_queues) ··· 762 761 /* Fall-through */ 763 762 default: 764 763 __iscsit_free_cmd(cmd, false, shutdown); 765 - cmd->release_cmd(cmd); 764 + iscsit_release_cmd(cmd); 766 765 break; 767 766 } 768 767 }
+1 -1
drivers/target/loopback/tcm_loop.c
··· 3 3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver 4 4 * for emulated SAS initiator ports 5 5 * 6 - * © Copyright 2011 RisingTide Systems LLC. 6 + * © Copyright 2011-2013 Datera, Inc. 7 7 * 8 8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 9 9 *
+21 -18
drivers/target/target_core_alua.c
··· 3 3 * 4 4 * This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA) 5 5 * 6 - * (c) Copyright 2009-2012 RisingTide Systems LLC. 6 + * (c) Copyright 2009-2013 Datera, Inc. 7 7 * 8 8 * Nicholas A. Bellinger <nab@kernel.org> 9 9 * ··· 557 557 * a ALUA logical unit group. 558 558 */ 559 559 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 560 + if (!tg_pt_gp_mem) 561 + return 0; 562 + 560 563 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 561 564 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 562 565 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); ··· 733 730 if (ret < 0) 734 731 pr_err("Error writing ALUA metadata file: %s\n", path); 735 732 fput(file); 736 - return ret ? -EIO : 0; 733 + return (ret < 0) ? -EIO : 0; 737 734 } 738 735 739 736 /* ··· 1759 1756 unsigned long tmp; 1760 1757 int ret; 1761 1758 1762 - ret = strict_strtoul(page, 0, &tmp); 1759 + ret = kstrtoul(page, 0, &tmp); 1763 1760 if (ret < 0) { 1764 1761 pr_err("Unable to extract alua_access_type\n"); 1765 - return -EINVAL; 1762 + return ret; 1766 1763 } 1767 1764 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { 1768 1765 pr_err("Illegal value for alua_access_type:" ··· 1797 1794 unsigned long tmp; 1798 1795 int ret; 1799 1796 1800 - ret = strict_strtoul(page, 0, &tmp); 1797 + ret = kstrtoul(page, 0, &tmp); 1801 1798 if (ret < 0) { 1802 1799 pr_err("Unable to extract nonop_delay_msecs\n"); 1803 - return -EINVAL; 1800 + return ret; 1804 1801 } 1805 1802 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { 1806 1803 pr_err("Passed nonop_delay_msecs: %lu, exceeds" ··· 1828 1825 unsigned long tmp; 1829 1826 int ret; 1830 1827 1831 - ret = strict_strtoul(page, 0, &tmp); 1828 + ret = kstrtoul(page, 0, &tmp); 1832 1829 if (ret < 0) { 1833 1830 pr_err("Unable to extract trans_delay_msecs\n"); 1834 - return -EINVAL; 1831 + return ret; 1835 1832 } 1836 1833 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { 1837 1834 pr_err("Passed trans_delay_msecs: %lu, exceeds" ··· 1859 1856 unsigned long tmp; 
1860 1857 int ret; 1861 1858 1862 - ret = strict_strtoul(page, 0, &tmp); 1859 + ret = kstrtoul(page, 0, &tmp); 1863 1860 if (ret < 0) { 1864 1861 pr_err("Unable to extract implict_trans_secs\n"); 1865 - return -EINVAL; 1862 + return ret; 1866 1863 } 1867 1864 if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) { 1868 1865 pr_err("Passed implict_trans_secs: %lu, exceeds" ··· 1890 1887 unsigned long tmp; 1891 1888 int ret; 1892 1889 1893 - ret = strict_strtoul(page, 0, &tmp); 1890 + ret = kstrtoul(page, 0, &tmp); 1894 1891 if (ret < 0) { 1895 1892 pr_err("Unable to extract preferred ALUA value\n"); 1896 - return -EINVAL; 1893 + return ret; 1897 1894 } 1898 1895 if ((tmp != 0) && (tmp != 1)) { 1899 1896 pr_err("Illegal value for preferred ALUA: %lu\n", tmp); ··· 1925 1922 if (!lun->lun_sep) 1926 1923 return -ENODEV; 1927 1924 1928 - ret = strict_strtoul(page, 0, &tmp); 1925 + ret = kstrtoul(page, 0, &tmp); 1929 1926 if (ret < 0) { 1930 1927 pr_err("Unable to extract alua_tg_pt_offline value\n"); 1931 - return -EINVAL; 1928 + return ret; 1932 1929 } 1933 1930 if ((tmp != 0) && (tmp != 1)) { 1934 1931 pr_err("Illegal value for alua_tg_pt_offline: %lu\n", ··· 1964 1961 unsigned long tmp; 1965 1962 int ret; 1966 1963 1967 - ret = strict_strtoul(page, 0, &tmp); 1964 + ret = kstrtoul(page, 0, &tmp); 1968 1965 if (ret < 0) { 1969 1966 pr_err("Unable to extract alua_tg_pt_status\n"); 1970 - return -EINVAL; 1967 + return ret; 1971 1968 } 1972 1969 if ((tmp != ALUA_STATUS_NONE) && 1973 1970 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && ··· 1997 1994 unsigned long tmp; 1998 1995 int ret; 1999 1996 2000 - ret = strict_strtoul(page, 0, &tmp); 1997 + ret = kstrtoul(page, 0, &tmp); 2001 1998 if (ret < 0) { 2002 1999 pr_err("Unable to extract alua_tg_pt_write_md\n"); 2003 - return -EINVAL; 2000 + return ret; 2004 2001 } 2005 2002 if ((tmp != 0) && (tmp != 1)) { 2006 2003 pr_err("Illegal value for alua_tg_pt_write_md:"
+37 -23
drivers/target/target_core_configfs.c
··· 3 3 * 4 4 * This file contains ConfigFS logic for the Generic Target Engine project. 5 5 * 6 - * (c) Copyright 2008-2012 RisingTide Systems LLC. 6 + * (c) Copyright 2008-2013 Datera, Inc. 7 7 * 8 8 * Nicholas A. Bellinger <nab@kernel.org> 9 9 * ··· 48 48 #include "target_core_alua.h" 49 49 #include "target_core_pr.h" 50 50 #include "target_core_rd.h" 51 + #include "target_core_xcopy.h" 51 52 52 53 extern struct t10_alua_lu_gp *default_lu_gp; 53 54 ··· 269 268 }, 270 269 }; 271 270 272 - static struct configfs_subsystem *target_core_subsystem[] = { 271 + struct configfs_subsystem *target_core_subsystem[] = { 273 272 &target_core_fabrics, 274 273 NULL, 275 274 }; ··· 578 577 unsigned long val; \ 579 578 int ret; \ 580 579 \ 581 - ret = strict_strtoul(page, 0, &val); \ 580 + ret = kstrtoul(page, 0, &val); \ 582 581 if (ret < 0) { \ 583 - pr_err("strict_strtoul() failed with" \ 582 + pr_err("kstrtoul() failed with" \ 584 583 " ret: %d\n", ret); \ 585 584 return -EINVAL; \ 586 585 } \ ··· 636 635 637 636 DEF_DEV_ATTRIB(emulate_tpws); 638 637 SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR); 638 + 639 + DEF_DEV_ATTRIB(emulate_caw); 640 + SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR); 641 + 642 + DEF_DEV_ATTRIB(emulate_3pc); 643 + SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR); 639 644 640 645 DEF_DEV_ATTRIB(enforce_pr_isids); 641 646 SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); ··· 700 693 &target_core_dev_attrib_emulate_tas.attr, 701 694 &target_core_dev_attrib_emulate_tpu.attr, 702 695 &target_core_dev_attrib_emulate_tpws.attr, 696 + &target_core_dev_attrib_emulate_caw.attr, 697 + &target_core_dev_attrib_emulate_3pc.attr, 703 698 &target_core_dev_attrib_enforce_pr_isids.attr, 704 699 &target_core_dev_attrib_is_nonrot.attr, 705 700 &target_core_dev_attrib_emulate_rest_reord.attr, ··· 1319 1310 ret = -ENOMEM; 1320 1311 goto out; 1321 1312 } 1322 - ret = strict_strtoull(arg_p, 0, &tmp_ll); 1313 + ret = kstrtoull(arg_p, 0, &tmp_ll); 1323 1314 if (ret < 0) { 1324 - 
pr_err("strict_strtoull() failed for" 1315 + pr_err("kstrtoull() failed for" 1325 1316 " sa_res_key=\n"); 1326 1317 goto out; 1327 1318 } ··· 1845 1836 unsigned long lu_gp_id; 1846 1837 int ret; 1847 1838 1848 - ret = strict_strtoul(page, 0, &lu_gp_id); 1839 + ret = kstrtoul(page, 0, &lu_gp_id); 1849 1840 if (ret < 0) { 1850 - pr_err("strict_strtoul() returned %d for" 1841 + pr_err("kstrtoul() returned %d for" 1851 1842 " lu_gp_id\n", ret); 1852 - return -EINVAL; 1843 + return ret; 1853 1844 } 1854 1845 if (lu_gp_id > 0x0000ffff) { 1855 1846 pr_err("ALUA lu_gp_id: %lu exceeds maximum:" ··· 2041 2032 return -EINVAL; 2042 2033 } 2043 2034 2044 - ret = strict_strtoul(page, 0, &tmp); 2035 + ret = kstrtoul(page, 0, &tmp); 2045 2036 if (ret < 0) { 2046 2037 pr_err("Unable to extract new ALUA access state from" 2047 2038 " %s\n", page); 2048 - return -EINVAL; 2039 + return ret; 2049 2040 } 2050 2041 new_state = (int)tmp; 2051 2042 ··· 2088 2079 return -EINVAL; 2089 2080 } 2090 2081 2091 - ret = strict_strtoul(page, 0, &tmp); 2082 + ret = kstrtoul(page, 0, &tmp); 2092 2083 if (ret < 0) { 2093 2084 pr_err("Unable to extract new ALUA access status" 2094 2085 " from %s\n", page); 2095 - return -EINVAL; 2086 + return ret; 2096 2087 } 2097 2088 new_status = (int)tmp; 2098 2089 ··· 2148 2139 unsigned long tmp; 2149 2140 int ret; 2150 2141 2151 - ret = strict_strtoul(page, 0, &tmp); 2142 + ret = kstrtoul(page, 0, &tmp); 2152 2143 if (ret < 0) { 2153 2144 pr_err("Unable to extract alua_write_metadata\n"); 2154 - return -EINVAL; 2145 + return ret; 2155 2146 } 2156 2147 2157 2148 if ((tmp != 0) && (tmp != 1)) { ··· 2272 2263 unsigned long tg_pt_gp_id; 2273 2264 int ret; 2274 2265 2275 - ret = strict_strtoul(page, 0, &tg_pt_gp_id); 2266 + ret = kstrtoul(page, 0, &tg_pt_gp_id); 2276 2267 if (ret < 0) { 2277 - pr_err("strict_strtoul() returned %d for" 2268 + pr_err("kstrtoul() returned %d for" 2278 2269 " tg_pt_gp_id\n", ret); 2279 - return -EINVAL; 2270 + return ret; 2280 2271 } 2281 
2272 if (tg_pt_gp_id > 0x0000ffff) { 2282 2273 pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:" ··· 2685 2676 if (transport->pmode_enable_hba == NULL) 2686 2677 return -EINVAL; 2687 2678 2688 - ret = strict_strtoul(page, 0, &mode_flag); 2679 + ret = kstrtoul(page, 0, &mode_flag); 2689 2680 if (ret < 0) { 2690 2681 pr_err("Unable to extract hba mode flag: %d\n", ret); 2691 - return -EINVAL; 2682 + return ret; 2692 2683 } 2693 2684 2694 2685 if (hba->dev_count) { ··· 2776 2767 str++; /* Skip to start of plugin dependent ID */ 2777 2768 } 2778 2769 2779 - ret = strict_strtoul(str, 0, &plugin_dep_id); 2770 + ret = kstrtoul(str, 0, &plugin_dep_id); 2780 2771 if (ret < 0) { 2781 - pr_err("strict_strtoul() returned %d for" 2772 + pr_err("kstrtoul() returned %d for" 2782 2773 " plugin_dep_id\n", ret); 2783 - return ERR_PTR(-EINVAL); 2774 + return ERR_PTR(ret); 2784 2775 } 2785 2776 /* 2786 2777 * Load up TCM subsystem plugins if they have not already been loaded. ··· 2936 2927 if (ret < 0) 2937 2928 goto out; 2938 2929 2930 + ret = target_xcopy_setup_pt(); 2931 + if (ret < 0) 2932 + goto out; 2933 + 2939 2934 return 0; 2940 2935 2941 2936 out: ··· 3012 2999 3013 3000 core_dev_release_virtual_lun0(); 3014 3001 rd_module_exit(); 3002 + target_xcopy_release_pt(); 3015 3003 release_se_kmem_caches(); 3016 3004 } 3017 3005
+43 -1
drivers/target/target_core_device.c
··· 4 4 * This file contains the TCM Virtual Device and Disk Transport 5 5 * agnostic related functions. 6 6 * 7 - * (c) Copyright 2003-2012 RisingTide Systems LLC. 7 + * (c) Copyright 2003-2013 Datera, Inc. 8 8 * 9 9 * Nicholas A. Bellinger <nab@kernel.org> 10 10 * ··· 46 46 #include "target_core_alua.h" 47 47 #include "target_core_pr.h" 48 48 #include "target_core_ua.h" 49 + 50 + DEFINE_MUTEX(g_device_mutex); 51 + LIST_HEAD(g_device_list); 49 52 50 53 static struct se_hba *lun0_hba; 51 54 /* not static, needed by tpg.c */ ··· 893 890 return 0; 894 891 } 895 892 893 + int se_dev_set_emulate_caw(struct se_device *dev, int flag) 894 + { 895 + if (flag != 0 && flag != 1) { 896 + pr_err("Illegal value %d\n", flag); 897 + return -EINVAL; 898 + } 899 + dev->dev_attrib.emulate_caw = flag; 900 + pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n", 901 + dev, flag); 902 + 903 + return 0; 904 + } 905 + 906 + int se_dev_set_emulate_3pc(struct se_device *dev, int flag) 907 + { 908 + if (flag != 0 && flag != 1) { 909 + pr_err("Illegal value %d\n", flag); 910 + return -EINVAL; 911 + } 912 + dev->dev_attrib.emulate_3pc = flag; 913 + pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n", 914 + dev, flag); 915 + 916 + return 0; 917 + } 918 + 896 919 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) 897 920 { 898 921 if ((flag != 0) && (flag != 1)) { ··· 1422 1393 INIT_LIST_HEAD(&dev->delayed_cmd_list); 1423 1394 INIT_LIST_HEAD(&dev->state_list); 1424 1395 INIT_LIST_HEAD(&dev->qf_cmd_list); 1396 + INIT_LIST_HEAD(&dev->g_dev_node); 1425 1397 spin_lock_init(&dev->stats_lock); 1426 1398 spin_lock_init(&dev->execute_task_lock); 1427 1399 spin_lock_init(&dev->delayed_cmd_lock); ··· 1430 1400 spin_lock_init(&dev->se_port_lock); 1431 1401 spin_lock_init(&dev->se_tmr_lock); 1432 1402 spin_lock_init(&dev->qf_cmd_lock); 1403 + sema_init(&dev->caw_sem, 1); 1433 1404 atomic_set(&dev->dev_ordered_id, 0); 1434 1405 
INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list); 1435 1406 spin_lock_init(&dev->t10_wwn.t10_vpd_lock); ··· 1454 1423 dev->dev_attrib.emulate_tas = DA_EMULATE_TAS; 1455 1424 dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU; 1456 1425 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; 1426 + dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; 1427 + dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; 1457 1428 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 1458 1429 dev->dev_attrib.is_nonrot = DA_IS_NONROT; 1459 1430 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; ··· 1543 1510 spin_lock(&hba->device_lock); 1544 1511 hba->dev_count++; 1545 1512 spin_unlock(&hba->device_lock); 1513 + 1514 + mutex_lock(&g_device_mutex); 1515 + list_add_tail(&dev->g_dev_node, &g_device_list); 1516 + mutex_unlock(&g_device_mutex); 1517 + 1546 1518 return 0; 1547 1519 1548 1520 out_free_alua: ··· 1565 1527 1566 1528 if (dev->dev_flags & DF_CONFIGURED) { 1567 1529 destroy_workqueue(dev->tmr_wq); 1530 + 1531 + mutex_lock(&g_device_mutex); 1532 + list_del(&dev->g_dev_node); 1533 + mutex_unlock(&g_device_mutex); 1568 1534 1569 1535 spin_lock(&hba->device_lock); 1570 1536 hba->dev_count--;
+13 -5
drivers/target/target_core_fabric_configfs.c
··· 4 4 * This file contains generic fabric module configfs infrastructure for 5 5 * TCM v4.x code 6 6 * 7 - * (c) Copyright 2010-2012 RisingTide Systems LLC. 7 + * (c) Copyright 2010-2013 Datera, Inc. 8 8 * 9 9 * Nicholas A. Bellinger <nab@linux-iscsi.org> 10 10 * ··· 189 189 struct se_node_acl *se_nacl = lacl->se_lun_nacl; 190 190 struct se_portal_group *se_tpg = se_nacl->se_tpg; 191 191 unsigned long op; 192 + int ret; 192 193 193 - if (strict_strtoul(page, 0, &op)) 194 - return -EINVAL; 194 + ret = kstrtoul(page, 0, &op); 195 + if (ret) 196 + return ret; 195 197 196 198 if ((op != 1) && (op != 0)) 197 199 return -EINVAL; ··· 352 350 * Determine the Mapped LUN value. This is what the SCSI Initiator 353 351 * Port will actually see. 354 352 */ 355 - if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) { 353 + ret = kstrtoul(buf + 4, 0, &mapped_lun); 354 + if (ret) 355 + goto out; 356 + if (mapped_lun > UINT_MAX) { 356 357 ret = -EINVAL; 357 358 goto out; 358 359 } ··· 880 875 " \"lun_$LUN_NUMBER\"\n"); 881 876 return ERR_PTR(-EINVAL); 882 877 } 883 - if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX) 878 + errno = kstrtoul(name + 4, 0, &unpacked_lun); 879 + if (errno) 880 + return ERR_PTR(errno); 881 + if (unpacked_lun > UINT_MAX) 884 882 return ERR_PTR(-EINVAL); 885 883 886 884 lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
+1 -1
drivers/target/target_core_fabric_lib.c
··· 4 4 * This file contains generic high level protocol identifier and PR 5 5 * handlers for TCM fabric modules 6 6 * 7 - * (c) Copyright 2010-2012 RisingTide Systems LLC. 7 + * (c) Copyright 2010-2013 Datera, Inc. 8 8 * 9 9 * Nicholas A. Bellinger <nab@linux-iscsi.org> 10 10 *
+5 -7
drivers/target/target_core_file.c
··· 3 3 * 4 4 * This file contains the Storage Engine <-> FILEIO transport specific functions 5 5 * 6 - * (c) Copyright 2005-2012 RisingTide Systems LLC. 6 + * (c) Copyright 2005-2013 Datera, Inc. 7 7 * 8 8 * Nicholas A. Bellinger <nab@kernel.org> 9 9 * ··· 547 547 } 548 548 549 549 static sense_reason_t 550 - fd_execute_rw(struct se_cmd *cmd) 550 + fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, 551 + enum dma_data_direction data_direction) 551 552 { 552 - struct scatterlist *sgl = cmd->t_data_sg; 553 - u32 sgl_nents = cmd->t_data_nents; 554 - enum dma_data_direction data_direction = cmd->data_direction; 555 553 struct se_device *dev = cmd->se_dev; 556 554 int ret = 0; 557 555 ··· 633 635 ret = -ENOMEM; 634 636 break; 635 637 } 636 - ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); 638 + ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size); 637 639 kfree(arg_p); 638 640 if (ret < 0) { 639 - pr_err("strict_strtoull() failed for" 641 + pr_err("kstrtoull() failed for" 640 642 " fd_dev_size=\n"); 641 643 goto out; 642 644 }
+1 -1
drivers/target/target_core_hba.c
··· 3 3 * 4 4 * This file contains the TCM HBA Transport related functions. 5 5 * 6 - * (c) Copyright 2003-2012 RisingTide Systems LLC. 6 + * (c) Copyright 2003-2013 Datera, Inc. 7 7 * 8 8 * Nicholas A. Bellinger <nab@kernel.org> 9 9 *
+5 -7
drivers/target/target_core_iblock.c
··· 4 4 * This file contains the Storage Engine <-> Linux BlockIO transport 5 5 * specific functions. 6 6 * 7 - * (c) Copyright 2003-2012 RisingTide Systems LLC. 7 + * (c) Copyright 2003-2013 Datera, Inc. 8 8 * 9 9 * Nicholas A. Bellinger <nab@kernel.org> 10 10 * ··· 536 536 ret = -ENOMEM; 537 537 break; 538 538 } 539 - ret = strict_strtoul(arg_p, 0, &tmp_readonly); 539 + ret = kstrtoul(arg_p, 0, &tmp_readonly); 540 540 kfree(arg_p); 541 541 if (ret < 0) { 542 - pr_err("strict_strtoul() failed for" 542 + pr_err("kstrtoul() failed for" 543 543 " readonly=\n"); 544 544 goto out; 545 545 } ··· 587 587 } 588 588 589 589 static sense_reason_t 590 - iblock_execute_rw(struct se_cmd *cmd) 590 + iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, 591 + enum dma_data_direction data_direction) 591 592 { 592 - struct scatterlist *sgl = cmd->t_data_sg; 593 - u32 sgl_nents = cmd->t_data_nents; 594 - enum dma_data_direction data_direction = cmd->data_direction; 595 593 struct se_device *dev = cmd->se_dev; 596 594 struct iblock_req *ibr; 597 595 struct bio *bio;
+2
drivers/target/target_core_internal.h
··· 33 33 int se_dev_set_emulate_tas(struct se_device *, int); 34 34 int se_dev_set_emulate_tpu(struct se_device *, int); 35 35 int se_dev_set_emulate_tpws(struct se_device *, int); 36 + int se_dev_set_emulate_caw(struct se_device *, int); 37 + int se_dev_set_emulate_3pc(struct se_device *, int); 36 38 int se_dev_set_enforce_pr_isids(struct se_device *, int); 37 39 int se_dev_set_is_nonrot(struct se_device *, int); 38 40 int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
+2 -2
drivers/target/target_core_pr.c
··· 4 4 * This file contains SPC-3 compliant persistent reservations and 5 5 * legacy SPC-2 reservations with compatible reservation handling (CRH=1) 6 6 * 7 - * (c) Copyright 2009-2012 RisingTide Systems LLC. 7 + * (c) Copyright 2009-2013 Datera, Inc. 8 8 * 9 9 * Nicholas A. Bellinger <nab@kernel.org> 10 10 * ··· 1949 1949 pr_debug("Error writing APTPL metadata file: %s\n", path); 1950 1950 fput(file); 1951 1951 1952 - return ret ? -EIO : 0; 1952 + return (ret < 0) ? -EIO : 0; 1953 1953 } 1954 1954 1955 1955 /*
+3 -4
drivers/target/target_core_pscsi.c
··· 3 3 * 4 4 * This file contains the generic target mode <-> Linux SCSI subsystem plugin. 5 5 * 6 - * (c) Copyright 2003-2012 RisingTide Systems LLC. 6 + * (c) Copyright 2003-2013 Datera, Inc. 7 7 * 8 8 * Nicholas A. Bellinger <nab@kernel.org> 9 9 * ··· 1050 1050 req = blk_get_request(pdv->pdv_sd->request_queue, 1051 1051 (data_direction == DMA_TO_DEVICE), 1052 1052 GFP_KERNEL); 1053 - if (!req || IS_ERR(req)) { 1054 - pr_err("PSCSI: blk_get_request() failed: %ld\n", 1055 - req ? IS_ERR(req) : -ENOMEM); 1053 + if (!req) { 1054 + pr_err("PSCSI: blk_get_request() failed\n"); 1056 1055 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1057 1056 goto fail; 1058 1057 }
+3 -5
drivers/target/target_core_rd.c
··· 4 4 * This file contains the Storage Engine <-> Ramdisk transport 5 5 * specific functions. 6 6 * 7 - * (c) Copyright 2003-2012 RisingTide Systems LLC. 7 + * (c) Copyright 2003-2013 Datera, Inc. 8 8 * 9 9 * Nicholas A. Bellinger <nab@kernel.org> 10 10 * ··· 280 280 } 281 281 282 282 static sense_reason_t 283 - rd_execute_rw(struct se_cmd *cmd) 283 + rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, 284 + enum dma_data_direction data_direction) 284 285 { 285 - struct scatterlist *sgl = cmd->t_data_sg; 286 - u32 sgl_nents = cmd->t_data_nents; 287 - enum dma_data_direction data_direction = cmd->data_direction; 288 286 struct se_device *se_dev = cmd->se_dev; 289 287 struct rd_dev *dev = RD_DEV(se_dev); 290 288 struct rd_dev_sg_table *table;
+240 -17
drivers/target/target_core_sbc.c
··· 1 1 /* 2 2 * SCSI Block Commands (SBC) parsing and emulation. 3 3 * 4 - * (c) Copyright 2002-2012 RisingTide Systems LLC. 4 + * (c) Copyright 2002-2013 Datera, Inc. 5 5 * 6 6 * Nicholas A. Bellinger <nab@kernel.org> 7 7 * ··· 25 25 #include <linux/ratelimit.h> 26 26 #include <asm/unaligned.h> 27 27 #include <scsi/scsi.h> 28 + #include <scsi/scsi_tcq.h> 28 29 29 30 #include <target/target_core_base.h> 30 31 #include <target/target_core_backend.h> ··· 281 280 return 0; 282 281 } 283 282 284 - static void xdreadwrite_callback(struct se_cmd *cmd) 283 + static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd) 285 284 { 286 285 unsigned char *buf, *addr; 287 286 struct scatterlist *sg; 288 287 unsigned int offset; 289 - int i; 290 - int count; 288 + sense_reason_t ret = TCM_NO_SENSE; 289 + int i, count; 291 290 /* 292 291 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command 293 292 * ··· 302 301 buf = kmalloc(cmd->data_length, GFP_KERNEL); 303 302 if (!buf) { 304 303 pr_err("Unable to allocate xor_callback buf\n"); 305 - return; 304 + return TCM_OUT_OF_RESOURCES; 306 305 } 307 306 /* 308 307 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg ··· 321 320 offset = 0; 322 321 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 323 322 addr = kmap_atomic(sg_page(sg)); 324 - if (!addr) 323 + if (!addr) { 324 + ret = TCM_OUT_OF_RESOURCES; 325 325 goto out; 326 + } 326 327 327 328 for (i = 0; i < sg->length; i++) 328 329 *(addr + sg->offset + i) ^= *(buf + offset + i); ··· 335 332 336 333 out: 337 334 kfree(buf); 335 + return ret; 336 + } 337 + 338 + static sense_reason_t 339 + sbc_execute_rw(struct se_cmd *cmd) 340 + { 341 + return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents, 342 + cmd->data_direction); 343 + } 344 + 345 + static sense_reason_t compare_and_write_post(struct se_cmd *cmd) 346 + { 347 + struct se_device *dev = cmd->se_dev; 348 + 349 + cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 350 + /* 351 + * Unlock 
->caw_sem originally obtained during sbc_compare_and_write() 352 + * before the original READ I/O submission. 353 + */ 354 + up(&dev->caw_sem); 355 + 356 + return TCM_NO_SENSE; 357 + } 358 + 359 + static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) 360 + { 361 + struct se_device *dev = cmd->se_dev; 362 + struct scatterlist *write_sg = NULL, *sg; 363 + unsigned char *buf, *addr; 364 + struct sg_mapping_iter m; 365 + unsigned int offset = 0, len; 366 + unsigned int nlbas = cmd->t_task_nolb; 367 + unsigned int block_size = dev->dev_attrib.block_size; 368 + unsigned int compare_len = (nlbas * block_size); 369 + sense_reason_t ret = TCM_NO_SENSE; 370 + int rc, i; 371 + 372 + /* 373 + * Handle early failure in transport_generic_request_failure(), 374 + * which will not have taken ->caw_mutex yet.. 375 + */ 376 + if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) 377 + return TCM_NO_SENSE; 378 + 379 + buf = kzalloc(cmd->data_length, GFP_KERNEL); 380 + if (!buf) { 381 + pr_err("Unable to allocate compare_and_write buf\n"); 382 + ret = TCM_OUT_OF_RESOURCES; 383 + goto out; 384 + } 385 + 386 + write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents, 387 + GFP_KERNEL); 388 + if (!write_sg) { 389 + pr_err("Unable to allocate compare_and_write sg\n"); 390 + ret = TCM_OUT_OF_RESOURCES; 391 + goto out; 392 + } 393 + /* 394 + * Setup verify and write data payloads from total NumberLBAs. 
395 + */ 396 + rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf, 397 + cmd->data_length); 398 + if (!rc) { 399 + pr_err("sg_copy_to_buffer() failed for compare_and_write\n"); 400 + ret = TCM_OUT_OF_RESOURCES; 401 + goto out; 402 + } 403 + /* 404 + * Compare against SCSI READ payload against verify payload 405 + */ 406 + for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) { 407 + addr = (unsigned char *)kmap_atomic(sg_page(sg)); 408 + if (!addr) { 409 + ret = TCM_OUT_OF_RESOURCES; 410 + goto out; 411 + } 412 + 413 + len = min(sg->length, compare_len); 414 + 415 + if (memcmp(addr, buf + offset, len)) { 416 + pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n", 417 + addr, buf + offset); 418 + kunmap_atomic(addr); 419 + goto miscompare; 420 + } 421 + kunmap_atomic(addr); 422 + 423 + offset += len; 424 + compare_len -= len; 425 + if (!compare_len) 426 + break; 427 + } 428 + 429 + i = 0; 430 + len = cmd->t_task_nolb * block_size; 431 + sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG); 432 + /* 433 + * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.. 
434 + */ 435 + while (len) { 436 + sg_miter_next(&m); 437 + 438 + if (block_size < PAGE_SIZE) { 439 + sg_set_page(&write_sg[i], m.page, block_size, 440 + block_size); 441 + } else { 442 + sg_miter_next(&m); 443 + sg_set_page(&write_sg[i], m.page, block_size, 444 + 0); 445 + } 446 + len -= block_size; 447 + i++; 448 + } 449 + sg_miter_stop(&m); 450 + /* 451 + * Save the original SGL + nents values before updating to new 452 + * assignments, to be released in transport_free_pages() -> 453 + * transport_reset_sgl_orig() 454 + */ 455 + cmd->t_data_sg_orig = cmd->t_data_sg; 456 + cmd->t_data_sg = write_sg; 457 + cmd->t_data_nents_orig = cmd->t_data_nents; 458 + cmd->t_data_nents = 1; 459 + 460 + cmd->sam_task_attr = MSG_HEAD_TAG; 461 + cmd->transport_complete_callback = compare_and_write_post; 462 + /* 463 + * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler 464 + * for submitting the adjusted SGL to write instance user-data. 465 + */ 466 + cmd->execute_cmd = sbc_execute_rw; 467 + 468 + spin_lock_irq(&cmd->t_state_lock); 469 + cmd->t_state = TRANSPORT_PROCESSING; 470 + cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; 471 + spin_unlock_irq(&cmd->t_state_lock); 472 + 473 + __target_execute_cmd(cmd); 474 + 475 + kfree(buf); 476 + return ret; 477 + 478 + miscompare: 479 + pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n", 480 + dev->transport->name); 481 + ret = TCM_MISCOMPARE_VERIFY; 482 + out: 483 + /* 484 + * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in 485 + * sbc_compare_and_write() before the original READ I/O submission. 
486 + */ 487 + up(&dev->caw_sem); 488 + kfree(write_sg); 489 + kfree(buf); 490 + return ret; 491 + } 492 + 493 + static sense_reason_t 494 + sbc_compare_and_write(struct se_cmd *cmd) 495 + { 496 + struct se_device *dev = cmd->se_dev; 497 + sense_reason_t ret; 498 + int rc; 499 + /* 500 + * Submit the READ first for COMPARE_AND_WRITE to perform the 501 + * comparision using SGLs at cmd->t_bidi_data_sg.. 502 + */ 503 + rc = down_interruptible(&dev->caw_sem); 504 + if ((rc != 0) || signal_pending(current)) { 505 + cmd->transport_complete_callback = NULL; 506 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 507 + } 508 + 509 + ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, 510 + DMA_FROM_DEVICE); 511 + if (ret) { 512 + cmd->transport_complete_callback = NULL; 513 + up(&dev->caw_sem); 514 + return ret; 515 + } 516 + /* 517 + * Unlock of dev->caw_sem to occur in compare_and_write_callback() 518 + * upon MISCOMPARE, or in compare_and_write_done() upon completion 519 + * of WRITE instance user-data. 
520 + */ 521 + return TCM_NO_SENSE; 338 522 } 339 523 340 524 sense_reason_t ··· 538 348 sectors = transport_get_sectors_6(cdb); 539 349 cmd->t_task_lba = transport_lba_21(cdb); 540 350 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 541 - cmd->execute_cmd = ops->execute_rw; 351 + cmd->execute_rw = ops->execute_rw; 352 + cmd->execute_cmd = sbc_execute_rw; 542 353 break; 543 354 case READ_10: 544 355 sectors = transport_get_sectors_10(cdb); 545 356 cmd->t_task_lba = transport_lba_32(cdb); 546 357 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 547 - cmd->execute_cmd = ops->execute_rw; 358 + cmd->execute_rw = ops->execute_rw; 359 + cmd->execute_cmd = sbc_execute_rw; 548 360 break; 549 361 case READ_12: 550 362 sectors = transport_get_sectors_12(cdb); 551 363 cmd->t_task_lba = transport_lba_32(cdb); 552 364 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 553 - cmd->execute_cmd = ops->execute_rw; 365 + cmd->execute_rw = ops->execute_rw; 366 + cmd->execute_cmd = sbc_execute_rw; 554 367 break; 555 368 case READ_16: 556 369 sectors = transport_get_sectors_16(cdb); 557 370 cmd->t_task_lba = transport_lba_64(cdb); 558 371 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 559 - cmd->execute_cmd = ops->execute_rw; 372 + cmd->execute_rw = ops->execute_rw; 373 + cmd->execute_cmd = sbc_execute_rw; 560 374 break; 561 375 case WRITE_6: 562 376 sectors = transport_get_sectors_6(cdb); 563 377 cmd->t_task_lba = transport_lba_21(cdb); 564 378 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 565 - cmd->execute_cmd = ops->execute_rw; 379 + cmd->execute_rw = ops->execute_rw; 380 + cmd->execute_cmd = sbc_execute_rw; 566 381 break; 567 382 case WRITE_10: 568 383 case WRITE_VERIFY: ··· 576 381 if (cdb[1] & 0x8) 577 382 cmd->se_cmd_flags |= SCF_FUA; 578 383 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 579 - cmd->execute_cmd = ops->execute_rw; 384 + cmd->execute_rw = ops->execute_rw; 385 + cmd->execute_cmd = sbc_execute_rw; 580 386 break; 581 387 case WRITE_12: 582 388 sectors = transport_get_sectors_12(cdb); ··· 585 389 if (cdb[1] & 0x8) 
586 390 cmd->se_cmd_flags |= SCF_FUA; 587 391 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 588 - cmd->execute_cmd = ops->execute_rw; 392 + cmd->execute_rw = ops->execute_rw; 393 + cmd->execute_cmd = sbc_execute_rw; 589 394 break; 590 395 case WRITE_16: 591 396 sectors = transport_get_sectors_16(cdb); ··· 594 397 if (cdb[1] & 0x8) 595 398 cmd->se_cmd_flags |= SCF_FUA; 596 399 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 597 - cmd->execute_cmd = ops->execute_rw; 400 + cmd->execute_rw = ops->execute_rw; 401 + cmd->execute_cmd = sbc_execute_rw; 598 402 break; 599 403 case XDWRITEREAD_10: 600 404 if (cmd->data_direction != DMA_TO_DEVICE || ··· 609 411 /* 610 412 * Setup BIDI XOR callback to be run after I/O completion. 611 413 */ 612 - cmd->execute_cmd = ops->execute_rw; 414 + cmd->execute_rw = ops->execute_rw; 415 + cmd->execute_cmd = sbc_execute_rw; 613 416 cmd->transport_complete_callback = &xdreadwrite_callback; 614 417 if (cdb[1] & 0x8) 615 418 cmd->se_cmd_flags |= SCF_FUA; ··· 633 434 * Setup BIDI XOR callback to be run during after I/O 634 435 * completion. 635 436 */ 636 - cmd->execute_cmd = ops->execute_rw; 437 + cmd->execute_rw = ops->execute_rw; 438 + cmd->execute_cmd = sbc_execute_rw; 637 439 cmd->transport_complete_callback = &xdreadwrite_callback; 638 440 if (cdb[1] & 0x8) 639 441 cmd->se_cmd_flags |= SCF_FUA; ··· 661 461 } 662 462 break; 663 463 } 464 + case COMPARE_AND_WRITE: 465 + sectors = cdb[13]; 466 + /* 467 + * Currently enforce COMPARE_AND_WRITE for a single sector 468 + */ 469 + if (sectors > 1) { 470 + pr_err("COMPARE_AND_WRITE contains NoLB: %u greater" 471 + " than 1\n", sectors); 472 + return TCM_INVALID_CDB_FIELD; 473 + } 474 + /* 475 + * Double size because we have two buffers, note that 476 + * zero is not an error.. 
477 + */ 478 + size = 2 * sbc_get_size(cmd, sectors); 479 + cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 480 + cmd->t_task_nolb = sectors; 481 + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE; 482 + cmd->execute_rw = ops->execute_rw; 483 + cmd->execute_cmd = sbc_compare_and_write; 484 + cmd->transport_complete_callback = compare_and_write_callback; 485 + break; 664 486 case READ_CAPACITY: 665 487 size = READ_CAP_LEN; 666 488 cmd->execute_cmd = sbc_emulate_readcapacity; ··· 822 600 return TCM_ADDRESS_OUT_OF_RANGE; 823 601 } 824 602 825 - size = sbc_get_size(cmd, sectors); 603 + if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) 604 + size = sbc_get_size(cmd, sectors); 826 605 } 827 606 828 607 return target_cmd_size_check(cmd, size);
+22 -5
drivers/target/target_core_spc.c
··· 1 1 /* 2 2 * SCSI Primary Commands (SPC) parsing and emulation. 3 3 * 4 - * (c) Copyright 2002-2012 RisingTide Systems LLC. 4 + * (c) Copyright 2002-2013 Datera, Inc. 5 5 * 6 6 * Nicholas A. Bellinger <nab@kernel.org> 7 7 * ··· 35 35 #include "target_core_alua.h" 36 36 #include "target_core_pr.h" 37 37 #include "target_core_ua.h" 38 - 38 + #include "target_core_xcopy.h" 39 39 40 40 static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) 41 41 { ··· 95 95 */ 96 96 spc_fill_alua_data(lun->lun_sep, buf); 97 97 98 + /* 99 + * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY 100 + */ 101 + if (dev->dev_attrib.emulate_3pc) 102 + buf[5] |= 0x8; 103 + 98 104 buf[7] = 0x2; /* CmdQue=1 */ 99 105 100 106 memcpy(&buf[8], "LIO-ORG ", 8); ··· 135 129 return 0; 136 130 } 137 131 138 - static void spc_parse_naa_6h_vendor_specific(struct se_device *dev, 139 - unsigned char *buf) 132 + void spc_parse_naa_6h_vendor_specific(struct se_device *dev, 133 + unsigned char *buf) 140 134 { 141 135 unsigned char *p = &dev->t10_wwn.unit_serial[0]; 142 136 int cnt; ··· 466 460 467 461 /* Set WSNZ to 1 */ 468 462 buf[4] = 0x01; 463 + /* 464 + * Set MAXIMUM COMPARE AND WRITE LENGTH 465 + */ 466 + if (dev->dev_attrib.emulate_caw) 467 + buf[5] = 0x01; 469 468 470 469 /* 471 470 * Set OPTIMAL TRANSFER LENGTH GRANULARITY ··· 1261 1250 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1262 1251 break; 1263 1252 case EXTENDED_COPY: 1264 - case READ_ATTRIBUTE: 1253 + *size = get_unaligned_be32(&cdb[10]); 1254 + cmd->execute_cmd = target_do_xcopy; 1255 + break; 1265 1256 case RECEIVE_COPY_RESULTS: 1257 + *size = get_unaligned_be32(&cdb[10]); 1258 + cmd->execute_cmd = target_do_receive_copy_results; 1259 + break; 1260 + case READ_ATTRIBUTE: 1266 1261 case WRITE_ATTRIBUTE: 1267 1262 *size = (cdb[10] << 24) | (cdb[11] << 16) | 1268 1263 (cdb[12] << 8) | cdb[13];
+1 -1
drivers/target/target_core_stat.c
··· 4 4 * Modern ConfigFS group context specific statistics based on original 5 5 * target_core_mib.c code 6 6 * 7 - * (c) Copyright 2006-2012 RisingTide Systems LLC. 7 + * (c) Copyright 2006-2013 Datera, Inc. 8 8 * 9 9 * Nicholas A. Bellinger <nab@linux-iscsi.org> 10 10 *
+1 -1
drivers/target/target_core_tmr.c
··· 3 3 * 4 4 * This file contains SPC-3 task management infrastructure 5 5 * 6 - * (c) Copyright 2009-2012 RisingTide Systems LLC. 6 + * (c) Copyright 2009-2013 Datera, Inc. 7 7 * 8 8 * Nicholas A. Bellinger <nab@kernel.org> 9 9 *
+1 -1
drivers/target/target_core_tpg.c
··· 3 3 * 4 4 * This file contains generic Target Portal Group related functions. 5 5 * 6 - * (c) Copyright 2002-2012 RisingTide Systems LLC. 6 + * (c) Copyright 2002-2013 Datera, Inc. 7 7 * 8 8 * Nicholas A. Bellinger <nab@kernel.org> 9 9 *
+143 -27
drivers/target/target_core_transport.c
··· 3 3 * 4 4 * This file contains the Generic Target Engine Core. 5 5 * 6 - * (c) Copyright 2002-2012 RisingTide Systems LLC. 6 + * (c) Copyright 2002-2013 Datera, Inc. 7 7 * 8 8 * Nicholas A. Bellinger <nab@kernel.org> 9 9 * ··· 67 67 static void transport_complete_task_attr(struct se_cmd *cmd); 68 68 static void transport_handle_queue_full(struct se_cmd *cmd, 69 69 struct se_device *dev); 70 - static int transport_generic_get_mem(struct se_cmd *cmd); 71 70 static int transport_put_cmd(struct se_cmd *cmd); 72 71 static void target_complete_ok_work(struct work_struct *work); 73 72 ··· 231 232 } 232 233 EXPORT_SYMBOL(transport_init_session); 233 234 235 + int transport_alloc_session_tags(struct se_session *se_sess, 236 + unsigned int tag_num, unsigned int tag_size) 237 + { 238 + int rc; 239 + 240 + se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL); 241 + if (!se_sess->sess_cmd_map) { 242 + pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 243 + return -ENOMEM; 244 + } 245 + 246 + rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); 247 + if (rc < 0) { 248 + pr_err("Unable to init se_sess->sess_tag_pool," 249 + " tag_num: %u\n", tag_num); 250 + kfree(se_sess->sess_cmd_map); 251 + se_sess->sess_cmd_map = NULL; 252 + return -ENOMEM; 253 + } 254 + 255 + return 0; 256 + } 257 + EXPORT_SYMBOL(transport_alloc_session_tags); 258 + 259 + struct se_session *transport_init_session_tags(unsigned int tag_num, 260 + unsigned int tag_size) 261 + { 262 + struct se_session *se_sess; 263 + int rc; 264 + 265 + se_sess = transport_init_session(); 266 + if (IS_ERR(se_sess)) 267 + return se_sess; 268 + 269 + rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); 270 + if (rc < 0) { 271 + transport_free_session(se_sess); 272 + return ERR_PTR(-ENOMEM); 273 + } 274 + 275 + return se_sess; 276 + } 277 + EXPORT_SYMBOL(transport_init_session_tags); 278 + 234 279 /* 235 280 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. 
236 281 */ ··· 410 367 411 368 void transport_free_session(struct se_session *se_sess) 412 369 { 370 + if (se_sess->sess_cmd_map) { 371 + percpu_ida_destroy(&se_sess->sess_tag_pool); 372 + kfree(se_sess->sess_cmd_map); 373 + } 413 374 kmem_cache_free(se_sess_cache, se_sess); 414 375 } 415 376 EXPORT_SYMBOL(transport_free_session); ··· 1253 1206 } 1254 1207 EXPORT_SYMBOL(transport_handle_cdb_direct); 1255 1208 1256 - static sense_reason_t 1209 + sense_reason_t 1257 1210 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1258 1211 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1259 1212 { ··· 1559 1512 * For SAM Task Attribute emulation for failed struct se_cmd 1560 1513 */ 1561 1514 transport_complete_task_attr(cmd); 1515 + /* 1516 + * Handle special case for COMPARE_AND_WRITE failure, where the 1517 + * callback is expected to drop the per device ->caw_mutex. 1518 + */ 1519 + if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1520 + cmd->transport_complete_callback) 1521 + cmd->transport_complete_callback(cmd); 1562 1522 1563 1523 switch (sense_reason) { 1564 1524 case TCM_NON_EXISTENT_LUN: ··· 1633 1579 } 1634 1580 EXPORT_SYMBOL(transport_generic_request_failure); 1635 1581 1636 - static void __target_execute_cmd(struct se_cmd *cmd) 1582 + void __target_execute_cmd(struct se_cmd *cmd) 1637 1583 { 1638 1584 sense_reason_t ret; 1639 1585 ··· 1838 1784 ret = cmd->se_tfo->queue_data_in(cmd); 1839 1785 break; 1840 1786 case DMA_TO_DEVICE: 1841 - if (cmd->t_bidi_data_sg) { 1787 + if (cmd->se_cmd_flags & SCF_BIDI) { 1842 1788 ret = cmd->se_tfo->queue_data_in(cmd); 1843 1789 if (ret < 0) 1844 1790 break; ··· 1910 1856 } 1911 1857 /* 1912 1858 * Check for a callback, used by amongst other things 1913 - * XDWRITE_READ_10 emulation. 1859 + * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 
1914 1860 */ 1915 - if (cmd->transport_complete_callback) 1916 - cmd->transport_complete_callback(cmd); 1861 + if (cmd->transport_complete_callback) { 1862 + sense_reason_t rc; 1863 + 1864 + rc = cmd->transport_complete_callback(cmd); 1865 + if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { 1866 + return; 1867 + } else if (rc) { 1868 + ret = transport_send_check_condition_and_sense(cmd, 1869 + rc, 0); 1870 + if (ret == -EAGAIN || ret == -ENOMEM) 1871 + goto queue_full; 1872 + 1873 + transport_lun_remove_cmd(cmd); 1874 + transport_cmd_check_stop_to_fabric(cmd); 1875 + return; 1876 + } 1877 + } 1917 1878 1918 1879 switch (cmd->data_direction) { 1919 1880 case DMA_FROM_DEVICE: ··· 1954 1885 /* 1955 1886 * Check if we need to send READ payload for BIDI-COMMAND 1956 1887 */ 1957 - if (cmd->t_bidi_data_sg) { 1888 + if (cmd->se_cmd_flags & SCF_BIDI) { 1958 1889 spin_lock(&cmd->se_lun->lun_sep_lock); 1959 1890 if (cmd->se_lun->lun_sep) { 1960 1891 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += ··· 1999 1930 kfree(sgl); 2000 1931 } 2001 1932 1933 + static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 1934 + { 1935 + /* 1936 + * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 1937 + * emulation, and free + reset pointers if necessary.. 
1938 + */ 1939 + if (!cmd->t_data_sg_orig) 1940 + return; 1941 + 1942 + kfree(cmd->t_data_sg); 1943 + cmd->t_data_sg = cmd->t_data_sg_orig; 1944 + cmd->t_data_sg_orig = NULL; 1945 + cmd->t_data_nents = cmd->t_data_nents_orig; 1946 + cmd->t_data_nents_orig = 0; 1947 + } 1948 + 2002 1949 static inline void transport_free_pages(struct se_cmd *cmd) 2003 1950 { 2004 - if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) 1951 + if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 1952 + transport_reset_sgl_orig(cmd); 2005 1953 return; 1954 + } 1955 + transport_reset_sgl_orig(cmd); 2006 1956 2007 1957 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2008 1958 cmd->t_data_sg = NULL; ··· 2117 2029 } 2118 2030 EXPORT_SYMBOL(transport_kunmap_data_sg); 2119 2031 2120 - static int 2121 - transport_generic_get_mem(struct se_cmd *cmd) 2032 + int 2033 + target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2034 + bool zero_page) 2122 2035 { 2123 - u32 length = cmd->data_length; 2124 - unsigned int nents; 2036 + struct scatterlist *sg; 2125 2037 struct page *page; 2126 - gfp_t zero_flag; 2038 + gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0; 2039 + unsigned int nent; 2127 2040 int i = 0; 2128 2041 2129 - nents = DIV_ROUND_UP(length, PAGE_SIZE); 2130 - cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); 2131 - if (!cmd->t_data_sg) 2042 + nent = DIV_ROUND_UP(length, PAGE_SIZE); 2043 + sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL); 2044 + if (!sg) 2132 2045 return -ENOMEM; 2133 2046 2134 - cmd->t_data_nents = nents; 2135 - sg_init_table(cmd->t_data_sg, nents); 2136 - 2137 - zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 
0 : __GFP_ZERO; 2047 + sg_init_table(sg, nent); 2138 2048 2139 2049 while (length) { 2140 2050 u32 page_len = min_t(u32, length, PAGE_SIZE); ··· 2140 2054 if (!page) 2141 2055 goto out; 2142 2056 2143 - sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); 2057 + sg_set_page(&sg[i], page, page_len, 0); 2144 2058 length -= page_len; 2145 2059 i++; 2146 2060 } 2061 + *sgl = sg; 2062 + *nents = nent; 2147 2063 return 0; 2148 2064 2149 2065 out: 2150 2066 while (i > 0) { 2151 2067 i--; 2152 - __free_page(sg_page(&cmd->t_data_sg[i])); 2068 + __free_page(sg_page(&sg[i])); 2153 2069 } 2154 - kfree(cmd->t_data_sg); 2155 - cmd->t_data_sg = NULL; 2070 + kfree(sg); 2156 2071 return -ENOMEM; 2157 2072 } 2158 2073 ··· 2174 2087 */ 2175 2088 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2176 2089 cmd->data_length) { 2177 - ret = transport_generic_get_mem(cmd); 2090 + bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2091 + 2092 + if ((cmd->se_cmd_flags & SCF_BIDI) || 2093 + (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2094 + u32 bidi_length; 2095 + 2096 + if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2097 + bidi_length = cmd->t_task_nolb * 2098 + cmd->se_dev->dev_attrib.block_size; 2099 + else 2100 + bidi_length = cmd->data_length; 2101 + 2102 + ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2103 + &cmd->t_bidi_data_nents, 2104 + bidi_length, zero_flag); 2105 + if (ret < 0) 2106 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2107 + } 2108 + 2109 + ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2110 + cmd->data_length, zero_flag); 2178 2111 if (ret < 0) 2179 2112 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2180 2113 } ··· 2846 2739 transport_get_sense_codes(cmd, &asc, &ascq); 2847 2740 buffer[SPC_ASC_KEY_OFFSET] = asc; 2848 2741 buffer[SPC_ASCQ_KEY_OFFSET] = ascq; 2742 + break; 2743 + case TCM_MISCOMPARE_VERIFY: 2744 + /* CURRENT ERROR */ 2745 + buffer[0] = 0x70; 2746 + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2747 + 
buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE; 2748 + /* MISCOMPARE DURING VERIFY OPERATION */ 2749 + buffer[SPC_ASC_KEY_OFFSET] = 0x1d; 2750 + buffer[SPC_ASCQ_KEY_OFFSET] = 0x00; 2849 2751 break; 2850 2752 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 2851 2753 default:
+1 -1
drivers/target/target_core_ua.c
··· 3 3 * 4 4 * This file contains logic for SPC-3 Unit Attention emulation 5 5 * 6 - * (c) Copyright 2009-2012 RisingTide Systems LLC. 6 + * (c) Copyright 2009-2013 Datera, Inc. 7 7 * 8 8 * Nicholas A. Bellinger <nab@kernel.org> 9 9 *
+1081
drivers/target/target_core_xcopy.c
··· 1 + /******************************************************************************* 2 + * Filename: target_core_xcopy.c 3 + * 4 + * This file contains support for SPC-4 Extended-Copy offload with generic 5 + * TCM backends. 6 + * 7 + * Copyright (c) 2011-2013 Datera, Inc. All rights reserved. 8 + * 9 + * Author: 10 + * Nicholas A. Bellinger <nab@daterainc.com> 11 + * 12 + * This program is free software; you can redistribute it and/or modify 13 + * it under the terms of the GNU General Public License as published by 14 + * the Free Software Foundation; either version 2 of the License, or 15 + * (at your option) any later version. 16 + * 17 + * This program is distributed in the hope that it will be useful, 18 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 + * GNU General Public License for more details. 21 + * 22 + ******************************************************************************/ 23 + 24 + #include <linux/slab.h> 25 + #include <linux/spinlock.h> 26 + #include <linux/list.h> 27 + #include <linux/configfs.h> 28 + #include <scsi/scsi.h> 29 + #include <scsi/scsi_cmnd.h> 30 + #include <asm/unaligned.h> 31 + 32 + #include <target/target_core_base.h> 33 + #include <target/target_core_backend.h> 34 + #include <target/target_core_fabric.h> 35 + #include <target/target_core_configfs.h> 36 + 37 + #include "target_core_pr.h" 38 + #include "target_core_ua.h" 39 + #include "target_core_xcopy.h" 40 + 41 + static struct workqueue_struct *xcopy_wq = NULL; 42 + /* 43 + * From target_core_spc.c 44 + */ 45 + extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *); 46 + /* 47 + * From target_core_device.c 48 + */ 49 + extern struct mutex g_device_mutex; 50 + extern struct list_head g_device_list; 51 + /* 52 + * From target_core_configfs.c 53 + */ 54 + extern struct configfs_subsystem *target_core_subsystem[]; 55 + 56 + static int 
target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) 57 + { 58 + int off = 0; 59 + 60 + buf[off++] = (0x6 << 4); 61 + buf[off++] = 0x01; 62 + buf[off++] = 0x40; 63 + buf[off] = (0x5 << 4); 64 + 65 + spc_parse_naa_6h_vendor_specific(dev, &buf[off]); 66 + return 0; 67 + } 68 + 69 + static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, 70 + bool src) 71 + { 72 + struct se_device *se_dev; 73 + struct configfs_subsystem *subsys = target_core_subsystem[0]; 74 + unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; 75 + int rc; 76 + 77 + if (src == true) 78 + dev_wwn = &xop->dst_tid_wwn[0]; 79 + else 80 + dev_wwn = &xop->src_tid_wwn[0]; 81 + 82 + mutex_lock(&g_device_mutex); 83 + list_for_each_entry(se_dev, &g_device_list, g_dev_node) { 84 + 85 + memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); 86 + target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); 87 + 88 + rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); 89 + if (rc != 0) 90 + continue; 91 + 92 + if (src == true) { 93 + xop->dst_dev = se_dev; 94 + pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located" 95 + " se_dev\n", xop->dst_dev); 96 + } else { 97 + xop->src_dev = se_dev; 98 + pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located" 99 + " se_dev\n", xop->src_dev); 100 + } 101 + 102 + rc = configfs_depend_item(subsys, 103 + &se_dev->dev_group.cg_item); 104 + if (rc != 0) { 105 + pr_err("configfs_depend_item attempt failed:" 106 + " %d for se_dev: %p\n", rc, se_dev); 107 + mutex_unlock(&g_device_mutex); 108 + return rc; 109 + } 110 + 111 + pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p" 112 + " se_dev->se_dev_group: %p\n", subsys, se_dev, 113 + &se_dev->dev_group); 114 + 115 + mutex_unlock(&g_device_mutex); 116 + return 0; 117 + } 118 + mutex_unlock(&g_device_mutex); 119 + 120 + pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); 121 + return -EINVAL; 122 + } 123 + 124 + static int 
target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, 125 + unsigned char *p, bool src) 126 + { 127 + unsigned char *desc = p; 128 + unsigned short ript; 129 + u8 desig_len; 130 + /* 131 + * Extract RELATIVE INITIATOR PORT IDENTIFIER 132 + */ 133 + ript = get_unaligned_be16(&desc[2]); 134 + pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript); 135 + /* 136 + * Check for supported code set, association, and designator type 137 + */ 138 + if ((desc[4] & 0x0f) != 0x1) { 139 + pr_err("XCOPY 0xe4: code set of non binary type not supported\n"); 140 + return -EINVAL; 141 + } 142 + if ((desc[5] & 0x30) != 0x00) { 143 + pr_err("XCOPY 0xe4: association other than LUN not supported\n"); 144 + return -EINVAL; 145 + } 146 + if ((desc[5] & 0x0f) != 0x3) { 147 + pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n", 148 + (desc[5] & 0x0f)); 149 + return -EINVAL; 150 + } 151 + /* 152 + * Check for matching 16 byte length for NAA IEEE Registered Extended 153 + * Assigned designator 154 + */ 155 + desig_len = desc[7]; 156 + if (desig_len != 16) { 157 + pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len); 158 + return -EINVAL; 159 + } 160 + pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len); 161 + /* 162 + * Check for NAA IEEE Registered Extended Assigned header.. 
163 + */ 164 + if ((desc[8] & 0xf0) != 0x60) { 165 + pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n", 166 + (desc[8] & 0xf0)); 167 + return -EINVAL; 168 + } 169 + 170 + if (src == true) { 171 + memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); 172 + /* 173 + * Determine if the source designator matches the local device 174 + */ 175 + if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0], 176 + XCOPY_NAA_IEEE_REGEX_LEN)) { 177 + xop->op_origin = XCOL_SOURCE_RECV_OP; 178 + xop->src_dev = se_cmd->se_dev; 179 + pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source" 180 + " received xop\n", xop->src_dev); 181 + } 182 + } else { 183 + memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); 184 + /* 185 + * Determine if the destination designator matches the local device 186 + */ 187 + if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0], 188 + XCOPY_NAA_IEEE_REGEX_LEN)) { 189 + xop->op_origin = XCOL_DEST_RECV_OP; 190 + xop->dst_dev = se_cmd->se_dev; 191 + pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination" 192 + " received xop\n", xop->dst_dev); 193 + } 194 + } 195 + 196 + return 0; 197 + } 198 + 199 + static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, 200 + struct xcopy_op *xop, unsigned char *p, 201 + unsigned short tdll) 202 + { 203 + struct se_device *local_dev = se_cmd->se_dev; 204 + unsigned char *desc = p; 205 + int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0; 206 + unsigned short start = 0; 207 + bool src = true; 208 + 209 + if (offset != 0) { 210 + pr_err("XCOPY target descriptor list length is not" 211 + " multiple of %d\n", XCOPY_TARGET_DESC_LEN); 212 + return -EINVAL; 213 + } 214 + if (tdll > 64) { 215 + pr_err("XCOPY target descriptor supports a maximum" 216 + " two src/dest descriptors, tdll: %hu too large..\n", tdll); 217 + return -EINVAL; 218 + } 219 + /* 220 + * Generate an IEEE Registered Extended designator based upon the 221 + * se_device the XCOPY was received upon.. 
222 + */ 223 + memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); 224 + target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]); 225 + 226 + while (start < tdll) { 227 + /* 228 + * Check target descriptor identification with 0xE4 type with 229 + * use VPD 0x83 WWPN matching .. 230 + */ 231 + switch (desc[0]) { 232 + case 0xe4: 233 + rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop, 234 + &desc[0], src); 235 + if (rc != 0) 236 + goto out; 237 + /* 238 + * Assume target descriptors are in source -> destination order.. 239 + */ 240 + if (src == true) 241 + src = false; 242 + else 243 + src = true; 244 + start += XCOPY_TARGET_DESC_LEN; 245 + desc += XCOPY_TARGET_DESC_LEN; 246 + ret++; 247 + break; 248 + default: 249 + pr_err("XCOPY unsupported descriptor type code:" 250 + " 0x%02x\n", desc[0]); 251 + goto out; 252 + } 253 + } 254 + 255 + if (xop->op_origin == XCOL_SOURCE_RECV_OP) 256 + rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true); 257 + else 258 + rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false); 259 + 260 + if (rc < 0) 261 + goto out; 262 + 263 + pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n", 264 + xop->src_dev, &xop->src_tid_wwn[0]); 265 + pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n", 266 + xop->dst_dev, &xop->dst_tid_wwn[0]); 267 + 268 + return ret; 269 + 270 + out: 271 + return -EINVAL; 272 + } 273 + 274 + static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop, 275 + unsigned char *p) 276 + { 277 + unsigned char *desc = p; 278 + int dc = (desc[1] & 0x02); 279 + unsigned short desc_len; 280 + 281 + desc_len = get_unaligned_be16(&desc[2]); 282 + if (desc_len != 0x18) { 283 + pr_err("XCOPY segment desc 0x02: Illegal desc_len:" 284 + " %hu\n", desc_len); 285 + return -EINVAL; 286 + } 287 + 288 + xop->stdi = get_unaligned_be16(&desc[4]); 289 + xop->dtdi = get_unaligned_be16(&desc[6]); 290 + pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n", 291 + 
desc_len, xop->stdi, xop->dtdi, dc); 292 + 293 + xop->nolb = get_unaligned_be16(&desc[10]); 294 + xop->src_lba = get_unaligned_be64(&desc[12]); 295 + xop->dst_lba = get_unaligned_be64(&desc[20]); 296 + pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n", 297 + xop->nolb, (unsigned long long)xop->src_lba, 298 + (unsigned long long)xop->dst_lba); 299 + 300 + if (dc != 0) { 301 + xop->dbl = (desc[29] << 16) & 0xff; 302 + xop->dbl |= (desc[30] << 8) & 0xff; 303 + xop->dbl |= desc[31] & 0xff; 304 + 305 + pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); 306 + } 307 + return 0; 308 + } 309 + 310 + static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, 311 + struct xcopy_op *xop, unsigned char *p, 312 + unsigned int sdll) 313 + { 314 + unsigned char *desc = p; 315 + unsigned int start = 0; 316 + int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0; 317 + 318 + if (offset != 0) { 319 + pr_err("XCOPY segment descriptor list length is not" 320 + " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN); 321 + return -EINVAL; 322 + } 323 + 324 + while (start < sdll) { 325 + /* 326 + * Check segment descriptor type code for block -> block 327 + */ 328 + switch (desc[0]) { 329 + case 0x02: 330 + rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc); 331 + if (rc < 0) 332 + goto out; 333 + 334 + ret++; 335 + start += XCOPY_SEGMENT_DESC_LEN; 336 + desc += XCOPY_SEGMENT_DESC_LEN; 337 + break; 338 + default: 339 + pr_err("XCOPY unspported segment descriptor" 340 + "type: 0x%02x\n", desc[0]); 341 + goto out; 342 + } 343 + } 344 + 345 + return ret; 346 + 347 + out: 348 + return -EINVAL; 349 + } 350 + 351 + /* 352 + * Start xcopy_pt ops 353 + */ 354 + 355 + struct xcopy_pt_cmd { 356 + bool remote_port; 357 + struct se_cmd se_cmd; 358 + struct xcopy_op *xcopy_op; 359 + struct completion xpt_passthrough_sem; 360 + }; 361 + 362 + static struct se_port xcopy_pt_port; 363 + static struct se_portal_group xcopy_pt_tpg; 364 + static struct se_session 
xcopy_pt_sess; 365 + static struct se_node_acl xcopy_pt_nacl; 366 + 367 + static char *xcopy_pt_get_fabric_name(void) 368 + { 369 + return "xcopy-pt"; 370 + } 371 + 372 + static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd) 373 + { 374 + return 0; 375 + } 376 + 377 + static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) 378 + { 379 + return 0; 380 + } 381 + 382 + static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) 383 + { 384 + struct configfs_subsystem *subsys = target_core_subsystem[0]; 385 + struct se_device *remote_dev; 386 + 387 + if (xop->op_origin == XCOL_SOURCE_RECV_OP) 388 + remote_dev = xop->dst_dev; 389 + else 390 + remote_dev = xop->src_dev; 391 + 392 + pr_debug("Calling configfs_undepend_item for subsys: %p" 393 + " remote_dev: %p remote_dev->dev_group: %p\n", 394 + subsys, remote_dev, &remote_dev->dev_group.cg_item); 395 + 396 + configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item); 397 + } 398 + 399 + static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) 400 + { 401 + struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd, 402 + struct xcopy_pt_cmd, se_cmd); 403 + 404 + if (xpt_cmd->remote_port) 405 + kfree(se_cmd->se_lun); 406 + 407 + kfree(xpt_cmd); 408 + } 409 + 410 + static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd) 411 + { 412 + struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd, 413 + struct xcopy_pt_cmd, se_cmd); 414 + 415 + complete(&xpt_cmd->xpt_passthrough_sem); 416 + return 0; 417 + } 418 + 419 + static int xcopy_pt_write_pending(struct se_cmd *se_cmd) 420 + { 421 + return 0; 422 + } 423 + 424 + static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd) 425 + { 426 + return 0; 427 + } 428 + 429 + static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd) 430 + { 431 + return 0; 432 + } 433 + 434 + static int xcopy_pt_queue_status(struct se_cmd *se_cmd) 435 + { 436 + return 0; 437 + } 438 + 439 + static struct target_core_fabric_ops xcopy_pt_tfo = { 440 + .get_fabric_name = xcopy_pt_get_fabric_name, 441 + 
.get_task_tag = xcopy_pt_get_tag, 442 + .get_cmd_state = xcopy_pt_get_cmd_state, 443 + .release_cmd = xcopy_pt_release_cmd, 444 + .check_stop_free = xcopy_pt_check_stop_free, 445 + .write_pending = xcopy_pt_write_pending, 446 + .write_pending_status = xcopy_pt_write_pending_status, 447 + .queue_data_in = xcopy_pt_queue_data_in, 448 + .queue_status = xcopy_pt_queue_status, 449 + }; 450 + 451 + /* 452 + * End xcopy_pt_ops 453 + */ 454 + 455 + int target_xcopy_setup_pt(void) 456 + { 457 + xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0); 458 + if (!xcopy_wq) { 459 + pr_err("Unable to allocate xcopy_wq\n"); 460 + return -ENOMEM; 461 + } 462 + 463 + memset(&xcopy_pt_port, 0, sizeof(struct se_port)); 464 + INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list); 465 + INIT_LIST_HEAD(&xcopy_pt_port.sep_list); 466 + mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex); 467 + 468 + memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group)); 469 + INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node); 470 + INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list); 471 + INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list); 472 + 473 + xcopy_pt_port.sep_tpg = &xcopy_pt_tpg; 474 + xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo; 475 + 476 + memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl)); 477 + INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list); 478 + INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list); 479 + memset(&xcopy_pt_sess, 0, sizeof(struct se_session)); 480 + INIT_LIST_HEAD(&xcopy_pt_sess.sess_list); 481 + INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list); 482 + 483 + xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg; 484 + xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess; 485 + 486 + xcopy_pt_sess.se_tpg = &xcopy_pt_tpg; 487 + xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl; 488 + 489 + return 0; 490 + } 491 + 492 + void target_xcopy_release_pt(void) 493 + { 494 + if (xcopy_wq) 495 + destroy_workqueue(xcopy_wq); 496 + } 497 + 498 + static void target_xcopy_setup_pt_port( 499 + struct xcopy_pt_cmd *xpt_cmd, 500 + struct xcopy_op *xop, 501 + bool remote_port) 502 
+ { 503 + struct se_cmd *ec_cmd = xop->xop_se_cmd; 504 + struct se_cmd *pt_cmd = &xpt_cmd->se_cmd; 505 + 506 + if (xop->op_origin == XCOL_SOURCE_RECV_OP) { 507 + /* 508 + * Honor destination port reservations for X-COPY PUSH emulation 509 + * when CDB is received on local source port, and READs blocks to 510 + * WRITE on remote destination port. 511 + */ 512 + if (remote_port) { 513 + xpt_cmd->remote_port = remote_port; 514 + pt_cmd->se_lun->lun_sep = &xcopy_pt_port; 515 + pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to" 516 + " cmd->se_lun->lun_sep for X-COPY data PUSH\n", 517 + pt_cmd->se_lun->lun_sep); 518 + } else { 519 + pt_cmd->se_lun = ec_cmd->se_lun; 520 + pt_cmd->se_dev = ec_cmd->se_dev; 521 + 522 + pr_debug("Honoring local SRC port from ec_cmd->se_dev:" 523 + " %p\n", pt_cmd->se_dev); 524 + pt_cmd->se_lun = ec_cmd->se_lun; 525 + pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n", 526 + pt_cmd->se_lun); 527 + } 528 + } else { 529 + /* 530 + * Honor source port reservation for X-COPY PULL emulation 531 + * when CDB is received on local desintation port, and READs 532 + * blocks from the remote source port to WRITE on local 533 + * destination port. 
534 + */ 535 + if (remote_port) { 536 + xpt_cmd->remote_port = remote_port; 537 + pt_cmd->se_lun->lun_sep = &xcopy_pt_port; 538 + pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to" 539 + " cmd->se_lun->lun_sep for X-COPY data PULL\n", 540 + pt_cmd->se_lun->lun_sep); 541 + } else { 542 + pt_cmd->se_lun = ec_cmd->se_lun; 543 + pt_cmd->se_dev = ec_cmd->se_dev; 544 + 545 + pr_debug("Honoring local DST port from ec_cmd->se_dev:" 546 + " %p\n", pt_cmd->se_dev); 547 + pt_cmd->se_lun = ec_cmd->se_lun; 548 + pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n", 549 + pt_cmd->se_lun); 550 + } 551 + } 552 + } 553 + 554 + static int target_xcopy_init_pt_lun( 555 + struct xcopy_pt_cmd *xpt_cmd, 556 + struct xcopy_op *xop, 557 + struct se_device *se_dev, 558 + struct se_cmd *pt_cmd, 559 + bool remote_port) 560 + { 561 + /* 562 + * Don't allocate + init an pt_cmd->se_lun if honoring local port for 563 + * reservations. The pt_cmd->se_lun pointer will be setup from within 564 + * target_xcopy_setup_pt_port() 565 + */ 566 + if (remote_port == false) { 567 + pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; 568 + return 0; 569 + } 570 + 571 + pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL); 572 + if (!pt_cmd->se_lun) { 573 + pr_err("Unable to allocate pt_cmd->se_lun\n"); 574 + return -ENOMEM; 575 + } 576 + init_completion(&pt_cmd->se_lun->lun_shutdown_comp); 577 + INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list); 578 + INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list); 579 + spin_lock_init(&pt_cmd->se_lun->lun_acl_lock); 580 + spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock); 581 + spin_lock_init(&pt_cmd->se_lun->lun_sep_lock); 582 + 583 + pt_cmd->se_dev = se_dev; 584 + 585 + pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev); 586 + pt_cmd->se_lun->lun_se_dev = se_dev; 587 + pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; 588 + 589 + pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n", 590 + 
pt_cmd->se_lun->lun_se_dev); 591 + 592 + return 0; 593 + } 594 + 595 + static int target_xcopy_setup_pt_cmd( 596 + struct xcopy_pt_cmd *xpt_cmd, 597 + struct xcopy_op *xop, 598 + struct se_device *se_dev, 599 + unsigned char *cdb, 600 + bool remote_port, 601 + bool alloc_mem) 602 + { 603 + struct se_cmd *cmd = &xpt_cmd->se_cmd; 604 + sense_reason_t sense_rc; 605 + int ret = 0, rc; 606 + /* 607 + * Setup LUN+port to honor reservations based upon xop->op_origin for 608 + * X-COPY PUSH or X-COPY PULL based upon where the CDB was received. 609 + */ 610 + rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port); 611 + if (rc < 0) { 612 + ret = rc; 613 + goto out; 614 + } 615 + xpt_cmd->xcopy_op = xop; 616 + target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port); 617 + 618 + sense_rc = target_setup_cmd_from_cdb(cmd, cdb); 619 + if (sense_rc) { 620 + ret = -EINVAL; 621 + goto out; 622 + } 623 + 624 + if (alloc_mem) { 625 + rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 626 + cmd->data_length, false); 627 + if (rc < 0) { 628 + ret = rc; 629 + goto out; 630 + } 631 + /* 632 + * Set this bit so that transport_free_pages() allows the 633 + * caller to release SGLs + physical memory allocated by 634 + * transport_generic_get_mem().. 635 + */ 636 + cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 637 + } else { 638 + /* 639 + * Here the previously allocated SGLs for the internal READ 640 + * are mapped zero-copy to the internal WRITE. 
641 + */ 642 + sense_rc = transport_generic_map_mem_to_cmd(cmd, 643 + xop->xop_data_sg, xop->xop_data_nents, 644 + NULL, 0); 645 + if (sense_rc) { 646 + ret = -EINVAL; 647 + goto out; 648 + } 649 + 650 + pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:" 651 + " %u\n", cmd->t_data_sg, cmd->t_data_nents); 652 + } 653 + 654 + return 0; 655 + 656 + out: 657 + if (remote_port == true) 658 + kfree(cmd->se_lun); 659 + return ret; 660 + } 661 + 662 + static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd) 663 + { 664 + struct se_cmd *se_cmd = &xpt_cmd->se_cmd; 665 + sense_reason_t sense_rc; 666 + 667 + sense_rc = transport_generic_new_cmd(se_cmd); 668 + if (sense_rc) 669 + return -EINVAL; 670 + 671 + if (se_cmd->data_direction == DMA_TO_DEVICE) 672 + target_execute_cmd(se_cmd); 673 + 674 + wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem); 675 + 676 + pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n", 677 + se_cmd->scsi_status); 678 + return 0; 679 + } 680 + 681 + static int target_xcopy_read_source( 682 + struct se_cmd *ec_cmd, 683 + struct xcopy_op *xop, 684 + struct se_device *src_dev, 685 + sector_t src_lba, 686 + u32 src_sectors) 687 + { 688 + struct xcopy_pt_cmd *xpt_cmd; 689 + struct se_cmd *se_cmd; 690 + u32 length = (src_sectors * src_dev->dev_attrib.block_size); 691 + int rc; 692 + unsigned char cdb[16]; 693 + bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP); 694 + 695 + xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL); 696 + if (!xpt_cmd) { 697 + pr_err("Unable to allocate xcopy_pt_cmd\n"); 698 + return -ENOMEM; 699 + } 700 + init_completion(&xpt_cmd->xpt_passthrough_sem); 701 + se_cmd = &xpt_cmd->se_cmd; 702 + 703 + memset(&cdb[0], 0, 16); 704 + cdb[0] = READ_16; 705 + put_unaligned_be64(src_lba, &cdb[2]); 706 + put_unaligned_be32(src_sectors, &cdb[10]); 707 + pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n", 708 + (unsigned long long)src_lba, src_sectors, length); 709 + 
710 + transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 711 + DMA_FROM_DEVICE, 0, NULL); 712 + xop->src_pt_cmd = xpt_cmd; 713 + 714 + rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0], 715 + remote_port, true); 716 + if (rc < 0) { 717 + transport_generic_free_cmd(se_cmd, 0); 718 + return rc; 719 + } 720 + 721 + xop->xop_data_sg = se_cmd->t_data_sg; 722 + xop->xop_data_nents = se_cmd->t_data_nents; 723 + pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ" 724 + " memory\n", xop->xop_data_sg, xop->xop_data_nents); 725 + 726 + rc = target_xcopy_issue_pt_cmd(xpt_cmd); 727 + if (rc < 0) { 728 + transport_generic_free_cmd(se_cmd, 0); 729 + return rc; 730 + } 731 + /* 732 + * Clear off the allocated t_data_sg, that has been saved for 733 + * zero-copy WRITE submission reuse in struct xcopy_op.. 734 + */ 735 + se_cmd->t_data_sg = NULL; 736 + se_cmd->t_data_nents = 0; 737 + 738 + return 0; 739 + } 740 + 741 + static int target_xcopy_write_destination( 742 + struct se_cmd *ec_cmd, 743 + struct xcopy_op *xop, 744 + struct se_device *dst_dev, 745 + sector_t dst_lba, 746 + u32 dst_sectors) 747 + { 748 + struct xcopy_pt_cmd *xpt_cmd; 749 + struct se_cmd *se_cmd; 750 + u32 length = (dst_sectors * dst_dev->dev_attrib.block_size); 751 + int rc; 752 + unsigned char cdb[16]; 753 + bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP); 754 + 755 + xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL); 756 + if (!xpt_cmd) { 757 + pr_err("Unable to allocate xcopy_pt_cmd\n"); 758 + return -ENOMEM; 759 + } 760 + init_completion(&xpt_cmd->xpt_passthrough_sem); 761 + se_cmd = &xpt_cmd->se_cmd; 762 + 763 + memset(&cdb[0], 0, 16); 764 + cdb[0] = WRITE_16; 765 + put_unaligned_be64(dst_lba, &cdb[2]); 766 + put_unaligned_be32(dst_sectors, &cdb[10]); 767 + pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n", 768 + (unsigned long long)dst_lba, dst_sectors, length); 769 + 770 + transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 
771 + DMA_TO_DEVICE, 0, NULL); 772 + xop->dst_pt_cmd = xpt_cmd; 773 + 774 + rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0], 775 + remote_port, false); 776 + if (rc < 0) { 777 + struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd; 778 + /* 779 + * If the failure happened before the t_mem_list hand-off in 780 + * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that 781 + * core releases this memory on error during X-COPY WRITE I/O. 782 + */ 783 + src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 784 + src_cmd->t_data_sg = xop->xop_data_sg; 785 + src_cmd->t_data_nents = xop->xop_data_nents; 786 + 787 + transport_generic_free_cmd(se_cmd, 0); 788 + return rc; 789 + } 790 + 791 + rc = target_xcopy_issue_pt_cmd(xpt_cmd); 792 + if (rc < 0) { 793 + se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 794 + transport_generic_free_cmd(se_cmd, 0); 795 + return rc; 796 + } 797 + 798 + return 0; 799 + } 800 + 801 + static void target_xcopy_do_work(struct work_struct *work) 802 + { 803 + struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work); 804 + struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev; 805 + struct se_cmd *ec_cmd = xop->xop_se_cmd; 806 + sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba; 807 + unsigned int max_sectors; 808 + int rc; 809 + unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0; 810 + 811 + end_lba = src_lba + nolb; 812 + /* 813 + * Break up XCOPY I/O into hw_max_sectors sized I/O based on the 814 + * smallest max_sectors between src_dev + dev_dev, or 815 + */ 816 + max_sectors = min(src_dev->dev_attrib.hw_max_sectors, 817 + dst_dev->dev_attrib.hw_max_sectors); 818 + max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS); 819 + 820 + max_nolb = min_t(u16, max_sectors, ((u16)(~0U))); 821 + 822 + pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n", 823 + nolb, max_nolb, (unsigned long long)end_lba); 824 + 
pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n", 825 + (unsigned long long)src_lba, (unsigned long long)dst_lba); 826 + 827 + while (src_lba < end_lba) { 828 + cur_nolb = min(nolb, max_nolb); 829 + 830 + pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu," 831 + " cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb); 832 + 833 + rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb); 834 + if (rc < 0) 835 + goto out; 836 + 837 + src_lba += cur_nolb; 838 + pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n", 839 + (unsigned long long)src_lba); 840 + 841 + pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu," 842 + " cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb); 843 + 844 + rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev, 845 + dst_lba, cur_nolb); 846 + if (rc < 0) { 847 + transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0); 848 + goto out; 849 + } 850 + 851 + dst_lba += cur_nolb; 852 + pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n", 853 + (unsigned long long)dst_lba); 854 + 855 + copied_nolb += cur_nolb; 856 + nolb -= cur_nolb; 857 + 858 + transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0); 859 + xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 860 + 861 + transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0); 862 + } 863 + 864 + xcopy_pt_undepend_remotedev(xop); 865 + kfree(xop); 866 + 867 + pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n", 868 + (unsigned long long)src_lba, (unsigned long long)dst_lba); 869 + pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n", 870 + copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size); 871 + 872 + pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n"); 873 + target_complete_cmd(ec_cmd, SAM_STAT_GOOD); 874 + return; 875 + 876 + out: 877 + 
xcopy_pt_undepend_remotedev(xop); 878 + kfree(xop); 879 + 880 + pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n"); 881 + ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 882 + target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION); 883 + } 884 + 885 + sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) 886 + { 887 + struct xcopy_op *xop = NULL; 888 + unsigned char *p = NULL, *seg_desc; 889 + unsigned int list_id, list_id_usage, sdll, inline_dl, sa; 890 + int rc; 891 + unsigned short tdll; 892 + 893 + sa = se_cmd->t_task_cdb[1] & 0x1f; 894 + if (sa != 0x00) { 895 + pr_err("EXTENDED_COPY(LID4) not supported\n"); 896 + return TCM_UNSUPPORTED_SCSI_OPCODE; 897 + } 898 + 899 + p = transport_kmap_data_sg(se_cmd); 900 + if (!p) { 901 + pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n"); 902 + return TCM_OUT_OF_RESOURCES; 903 + } 904 + 905 + list_id = p[0]; 906 + if (list_id != 0x00) { 907 + pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id); 908 + goto out; 909 + } 910 + list_id_usage = (p[1] & 0x18); 911 + /* 912 + * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH 913 + */ 914 + tdll = get_unaligned_be16(&p[2]); 915 + sdll = get_unaligned_be32(&p[8]); 916 + 917 + inline_dl = get_unaligned_be32(&p[12]); 918 + if (inline_dl != 0) { 919 + pr_err("XCOPY with non zero inline data length\n"); 920 + goto out; 921 + } 922 + 923 + xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); 924 + if (!xop) { 925 + pr_err("Unable to allocate xcopy_op\n"); 926 + goto out; 927 + } 928 + xop->xop_se_cmd = se_cmd; 929 + 930 + pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x" 931 + " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, 932 + tdll, sdll, inline_dl); 933 + 934 + rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll); 935 + if (rc <= 0) 936 + goto out; 937 + 938 + pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, 939 + rc * 
XCOPY_TARGET_DESC_LEN); 940 + seg_desc = &p[16]; 941 + seg_desc += (rc * XCOPY_TARGET_DESC_LEN); 942 + 943 + rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll); 944 + if (rc <= 0) { 945 + xcopy_pt_undepend_remotedev(xop); 946 + goto out; 947 + } 948 + transport_kunmap_data_sg(se_cmd); 949 + 950 + pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, 951 + rc * XCOPY_SEGMENT_DESC_LEN); 952 + INIT_WORK(&xop->xop_work, target_xcopy_do_work); 953 + queue_work(xcopy_wq, &xop->xop_work); 954 + return TCM_NO_SENSE; 955 + 956 + out: 957 + if (p) 958 + transport_kunmap_data_sg(se_cmd); 959 + kfree(xop); 960 + return TCM_INVALID_CDB_FIELD; 961 + } 962 + 963 + static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd) 964 + { 965 + unsigned char *p; 966 + 967 + p = transport_kmap_data_sg(se_cmd); 968 + if (!p) { 969 + pr_err("transport_kmap_data_sg failed in" 970 + " target_rcr_operating_parameters\n"); 971 + return TCM_OUT_OF_RESOURCES; 972 + } 973 + 974 + if (se_cmd->data_length < 54) { 975 + pr_err("Receive Copy Results Op Parameters length" 976 + " too small: %u\n", se_cmd->data_length); 977 + transport_kunmap_data_sg(se_cmd); 978 + return TCM_INVALID_CDB_FIELD; 979 + } 980 + /* 981 + * Set SNLID=1 (Supports no List ID) 982 + */ 983 + p[4] = 0x1; 984 + /* 985 + * MAXIMUM TARGET DESCRIPTOR COUNT 986 + */ 987 + put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]); 988 + /* 989 + * MAXIMUM SEGMENT DESCRIPTOR COUNT 990 + */ 991 + put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]); 992 + /* 993 + * MAXIMUM DESCRIPTOR LIST LENGTH 994 + */ 995 + put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]); 996 + /* 997 + * MAXIMUM SEGMENT LENGTH 998 + */ 999 + put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]); 1000 + /* 1001 + * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED) 1002 + */ 1003 + put_unaligned_be32(0x0, &p[20]); 1004 + /* 1005 + * HELD DATA LIMIT 1006 + */ 1007 + put_unaligned_be32(0x0, &p[24]); 1008 + /* 
1009 + * MAXIMUM STREAM DEVICE TRANSFER SIZE 1010 + */ 1011 + put_unaligned_be32(0x0, &p[28]); 1012 + /* 1013 + * TOTAL CONCURRENT COPIES 1014 + */ 1015 + put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]); 1016 + /* 1017 + * MAXIMUM CONCURRENT COPIES 1018 + */ 1019 + p[36] = RCR_OP_MAX_CONCURR_COPIES; 1020 + /* 1021 + * DATA SEGMENT GRANULARITY (log 2) 1022 + */ 1023 + p[37] = RCR_OP_DATA_SEG_GRAN_LOG2; 1024 + /* 1025 + * INLINE DATA GRANULARITY log 2) 1026 + */ 1027 + p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2; 1028 + /* 1029 + * HELD DATA GRANULARITY 1030 + */ 1031 + p[39] = RCR_OP_HELD_DATA_GRAN_LOG2; 1032 + /* 1033 + * IMPLEMENTED DESCRIPTOR LIST LENGTH 1034 + */ 1035 + p[43] = 0x2; 1036 + /* 1037 + * List of implemented descriptor type codes (ordered) 1038 + */ 1039 + p[44] = 0x02; /* Copy Block to Block device */ 1040 + p[45] = 0xe4; /* Identification descriptor target descriptor */ 1041 + 1042 + /* 1043 + * AVAILABLE DATA (n-3) 1044 + */ 1045 + put_unaligned_be32(42, &p[0]); 1046 + 1047 + transport_kunmap_data_sg(se_cmd); 1048 + target_complete_cmd(se_cmd, GOOD); 1049 + 1050 + return TCM_NO_SENSE; 1051 + } 1052 + 1053 + sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd) 1054 + { 1055 + unsigned char *cdb = &se_cmd->t_task_cdb[0]; 1056 + int sa = (cdb[1] & 0x1f), list_id = cdb[2]; 1057 + sense_reason_t rc = TCM_NO_SENSE; 1058 + 1059 + pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:" 1060 + " 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length); 1061 + 1062 + if (list_id != 0) { 1063 + pr_err("Receive Copy Results with non zero list identifier" 1064 + " not supported\n"); 1065 + return TCM_INVALID_CDB_FIELD; 1066 + } 1067 + 1068 + switch (sa) { 1069 + case RCR_SA_OPERATING_PARAMETERS: 1070 + rc = target_rcr_operating_parameters(se_cmd); 1071 + break; 1072 + case RCR_SA_COPY_STATUS: 1073 + case RCR_SA_RECEIVE_DATA: 1074 + case RCR_SA_FAILED_SEGMENT_DETAILS: 1075 + default: 1076 + pr_err("Unsupported SA for receive copy 
results: 0x%02x\n", sa); 1077 + return TCM_INVALID_CDB_FIELD; 1078 + } 1079 + 1080 + return rc; 1081 + }
+62
drivers/target/target_core_xcopy.h
··· 1 + #define XCOPY_TARGET_DESC_LEN 32 2 + #define XCOPY_SEGMENT_DESC_LEN 28 3 + #define XCOPY_NAA_IEEE_REGEX_LEN 16 4 + #define XCOPY_MAX_SECTORS 1024 5 + 6 + enum xcopy_origin_list { 7 + XCOL_SOURCE_RECV_OP = 0x01, 8 + XCOL_DEST_RECV_OP = 0x02, 9 + }; 10 + 11 + struct xcopy_pt_cmd; 12 + 13 + struct xcopy_op { 14 + int op_origin; 15 + 16 + struct se_cmd *xop_se_cmd; 17 + struct se_device *src_dev; 18 + unsigned char src_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; 19 + struct se_device *dst_dev; 20 + unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; 21 + unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; 22 + 23 + sector_t src_lba; 24 + sector_t dst_lba; 25 + unsigned short stdi; 26 + unsigned short dtdi; 27 + unsigned short nolb; 28 + unsigned int dbl; 29 + 30 + struct xcopy_pt_cmd *src_pt_cmd; 31 + struct xcopy_pt_cmd *dst_pt_cmd; 32 + 33 + u32 xop_data_nents; 34 + struct scatterlist *xop_data_sg; 35 + struct work_struct xop_work; 36 + }; 37 + 38 + /* 39 + * Receive Copy Results Sevice Actions 40 + */ 41 + #define RCR_SA_COPY_STATUS 0x00 42 + #define RCR_SA_RECEIVE_DATA 0x01 43 + #define RCR_SA_OPERATING_PARAMETERS 0x03 44 + #define RCR_SA_FAILED_SEGMENT_DETAILS 0x04 45 + 46 + /* 47 + * Receive Copy Results defs for Operating Parameters 48 + */ 49 + #define RCR_OP_MAX_TARGET_DESC_COUNT 0x2 50 + #define RCR_OP_MAX_SG_DESC_COUNT 0x1 51 + #define RCR_OP_MAX_DESC_LIST_LEN 1024 52 + #define RCR_OP_MAX_SEGMENT_LEN 268435456 /* 256 MB */ 53 + #define RCR_OP_TOTAL_CONCURR_COPIES 0x1 /* Must be <= 16384 */ 54 + #define RCR_OP_MAX_CONCURR_COPIES 0x1 /* Must be <= 255 */ 55 + #define RCR_OP_DATA_SEG_GRAN_LOG2 9 /* 512 bytes in log 2 */ 56 + #define RCR_OP_INLINE_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */ 57 + #define RCR_OP_HELD_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */ 58 + 59 + extern int target_xcopy_setup_pt(void); 60 + extern void target_xcopy_release_pt(void); 61 + extern sense_reason_t target_do_xcopy(struct se_cmd *); 62 + extern sense_reason_t 
target_do_receive_copy_results(struct se_cmd *);
+5 -1
drivers/target/tcm_fc/tfc_conf.c
··· 311 311 */ 312 312 if (strstr(name, "tpgt_") != name) 313 313 return NULL; 314 - if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX) 314 + 315 + ret = kstrtoul(name + 5, 10, &index); 316 + if (ret) 317 + return NULL; 318 + if (index > UINT_MAX) 315 319 return NULL; 316 320 317 321 lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
+103 -33
drivers/vhost/scsi.c
··· 1 1 /******************************************************************************* 2 2 * Vhost kernel TCM fabric driver for virtio SCSI initiators 3 3 * 4 - * (C) Copyright 2010-2012 RisingTide Systems LLC. 4 + * (C) Copyright 2010-2013 Datera, Inc. 5 5 * (C) Copyright 2010-2012 IBM Corp. 6 6 * 7 7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 8 8 * 9 - * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com> 9 + * Authors: Nicholas A. Bellinger <nab@daterainc.com> 10 10 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> 11 11 * 12 12 * This program is free software; you can redistribute it and/or modify ··· 48 48 #include <linux/virtio_scsi.h> 49 49 #include <linux/llist.h> 50 50 #include <linux/bitmap.h> 51 + #include <linux/percpu_ida.h> 51 52 52 53 #include "vhost.h" 53 54 54 55 #define TCM_VHOST_VERSION "v0.1" 55 56 #define TCM_VHOST_NAMELEN 256 56 57 #define TCM_VHOST_MAX_CDB_SIZE 32 58 + #define TCM_VHOST_DEFAULT_TAGS 256 59 + #define TCM_VHOST_PREALLOC_SGLS 2048 60 + #define TCM_VHOST_PREALLOC_PAGES 2048 57 61 58 62 struct vhost_scsi_inflight { 59 63 /* Wait for the flush operation to finish */ ··· 83 79 u32 tvc_lun; 84 80 /* Pointer to the SGL formatted memory from virtio-scsi */ 85 81 struct scatterlist *tvc_sgl; 82 + struct page **tvc_upages; 86 83 /* Pointer to response */ 87 84 struct virtio_scsi_cmd_resp __user *tvc_resp; 88 85 /* Pointer to vhost_scsi for our device */ ··· 455 450 { 456 451 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, 457 452 struct tcm_vhost_cmd, tvc_se_cmd); 453 + struct se_session *se_sess = se_cmd->se_sess; 458 454 459 455 if (tv_cmd->tvc_sgl_count) { 460 456 u32 i; 461 457 for (i = 0; i < tv_cmd->tvc_sgl_count; i++) 462 458 put_page(sg_page(&tv_cmd->tvc_sgl[i])); 463 - 464 - kfree(tv_cmd->tvc_sgl); 465 459 } 466 460 467 461 tcm_vhost_put_inflight(tv_cmd->inflight); 468 - kfree(tv_cmd); 462 + percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 469 463 } 470 464 471 465 
static int tcm_vhost_shutdown_session(struct se_session *se_sess) ··· 708 704 } 709 705 710 706 static struct tcm_vhost_cmd * 711 - vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq, 707 + vhost_scsi_get_tag(struct vhost_virtqueue *vq, 712 708 struct tcm_vhost_tpg *tpg, 713 709 struct virtio_scsi_cmd_req *v_req, 714 710 u32 exp_data_len, ··· 716 712 { 717 713 struct tcm_vhost_cmd *cmd; 718 714 struct tcm_vhost_nexus *tv_nexus; 715 + struct se_session *se_sess; 716 + struct scatterlist *sg; 717 + struct page **pages; 718 + int tag; 719 719 720 720 tv_nexus = tpg->tpg_nexus; 721 721 if (!tv_nexus) { 722 722 pr_err("Unable to locate active struct tcm_vhost_nexus\n"); 723 723 return ERR_PTR(-EIO); 724 724 } 725 + se_sess = tv_nexus->tvn_se_sess; 725 726 726 - cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC); 727 - if (!cmd) { 728 - pr_err("Unable to allocate struct tcm_vhost_cmd\n"); 729 - return ERR_PTR(-ENOMEM); 730 - } 727 + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL); 728 + cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; 729 + sg = cmd->tvc_sgl; 730 + pages = cmd->tvc_upages; 731 + memset(cmd, 0, sizeof(struct tcm_vhost_cmd)); 732 + 733 + cmd->tvc_sgl = sg; 734 + cmd->tvc_upages = pages; 735 + cmd->tvc_se_cmd.map_tag = tag; 731 736 cmd->tvc_tag = v_req->tag; 732 737 cmd->tvc_task_attr = v_req->task_attr; 733 738 cmd->tvc_exp_data_len = exp_data_len; ··· 753 740 * Returns the number of scatterlist entries used or -errno on error. 
754 741 */ 755 742 static int 756 - vhost_scsi_map_to_sgl(struct scatterlist *sgl, 743 + vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd, 744 + struct scatterlist *sgl, 757 745 unsigned int sgl_count, 758 746 struct iovec *iov, 759 747 int write) ··· 766 752 struct page **pages; 767 753 int ret, i; 768 754 755 + if (sgl_count > TCM_VHOST_PREALLOC_SGLS) { 756 + pr_err("vhost_scsi_map_to_sgl() psgl_count: %u greater than" 757 + " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n", 758 + sgl_count, TCM_VHOST_PREALLOC_SGLS); 759 + return -ENOBUFS; 760 + } 761 + 769 762 pages_nr = iov_num_pages(iov); 770 763 if (pages_nr > sgl_count) 771 764 return -ENOBUFS; 772 765 773 - pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL); 774 - if (!pages) 775 - return -ENOMEM; 766 + if (pages_nr > TCM_VHOST_PREALLOC_PAGES) { 767 + pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" 768 + " preallocated TCM_VHOST_PREALLOC_PAGES: %u\n", 769 + pages_nr, TCM_VHOST_PREALLOC_PAGES); 770 + return -ENOBUFS; 771 + } 772 + 773 + pages = tv_cmd->tvc_upages; 776 774 777 775 ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages); 778 776 /* No pages were pinned */ ··· 809 783 } 810 784 811 785 out: 812 - kfree(pages); 813 786 return ret; 814 787 } 815 788 ··· 832 807 833 808 /* TODO overflow checking */ 834 809 835 - sg = kmalloc(sizeof(cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC); 836 - if (!sg) 837 - return -ENOMEM; 838 - pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__, 839 - sg, sgl_count, !sg); 810 + sg = cmd->tvc_sgl; 811 + pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count); 840 812 sg_init_table(sg, sgl_count); 841 813 842 - cmd->tvc_sgl = sg; 843 814 cmd->tvc_sgl_count = sgl_count; 844 815 845 816 pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count); 846 817 for (i = 0; i < niov; i++) { 847 - ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write); 818 + ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i], 819 + write); 848 820 if 
(ret < 0) { 849 821 for (i = 0; i < cmd->tvc_sgl_count; i++) 850 822 put_page(sg_page(&cmd->tvc_sgl[i])); 851 - kfree(cmd->tvc_sgl); 852 - cmd->tvc_sgl = NULL; 823 + 853 824 cmd->tvc_sgl_count = 0; 854 825 return ret; 855 826 } ··· 1010 989 for (i = 0; i < data_num; i++) 1011 990 exp_data_len += vq->iov[data_first + i].iov_len; 1012 991 1013 - cmd = vhost_scsi_allocate_cmd(vq, tpg, &v_req, 1014 - exp_data_len, data_direction); 992 + cmd = vhost_scsi_get_tag(vq, tpg, &v_req, 993 + exp_data_len, data_direction); 1015 994 if (IS_ERR(cmd)) { 1016 - vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", 995 + vq_err(vq, "vhost_scsi_get_tag failed %ld\n", 1017 996 PTR_ERR(cmd)); 1018 997 goto err_cmd; 1019 998 } ··· 1675 1654 kfree(nacl); 1676 1655 } 1677 1656 1657 + static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus, 1658 + struct se_session *se_sess) 1659 + { 1660 + struct tcm_vhost_cmd *tv_cmd; 1661 + unsigned int i; 1662 + 1663 + if (!se_sess->sess_cmd_map) 1664 + return; 1665 + 1666 + for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) { 1667 + tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; 1668 + 1669 + kfree(tv_cmd->tvc_sgl); 1670 + kfree(tv_cmd->tvc_upages); 1671 + } 1672 + } 1673 + 1678 1674 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, 1679 1675 const char *name) 1680 1676 { 1681 1677 struct se_portal_group *se_tpg; 1678 + struct se_session *se_sess; 1682 1679 struct tcm_vhost_nexus *tv_nexus; 1680 + struct tcm_vhost_cmd *tv_cmd; 1681 + unsigned int i; 1683 1682 1684 1683 mutex_lock(&tpg->tv_tpg_mutex); 1685 1684 if (tpg->tpg_nexus) { ··· 1716 1675 return -ENOMEM; 1717 1676 } 1718 1677 /* 1719 - * Initialize the struct se_session pointer 1678 + * Initialize the struct se_session pointer and setup tagpool 1679 + * for struct tcm_vhost_cmd descriptors 1720 1680 */ 1721 - tv_nexus->tvn_se_sess = transport_init_session(); 1681 + tv_nexus->tvn_se_sess = transport_init_session_tags( 1682 + TCM_VHOST_DEFAULT_TAGS, 1683 + 
sizeof(struct tcm_vhost_cmd)); 1722 1684 if (IS_ERR(tv_nexus->tvn_se_sess)) { 1723 1685 mutex_unlock(&tpg->tv_tpg_mutex); 1724 1686 kfree(tv_nexus); 1725 1687 return -ENOMEM; 1688 + } 1689 + se_sess = tv_nexus->tvn_se_sess; 1690 + for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) { 1691 + tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; 1692 + 1693 + tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) * 1694 + TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL); 1695 + if (!tv_cmd->tvc_sgl) { 1696 + mutex_unlock(&tpg->tv_tpg_mutex); 1697 + pr_err("Unable to allocate tv_cmd->tvc_sgl\n"); 1698 + goto out; 1699 + } 1700 + 1701 + tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) * 1702 + TCM_VHOST_PREALLOC_PAGES, GFP_KERNEL); 1703 + if (!tv_cmd->tvc_upages) { 1704 + mutex_unlock(&tpg->tv_tpg_mutex); 1705 + pr_err("Unable to allocate tv_cmd->tvc_upages\n"); 1706 + goto out; 1707 + } 1726 1708 } 1727 1709 /* 1728 1710 * Since we are running in 'demo mode' this call with generate a ··· 1758 1694 mutex_unlock(&tpg->tv_tpg_mutex); 1759 1695 pr_debug("core_tpg_check_initiator_node_acl() failed" 1760 1696 " for %s\n", name); 1761 - transport_free_session(tv_nexus->tvn_se_sess); 1762 - kfree(tv_nexus); 1763 - return -ENOMEM; 1697 + goto out; 1764 1698 } 1765 1699 /* 1766 1700 * Now register the TCM vhost virtual I_T Nexus as active with the ··· 1770 1708 1771 1709 mutex_unlock(&tpg->tv_tpg_mutex); 1772 1710 return 0; 1711 + 1712 + out: 1713 + tcm_vhost_free_cmd_map_res(tv_nexus, se_sess); 1714 + transport_free_session(se_sess); 1715 + kfree(tv_nexus); 1716 + return -ENOMEM; 1773 1717 } 1774 1718 1775 1719 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) ··· 1815 1747 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated" 1816 1748 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), 1817 1749 tv_nexus->tvn_se_sess->se_node_acl->initiatorname); 1750 + 1751 + tcm_vhost_free_cmd_map_res(tv_nexus, se_sess); 1818 1752 /* 1819 1753 * Release the SCSI I_T 
Nexus to the emulated vhost Target Port 1820 1754 */
+60
include/linux/percpu_ida.h
··· 1 + #ifndef __PERCPU_IDA_H__ 2 + #define __PERCPU_IDA_H__ 3 + 4 + #include <linux/types.h> 5 + #include <linux/bitops.h> 6 + #include <linux/init.h> 7 + #include <linux/spinlock_types.h> 8 + #include <linux/wait.h> 9 + #include <linux/cpumask.h> 10 + 11 + struct percpu_ida_cpu; 12 + 13 + struct percpu_ida { 14 + /* 15 + * number of tags available to be allocated, as passed to 16 + * percpu_ida_init() 17 + */ 18 + unsigned nr_tags; 19 + 20 + struct percpu_ida_cpu __percpu *tag_cpu; 21 + 22 + /* 23 + * Bitmap of cpus that (may) have tags on their percpu freelists: 24 + * steal_tags() uses this to decide when to steal tags, and which cpus 25 + * to try stealing from. 26 + * 27 + * It's ok for a freelist to be empty when its bit is set - steal_tags() 28 + * will just keep looking - but the bitmap _must_ be set whenever a 29 + * percpu freelist does have tags. 30 + */ 31 + cpumask_t cpus_have_tags; 32 + 33 + struct { 34 + spinlock_t lock; 35 + /* 36 + * When we go to steal tags from another cpu (see steal_tags()), 37 + * we want to pick a cpu at random. Cycling through them every 38 + * time we steal is a bit easier and more or less equivalent: 39 + */ 40 + unsigned cpu_last_stolen; 41 + 42 + /* For sleeping on allocation failure */ 43 + wait_queue_head_t wait; 44 + 45 + /* 46 + * Global freelist - it's a stack where nr_free points to the 47 + * top 48 + */ 49 + unsigned nr_free; 50 + unsigned *freelist; 51 + } ____cacheline_aligned_in_smp; 52 + }; 53 + 54 + int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp); 55 + void percpu_ida_free(struct percpu_ida *pool, unsigned tag); 56 + 57 + void percpu_ida_destroy(struct percpu_ida *pool); 58 + int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags); 59 + 60 + #endif /* __PERCPU_IDA_H__ */
+1
include/scsi/scsi.h
··· 144 144 #define ACCESS_CONTROL_IN 0x86 145 145 #define ACCESS_CONTROL_OUT 0x87 146 146 #define READ_16 0x88 147 + #define COMPARE_AND_WRITE 0x89 147 148 #define WRITE_16 0x8a 148 149 #define READ_ATTRIBUTE 0x8c 149 150 #define WRITE_ATTRIBUTE 0x8d
+7 -1
include/target/iscsi/iscsi_transport.h
··· 6 6 #define ISCSIT_TRANSPORT_NAME 16 7 7 char name[ISCSIT_TRANSPORT_NAME]; 8 8 int transport_type; 9 + int priv_size; 9 10 struct module *owner; 10 11 struct list_head t_node; 11 12 int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *); 12 13 int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *); 13 14 void (*iscsit_free_np)(struct iscsi_np *); 14 15 void (*iscsit_free_conn)(struct iscsi_conn *); 15 - struct iscsi_cmd *(*iscsit_alloc_cmd)(struct iscsi_conn *, gfp_t); 16 16 int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *); 17 17 int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32); 18 18 int (*iscsit_immediate_queue)(struct iscsi_conn *, struct iscsi_cmd *, int); ··· 21 21 int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *); 22 22 int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *); 23 23 }; 24 + 25 + static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd) 26 + { 27 + return (void *)(cmd + 1); 28 + } 24 29 25 30 /* 26 31 * From iscsi_target_transport.c ··· 97 92 extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); 98 93 extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, 99 94 unsigned char *, __be32); 95 + extern void iscsit_release_cmd(struct iscsi_cmd *);
+6 -1
include/target/target_core_backend.h
··· 39 39 }; 40 40 41 41 struct sbc_ops { 42 - sense_reason_t (*execute_rw)(struct se_cmd *cmd); 42 + sense_reason_t (*execute_rw)(struct se_cmd *cmd, struct scatterlist *, 43 + u32, enum dma_data_direction); 43 44 sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd); 44 45 sense_reason_t (*execute_write_same)(struct se_cmd *cmd); 45 46 sense_reason_t (*execute_write_same_unmap)(struct se_cmd *cmd); ··· 74 73 /* core helpers also used by command snooping in pscsi */ 75 74 void *transport_kmap_data_sg(struct se_cmd *); 76 75 void transport_kunmap_data_sg(struct se_cmd *); 76 + /* core helpers also used by xcopy during internal command setup */ 77 + int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool); 78 + sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, 79 + struct scatterlist *, u32, struct scatterlist *, u32); 77 80 78 81 void array_free(void *array, int n); 79 82
+24 -2
include/target/target_core_base.h
··· 5 5 #include <linux/configfs.h> 6 6 #include <linux/dma-mapping.h> 7 7 #include <linux/blkdev.h> 8 + #include <linux/percpu_ida.h> 8 9 #include <scsi/scsi_cmnd.h> 9 10 #include <net/sock.h> 10 11 #include <net/tcp.h> 11 12 12 - #define TARGET_CORE_MOD_VERSION "v4.1.0-rc2-ml" 13 + #define TARGET_CORE_MOD_VERSION "v4.1.0" 13 14 #define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION 14 15 15 16 /* Maximum Number of LUNs per Target Portal Group */ ··· 97 96 * block/blk-lib.c:blkdev_issue_discard() 98 97 */ 99 98 #define DA_EMULATE_TPWS 0 99 + /* Emulation for CompareAndWrite (AtomicTestandSet) by default */ 100 + #define DA_EMULATE_CAW 1 101 + /* Emulation for 3rd Party Copy (ExtendedCopy) by default */ 102 + #define DA_EMULATE_3PC 1 100 103 /* No Emulation for PSCSI by default */ 101 104 #define DA_EMULATE_ALUA 0 102 105 /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */ ··· 163 158 SCF_ALUA_NON_OPTIMIZED = 0x00008000, 164 159 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, 165 160 SCF_ACK_KREF = 0x00040000, 161 + SCF_COMPARE_AND_WRITE = 0x00080000, 162 + SCF_COMPARE_AND_WRITE_POST = 0x00100000, 163 + SCF_CMD_XCOPY_PASSTHROUGH = 0x00200000, 166 164 }; 167 165 168 166 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ ··· 204 196 TCM_ADDRESS_OUT_OF_RANGE = R(0x11), 205 197 TCM_OUT_OF_RESOURCES = R(0x12), 206 198 TCM_PARAMETER_LIST_LENGTH_ERROR = R(0x13), 199 + TCM_MISCOMPARE_VERIFY = R(0x14), 207 200 #undef R 208 201 }; 209 202 ··· 424 415 enum dma_data_direction data_direction; 425 416 /* For SAM Task Attribute */ 426 417 int sam_task_attr; 418 + /* Used for se_sess->sess_tag_pool */ 419 + unsigned int map_tag; 427 420 /* Transport protocol dependent state, see transport_state_table */ 428 421 enum transport_state_table t_state; 429 422 unsigned cmd_wait_set:1; ··· 455 444 struct kref cmd_kref; 456 445 struct target_core_fabric_ops *se_tfo; 457 446 sense_reason_t (*execute_cmd)(struct se_cmd *); 458 - void 
(*transport_complete_callback)(struct se_cmd *); 447 + sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *, 448 + u32, enum dma_data_direction); 449 + sense_reason_t (*transport_complete_callback)(struct se_cmd *); 459 450 460 451 unsigned char *t_task_cdb; 461 452 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; 462 453 unsigned long long t_task_lba; 454 + unsigned int t_task_nolb; 463 455 unsigned int transport_state; 464 456 #define CMD_T_ABORTED (1 << 0) 465 457 #define CMD_T_ACTIVE (1 << 1) ··· 483 469 struct work_struct work; 484 470 485 471 struct scatterlist *t_data_sg; 472 + struct scatterlist *t_data_sg_orig; 486 473 unsigned int t_data_nents; 474 + unsigned int t_data_nents_orig; 487 475 void *t_data_vmap; 488 476 struct scatterlist *t_bidi_data_sg; 489 477 unsigned int t_bidi_data_nents; ··· 552 536 struct list_head sess_wait_list; 553 537 spinlock_t sess_cmd_lock; 554 538 struct kref sess_kref; 539 + void *sess_cmd_map; 540 + struct percpu_ida sess_tag_pool; 555 541 }; 556 542 557 543 struct se_device; ··· 607 589 int emulate_tas; 608 590 int emulate_tpu; 609 591 int emulate_tpws; 592 + int emulate_caw; 593 + int emulate_3pc; 610 594 int enforce_pr_isids; 611 595 int is_nonrot; 612 596 int emulate_rest_reord; ··· 676 656 spinlock_t se_port_lock; 677 657 spinlock_t se_tmr_lock; 678 658 spinlock_t qf_cmd_lock; 659 + struct semaphore caw_sem; 679 660 /* Used for legacy SPC-2 reservationsa */ 680 661 struct se_node_acl *dev_reserved_node_acl; 681 662 /* Used for ALUA Logical Unit Group membership */ ··· 690 669 struct list_head delayed_cmd_list; 691 670 struct list_head state_list; 692 671 struct list_head qf_cmd_list; 672 + struct list_head g_dev_node; 693 673 /* Pointer to associated SE HBA */ 694 674 struct se_hba *se_hba; 695 675 /* T10 Inquiry and VPD WWN Information */
+30
include/target/target_core_fabric.h
··· 84 84 }; 85 85 86 86 struct se_session *transport_init_session(void); 87 + int transport_alloc_session_tags(struct se_session *, unsigned int, 88 + unsigned int); 89 + struct se_session *transport_init_session_tags(unsigned int, unsigned int); 87 90 void __transport_register_session(struct se_portal_group *, 88 91 struct se_node_acl *, struct se_session *, void *); 89 92 void transport_register_session(struct se_portal_group *, ··· 134 131 void core_tmr_release_req(struct se_tmr_req *); 135 132 int transport_generic_handle_tmr(struct se_cmd *); 136 133 void transport_generic_request_failure(struct se_cmd *, sense_reason_t); 134 + void __target_execute_cmd(struct se_cmd *); 137 135 int transport_lookup_tmr_lun(struct se_cmd *, u32); 138 136 139 137 struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *, ··· 178 174 struct t10_pr_registration *, int *); 179 175 char *iscsi_parse_pr_out_transport_id(struct se_portal_group *, const char *, 180 176 u32 *, char **); 177 + 178 + /* 179 + * The LIO target core uses DMA_TO_DEVICE to mean that data is going 180 + * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean 181 + * that data is coming from the target (eg handling a READ). However, 182 + * this is just the opposite of what we have to tell the DMA mapping 183 + * layer -- eg when handling a READ, the HBA will have to DMA the data 184 + * out of memory so it can send it to the initiator, which means we 185 + * need to use DMA_TO_DEVICE when we map the data. 186 + */ 187 + static inline enum dma_data_direction 188 + target_reverse_dma_direction(struct se_cmd *se_cmd) 189 + { 190 + if (se_cmd->se_cmd_flags & SCF_BIDI) 191 + return DMA_BIDIRECTIONAL; 192 + 193 + switch (se_cmd->data_direction) { 194 + case DMA_TO_DEVICE: 195 + return DMA_FROM_DEVICE; 196 + case DMA_FROM_DEVICE: 197 + return DMA_TO_DEVICE; 198 + case DMA_NONE: 199 + default: 200 + return DMA_NONE; 201 + } 202 + } 181 203 182 204 #endif /* TARGET_CORE_FABRICH */
+3 -2
lib/Makefile
··· 13 13 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ 14 14 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \ 15 15 is_single_threaded.o plist.o decompress.o kobject_uevent.o \ 16 - earlycpio.o percpu-refcount.o 16 + earlycpio.o percpu-refcount.o percpu_ida.o 17 17 18 18 obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o 19 19 lib-$(CONFIG_MMU) += ioremap.o ··· 25 25 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 26 26 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 27 27 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ 28 - bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o 28 + bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ 29 + percpu_ida.o 29 30 obj-y += string_helpers.o 30 31 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o 31 32 obj-y += kstrtox.o
+335
lib/percpu_ida.c
··· 1 + /* 2 + * Percpu IDA library 3 + * 4 + * Copyright (C) 2013 Datera, Inc. Kent Overstreet 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License as 8 + * published by the Free Software Foundation; either version 2, or (at 9 + * your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, but 12 + * WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 + * General Public License for more details. 15 + */ 16 + 17 + #include <linux/bitmap.h> 18 + #include <linux/bitops.h> 19 + #include <linux/bug.h> 20 + #include <linux/err.h> 21 + #include <linux/export.h> 22 + #include <linux/hardirq.h> 23 + #include <linux/idr.h> 24 + #include <linux/init.h> 25 + #include <linux/kernel.h> 26 + #include <linux/percpu.h> 27 + #include <linux/sched.h> 28 + #include <linux/slab.h> 29 + #include <linux/string.h> 30 + #include <linux/spinlock.h> 31 + #include <linux/percpu_ida.h> 32 + 33 + /* 34 + * Number of tags we move between the percpu freelist and the global freelist at 35 + * a time 36 + */ 37 + #define IDA_PCPU_BATCH_MOVE 32U 38 + 39 + /* Max size of percpu freelist, */ 40 + #define IDA_PCPU_SIZE ((IDA_PCPU_BATCH_MOVE * 3) / 2) 41 + 42 + struct percpu_ida_cpu { 43 + /* 44 + * Even though this is percpu, we need a lock for tag stealing by remote 45 + * CPUs: 46 + */ 47 + spinlock_t lock; 48 + 49 + /* nr_free/freelist form a stack of free IDs */ 50 + unsigned nr_free; 51 + unsigned freelist[]; 52 + }; 53 + 54 + static inline void move_tags(unsigned *dst, unsigned *dst_nr, 55 + unsigned *src, unsigned *src_nr, 56 + unsigned nr) 57 + { 58 + *src_nr -= nr; 59 + memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr); 60 + *dst_nr += nr; 61 + } 62 + 63 + /* 64 + * Try to steal tags from a remote cpu's percpu freelist. 
65 + * 66 + * We first check how many percpu freelists have tags - we don't steal tags 67 + * unless enough percpu freelists have tags on them that it's possible more than 68 + * half the total tags could be stuck on remote percpu freelists. 69 + * 70 + * Then we iterate through the cpus until we find some tags - we don't attempt 71 + * to find the "best" cpu to steal from, to keep cacheline bouncing to a 72 + * minimum. 73 + */ 74 + static inline void steal_tags(struct percpu_ida *pool, 75 + struct percpu_ida_cpu *tags) 76 + { 77 + unsigned cpus_have_tags, cpu = pool->cpu_last_stolen; 78 + struct percpu_ida_cpu *remote; 79 + 80 + for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); 81 + cpus_have_tags * IDA_PCPU_SIZE > pool->nr_tags / 2; 82 + cpus_have_tags--) { 83 + cpu = cpumask_next(cpu, &pool->cpus_have_tags); 84 + 85 + if (cpu >= nr_cpu_ids) { 86 + cpu = cpumask_first(&pool->cpus_have_tags); 87 + if (cpu >= nr_cpu_ids) 88 + BUG(); 89 + } 90 + 91 + pool->cpu_last_stolen = cpu; 92 + remote = per_cpu_ptr(pool->tag_cpu, cpu); 93 + 94 + cpumask_clear_cpu(cpu, &pool->cpus_have_tags); 95 + 96 + if (remote == tags) 97 + continue; 98 + 99 + spin_lock(&remote->lock); 100 + 101 + if (remote->nr_free) { 102 + memcpy(tags->freelist, 103 + remote->freelist, 104 + sizeof(unsigned) * remote->nr_free); 105 + 106 + tags->nr_free = remote->nr_free; 107 + remote->nr_free = 0; 108 + } 109 + 110 + spin_unlock(&remote->lock); 111 + 112 + if (tags->nr_free) 113 + break; 114 + } 115 + } 116 + 117 + /* 118 + * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto 119 + * our percpu freelist: 120 + */ 121 + static inline void alloc_global_tags(struct percpu_ida *pool, 122 + struct percpu_ida_cpu *tags) 123 + { 124 + move_tags(tags->freelist, &tags->nr_free, 125 + pool->freelist, &pool->nr_free, 126 + min(pool->nr_free, IDA_PCPU_BATCH_MOVE)); 127 + } 128 + 129 + static inline unsigned alloc_local_tag(struct percpu_ida *pool, 130 + struct percpu_ida_cpu 
*tags) 131 + { 132 + int tag = -ENOSPC; 133 + 134 + spin_lock(&tags->lock); 135 + if (tags->nr_free) 136 + tag = tags->freelist[--tags->nr_free]; 137 + spin_unlock(&tags->lock); 138 + 139 + return tag; 140 + } 141 + 142 + /** 143 + * percpu_ida_alloc - allocate a tag 144 + * @pool: pool to allocate from 145 + * @gfp: gfp flags 146 + * 147 + * Returns a tag - an integer in the range [0..nr_tags) (passed to 148 + * tag_pool_init()), or otherwise -ENOSPC on allocation failure. 149 + * 150 + * Safe to be called from interrupt context (assuming it isn't passed 151 + * __GFP_WAIT, of course). 152 + * 153 + * @gfp indicates whether or not to wait until a free id is available (it's not 154 + * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep 155 + * however long it takes until another thread frees an id (same semantics as a 156 + * mempool). 157 + * 158 + * Will not fail if passed __GFP_WAIT. 159 + */ 160 + int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) 161 + { 162 + DEFINE_WAIT(wait); 163 + struct percpu_ida_cpu *tags; 164 + unsigned long flags; 165 + int tag; 166 + 167 + local_irq_save(flags); 168 + tags = this_cpu_ptr(pool->tag_cpu); 169 + 170 + /* Fastpath */ 171 + tag = alloc_local_tag(pool, tags); 172 + if (likely(tag >= 0)) { 173 + local_irq_restore(flags); 174 + return tag; 175 + } 176 + 177 + while (1) { 178 + spin_lock(&pool->lock); 179 + 180 + /* 181 + * prepare_to_wait() must come before steal_tags(), in case 182 + * percpu_ida_free() on another cpu flips a bit in 183 + * cpus_have_tags 184 + * 185 + * global lock held and irqs disabled, don't need percpu lock 186 + */ 187 + prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); 188 + 189 + if (!tags->nr_free) 190 + alloc_global_tags(pool, tags); 191 + if (!tags->nr_free) 192 + steal_tags(pool, tags); 193 + 194 + if (tags->nr_free) { 195 + tag = tags->freelist[--tags->nr_free]; 196 + if (tags->nr_free) 197 + cpumask_set_cpu(smp_processor_id(), 198 + 
&pool->cpus_have_tags); 199 + } 200 + 201 + spin_unlock(&pool->lock); 202 + local_irq_restore(flags); 203 + 204 + if (tag >= 0 || !(gfp & __GFP_WAIT)) 205 + break; 206 + 207 + schedule(); 208 + 209 + local_irq_save(flags); 210 + tags = this_cpu_ptr(pool->tag_cpu); 211 + } 212 + 213 + finish_wait(&pool->wait, &wait); 214 + return tag; 215 + } 216 + EXPORT_SYMBOL_GPL(percpu_ida_alloc); 217 + 218 + /** 219 + * percpu_ida_free - free a tag 220 + * @pool: pool @tag was allocated from 221 + * @tag: a tag previously allocated with percpu_ida_alloc() 222 + * 223 + * Safe to be called from interrupt context. 224 + */ 225 + void percpu_ida_free(struct percpu_ida *pool, unsigned tag) 226 + { 227 + struct percpu_ida_cpu *tags; 228 + unsigned long flags; 229 + unsigned nr_free; 230 + 231 + BUG_ON(tag >= pool->nr_tags); 232 + 233 + local_irq_save(flags); 234 + tags = this_cpu_ptr(pool->tag_cpu); 235 + 236 + spin_lock(&tags->lock); 237 + tags->freelist[tags->nr_free++] = tag; 238 + 239 + nr_free = tags->nr_free; 240 + spin_unlock(&tags->lock); 241 + 242 + if (nr_free == 1) { 243 + cpumask_set_cpu(smp_processor_id(), 244 + &pool->cpus_have_tags); 245 + wake_up(&pool->wait); 246 + } 247 + 248 + if (nr_free == IDA_PCPU_SIZE) { 249 + spin_lock(&pool->lock); 250 + 251 + /* 252 + * Global lock held and irqs disabled, don't need percpu 253 + * lock 254 + */ 255 + if (tags->nr_free == IDA_PCPU_SIZE) { 256 + move_tags(pool->freelist, &pool->nr_free, 257 + tags->freelist, &tags->nr_free, 258 + IDA_PCPU_BATCH_MOVE); 259 + 260 + wake_up(&pool->wait); 261 + } 262 + spin_unlock(&pool->lock); 263 + } 264 + 265 + local_irq_restore(flags); 266 + } 267 + EXPORT_SYMBOL_GPL(percpu_ida_free); 268 + 269 + /** 270 + * percpu_ida_destroy - release a tag pool's resources 271 + * @pool: pool to free 272 + * 273 + * Frees the resources allocated by percpu_ida_init(). 
274 + */ 275 + void percpu_ida_destroy(struct percpu_ida *pool) 276 + { 277 + free_percpu(pool->tag_cpu); 278 + free_pages((unsigned long) pool->freelist, 279 + get_order(pool->nr_tags * sizeof(unsigned))); 280 + } 281 + EXPORT_SYMBOL_GPL(percpu_ida_destroy); 282 + 283 + /** 284 + * percpu_ida_init - initialize a percpu tag pool 285 + * @pool: pool to initialize 286 + * @nr_tags: number of tags that will be available for allocation 287 + * 288 + * Initializes @pool so that it can be used to allocate tags - integers in the 289 + * range [0, nr_tags). Typically, they'll be used by driver code to refer to a 290 + * preallocated array of tag structures. 291 + * 292 + * Allocation is percpu, but sharding is limited by nr_tags - for best 293 + * performance, the workload should not span more cpus than nr_tags / 128. 294 + */ 295 + int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags) 296 + { 297 + unsigned i, cpu, order; 298 + 299 + memset(pool, 0, sizeof(*pool)); 300 + 301 + init_waitqueue_head(&pool->wait); 302 + spin_lock_init(&pool->lock); 303 + pool->nr_tags = nr_tags; 304 + 305 + /* Guard against overflow */ 306 + if (nr_tags > (unsigned) INT_MAX + 1) { 307 + pr_err("percpu_ida_init(): nr_tags too large\n"); 308 + return -EINVAL; 309 + } 310 + 311 + order = get_order(nr_tags * sizeof(unsigned)); 312 + pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order); 313 + if (!pool->freelist) 314 + return -ENOMEM; 315 + 316 + for (i = 0; i < nr_tags; i++) 317 + pool->freelist[i] = i; 318 + 319 + pool->nr_free = nr_tags; 320 + 321 + pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) + 322 + IDA_PCPU_SIZE * sizeof(unsigned), 323 + sizeof(unsigned)); 324 + if (!pool->tag_cpu) 325 + goto err; 326 + 327 + for_each_possible_cpu(cpu) 328 + spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock); 329 + 330 + return 0; 331 + err: 332 + percpu_ida_destroy(pool); 333 + return -ENOMEM; 334 + } 335 + EXPORT_SYMBOL_GPL(percpu_ida_init);