Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-next-merge' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending into vhost-net-next

+4477 -1009
+1
drivers/infiniband/Kconfig
··· 59 59 source "drivers/infiniband/ulp/srpt/Kconfig" 60 60 61 61 source "drivers/infiniband/ulp/iser/Kconfig" 62 + source "drivers/infiniband/ulp/isert/Kconfig" 62 63 63 64 endif # INFINIBAND
+1
drivers/infiniband/Makefile
··· 13 13 obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ 14 14 obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/ 15 15 obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/ 16 + obj-$(CONFIG_INFINIBAND_ISERT) += ulp/isert/
+5
drivers/infiniband/ulp/isert/Kconfig
config INFINIBAND_ISERT
	tristate "iSCSI Extensions for RDMA (iSER) target support"
	depends on INET && INFINIBAND_ADDR_TRANS && TARGET_CORE && ISCSI_TARGET
	---help---
	  Support for iSCSI Extensions for RDMA (iSER) Target on InfiniBand fabrics.
+2
drivers/infiniband/ulp/isert/Makefile
# Pick up iSCSI target core headers for the iSER target module.
ccflags-y			:= -Idrivers/target -Idrivers/target/iscsi
obj-$(CONFIG_INFINIBAND_ISERT)	+= ib_isert.o
+2281
drivers/infiniband/ulp/isert/ib_isert.c
··· 1 + /******************************************************************************* 2 + * This file contains iSCSI extentions for RDMA (iSER) Verbs 3 + * 4 + * (c) Copyright 2013 RisingTide Systems LLC. 5 + * 6 + * Nicholas A. Bellinger <nab@linux-iscsi.org> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + ****************************************************************************/ 18 + 19 + #include <linux/string.h> 20 + #include <linux/module.h> 21 + #include <linux/scatterlist.h> 22 + #include <linux/socket.h> 23 + #include <linux/in.h> 24 + #include <linux/in6.h> 25 + #include <rdma/ib_verbs.h> 26 + #include <rdma/rdma_cm.h> 27 + #include <target/target_core_base.h> 28 + #include <target/target_core_fabric.h> 29 + #include <target/iscsi/iscsi_transport.h> 30 + 31 + #include "isert_proto.h" 32 + #include "ib_isert.h" 33 + 34 + #define ISERT_MAX_CONN 8 35 + #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN) 36 + #define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN) 37 + 38 + static DEFINE_MUTEX(device_list_mutex); 39 + static LIST_HEAD(device_list); 40 + static struct workqueue_struct *isert_rx_wq; 41 + static struct workqueue_struct *isert_comp_wq; 42 + static struct kmem_cache *isert_cmd_cache; 43 + 44 + static void 45 + isert_qp_event_callback(struct ib_event *e, void *context) 46 + { 47 + struct isert_conn *isert_conn = (struct isert_conn *)context; 48 + 49 + pr_err("isert_qp_event_callback event: %d\n", e->event); 50 + switch 
(e->event) { 51 + case IB_EVENT_COMM_EST: 52 + rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST); 53 + break; 54 + case IB_EVENT_QP_LAST_WQE_REACHED: 55 + pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n"); 56 + break; 57 + default: 58 + break; 59 + } 60 + } 61 + 62 + static int 63 + isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr) 64 + { 65 + int ret; 66 + 67 + ret = ib_query_device(ib_dev, devattr); 68 + if (ret) { 69 + pr_err("ib_query_device() failed: %d\n", ret); 70 + return ret; 71 + } 72 + pr_debug("devattr->max_sge: %d\n", devattr->max_sge); 73 + pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd); 74 + 75 + return 0; 76 + } 77 + 78 + static int 79 + isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) 80 + { 81 + struct isert_device *device = isert_conn->conn_device; 82 + struct ib_qp_init_attr attr; 83 + struct ib_device_attr devattr; 84 + int ret, index, min_index = 0; 85 + 86 + memset(&devattr, 0, sizeof(struct ib_device_attr)); 87 + ret = isert_query_device(cma_id->device, &devattr); 88 + if (ret) 89 + return ret; 90 + 91 + mutex_lock(&device_list_mutex); 92 + for (index = 0; index < device->cqs_used; index++) 93 + if (device->cq_active_qps[index] < 94 + device->cq_active_qps[min_index]) 95 + min_index = index; 96 + device->cq_active_qps[min_index]++; 97 + pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index); 98 + mutex_unlock(&device_list_mutex); 99 + 100 + memset(&attr, 0, sizeof(struct ib_qp_init_attr)); 101 + attr.event_handler = isert_qp_event_callback; 102 + attr.qp_context = isert_conn; 103 + attr.send_cq = device->dev_tx_cq[min_index]; 104 + attr.recv_cq = device->dev_rx_cq[min_index]; 105 + attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; 106 + attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; 107 + /* 108 + * FIXME: Use devattr.max_sge - 2 for max_send_sge as 109 + * work-around for RDMA_READ.. 
110 + */ 111 + attr.cap.max_send_sge = devattr.max_sge - 2; 112 + isert_conn->max_sge = attr.cap.max_send_sge; 113 + 114 + attr.cap.max_recv_sge = 1; 115 + attr.sq_sig_type = IB_SIGNAL_REQ_WR; 116 + attr.qp_type = IB_QPT_RC; 117 + 118 + pr_debug("isert_conn_setup_qp cma_id->device: %p\n", 119 + cma_id->device); 120 + pr_debug("isert_conn_setup_qp conn_pd->device: %p\n", 121 + isert_conn->conn_pd->device); 122 + 123 + ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr); 124 + if (ret) { 125 + pr_err("rdma_create_qp failed for cma_id %d\n", ret); 126 + return ret; 127 + } 128 + isert_conn->conn_qp = cma_id->qp; 129 + pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n"); 130 + 131 + return 0; 132 + } 133 + 134 + static void 135 + isert_cq_event_callback(struct ib_event *e, void *context) 136 + { 137 + pr_debug("isert_cq_event_callback event: %d\n", e->event); 138 + } 139 + 140 + static int 141 + isert_alloc_rx_descriptors(struct isert_conn *isert_conn) 142 + { 143 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 144 + struct iser_rx_desc *rx_desc; 145 + struct ib_sge *rx_sg; 146 + u64 dma_addr; 147 + int i, j; 148 + 149 + isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS * 150 + sizeof(struct iser_rx_desc), GFP_KERNEL); 151 + if (!isert_conn->conn_rx_descs) 152 + goto fail; 153 + 154 + rx_desc = isert_conn->conn_rx_descs; 155 + 156 + for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 157 + dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, 158 + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 159 + if (ib_dma_mapping_error(ib_dev, dma_addr)) 160 + goto dma_map_fail; 161 + 162 + rx_desc->dma_addr = dma_addr; 163 + 164 + rx_sg = &rx_desc->rx_sg; 165 + rx_sg->addr = rx_desc->dma_addr; 166 + rx_sg->length = ISER_RX_PAYLOAD_SIZE; 167 + rx_sg->lkey = isert_conn->conn_mr->lkey; 168 + } 169 + 170 + isert_conn->conn_rx_desc_head = 0; 171 + return 0; 172 + 173 + dma_map_fail: 174 + rx_desc = isert_conn->conn_rx_descs; 175 + for 
(j = 0; j < i; j++, rx_desc++) { 176 + ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 177 + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 178 + } 179 + kfree(isert_conn->conn_rx_descs); 180 + isert_conn->conn_rx_descs = NULL; 181 + fail: 182 + return -ENOMEM; 183 + } 184 + 185 + static void 186 + isert_free_rx_descriptors(struct isert_conn *isert_conn) 187 + { 188 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 189 + struct iser_rx_desc *rx_desc; 190 + int i; 191 + 192 + if (!isert_conn->conn_rx_descs) 193 + return; 194 + 195 + rx_desc = isert_conn->conn_rx_descs; 196 + for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 197 + ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 198 + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 199 + } 200 + 201 + kfree(isert_conn->conn_rx_descs); 202 + isert_conn->conn_rx_descs = NULL; 203 + } 204 + 205 + static void isert_cq_tx_callback(struct ib_cq *, void *); 206 + static void isert_cq_rx_callback(struct ib_cq *, void *); 207 + 208 + static int 209 + isert_create_device_ib_res(struct isert_device *device) 210 + { 211 + struct ib_device *ib_dev = device->ib_device; 212 + struct isert_cq_desc *cq_desc; 213 + int ret = 0, i, j; 214 + 215 + device->cqs_used = min_t(int, num_online_cpus(), 216 + device->ib_device->num_comp_vectors); 217 + device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used); 218 + pr_debug("Using %d CQs, device %s supports %d vectors\n", 219 + device->cqs_used, device->ib_device->name, 220 + device->ib_device->num_comp_vectors); 221 + device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) * 222 + device->cqs_used, GFP_KERNEL); 223 + if (!device->cq_desc) { 224 + pr_err("Unable to allocate device->cq_desc\n"); 225 + return -ENOMEM; 226 + } 227 + cq_desc = device->cq_desc; 228 + 229 + device->dev_pd = ib_alloc_pd(ib_dev); 230 + if (IS_ERR(device->dev_pd)) { 231 + ret = PTR_ERR(device->dev_pd); 232 + pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret); 233 + goto out_cq_desc; 234 + } 235 + 236 + for (i = 0; i < 
device->cqs_used; i++) { 237 + cq_desc[i].device = device; 238 + cq_desc[i].cq_index = i; 239 + 240 + device->dev_rx_cq[i] = ib_create_cq(device->ib_device, 241 + isert_cq_rx_callback, 242 + isert_cq_event_callback, 243 + (void *)&cq_desc[i], 244 + ISER_MAX_RX_CQ_LEN, i); 245 + if (IS_ERR(device->dev_rx_cq[i])) 246 + goto out_cq; 247 + 248 + device->dev_tx_cq[i] = ib_create_cq(device->ib_device, 249 + isert_cq_tx_callback, 250 + isert_cq_event_callback, 251 + (void *)&cq_desc[i], 252 + ISER_MAX_TX_CQ_LEN, i); 253 + if (IS_ERR(device->dev_tx_cq[i])) 254 + goto out_cq; 255 + 256 + if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP)) 257 + goto out_cq; 258 + 259 + if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP)) 260 + goto out_cq; 261 + } 262 + 263 + device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE); 264 + if (IS_ERR(device->dev_mr)) { 265 + ret = PTR_ERR(device->dev_mr); 266 + pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret); 267 + goto out_cq; 268 + } 269 + 270 + return 0; 271 + 272 + out_cq: 273 + for (j = 0; j < i; j++) { 274 + cq_desc = &device->cq_desc[j]; 275 + 276 + if (device->dev_rx_cq[j]) { 277 + cancel_work_sync(&cq_desc->cq_rx_work); 278 + ib_destroy_cq(device->dev_rx_cq[j]); 279 + } 280 + if (device->dev_tx_cq[j]) { 281 + cancel_work_sync(&cq_desc->cq_tx_work); 282 + ib_destroy_cq(device->dev_tx_cq[j]); 283 + } 284 + } 285 + ib_dealloc_pd(device->dev_pd); 286 + 287 + out_cq_desc: 288 + kfree(device->cq_desc); 289 + 290 + return ret; 291 + } 292 + 293 + static void 294 + isert_free_device_ib_res(struct isert_device *device) 295 + { 296 + struct isert_cq_desc *cq_desc; 297 + int i; 298 + 299 + for (i = 0; i < device->cqs_used; i++) { 300 + cq_desc = &device->cq_desc[i]; 301 + 302 + cancel_work_sync(&cq_desc->cq_rx_work); 303 + cancel_work_sync(&cq_desc->cq_tx_work); 304 + ib_destroy_cq(device->dev_rx_cq[i]); 305 + ib_destroy_cq(device->dev_tx_cq[i]); 306 + device->dev_rx_cq[i] = NULL; 307 + device->dev_tx_cq[i] = 
NULL; 308 + } 309 + 310 + ib_dereg_mr(device->dev_mr); 311 + ib_dealloc_pd(device->dev_pd); 312 + kfree(device->cq_desc); 313 + } 314 + 315 + static void 316 + isert_device_try_release(struct isert_device *device) 317 + { 318 + mutex_lock(&device_list_mutex); 319 + device->refcount--; 320 + if (!device->refcount) { 321 + isert_free_device_ib_res(device); 322 + list_del(&device->dev_node); 323 + kfree(device); 324 + } 325 + mutex_unlock(&device_list_mutex); 326 + } 327 + 328 + static struct isert_device * 329 + isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id) 330 + { 331 + struct isert_device *device; 332 + int ret; 333 + 334 + mutex_lock(&device_list_mutex); 335 + list_for_each_entry(device, &device_list, dev_node) { 336 + if (device->ib_device->node_guid == cma_id->device->node_guid) { 337 + device->refcount++; 338 + mutex_unlock(&device_list_mutex); 339 + return device; 340 + } 341 + } 342 + 343 + device = kzalloc(sizeof(struct isert_device), GFP_KERNEL); 344 + if (!device) { 345 + mutex_unlock(&device_list_mutex); 346 + return ERR_PTR(-ENOMEM); 347 + } 348 + 349 + INIT_LIST_HEAD(&device->dev_node); 350 + 351 + device->ib_device = cma_id->device; 352 + ret = isert_create_device_ib_res(device); 353 + if (ret) { 354 + kfree(device); 355 + mutex_unlock(&device_list_mutex); 356 + return ERR_PTR(ret); 357 + } 358 + 359 + device->refcount++; 360 + list_add_tail(&device->dev_node, &device_list); 361 + mutex_unlock(&device_list_mutex); 362 + 363 + return device; 364 + } 365 + 366 + static int 367 + isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 368 + { 369 + struct iscsi_np *np = cma_id->context; 370 + struct isert_np *isert_np = np->np_context; 371 + struct isert_conn *isert_conn; 372 + struct isert_device *device; 373 + struct ib_device *ib_dev = cma_id->device; 374 + int ret = 0; 375 + 376 + pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", 377 + cma_id, cma_id->context); 378 + 379 + isert_conn = 
kzalloc(sizeof(struct isert_conn), GFP_KERNEL); 380 + if (!isert_conn) { 381 + pr_err("Unable to allocate isert_conn\n"); 382 + return -ENOMEM; 383 + } 384 + isert_conn->state = ISER_CONN_INIT; 385 + INIT_LIST_HEAD(&isert_conn->conn_accept_node); 386 + init_completion(&isert_conn->conn_login_comp); 387 + init_waitqueue_head(&isert_conn->conn_wait); 388 + init_waitqueue_head(&isert_conn->conn_wait_comp_err); 389 + kref_init(&isert_conn->conn_kref); 390 + kref_get(&isert_conn->conn_kref); 391 + 392 + cma_id->context = isert_conn; 393 + isert_conn->conn_cm_id = cma_id; 394 + isert_conn->responder_resources = event->param.conn.responder_resources; 395 + isert_conn->initiator_depth = event->param.conn.initiator_depth; 396 + pr_debug("Using responder_resources: %u initiator_depth: %u\n", 397 + isert_conn->responder_resources, isert_conn->initiator_depth); 398 + 399 + isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + 400 + ISER_RX_LOGIN_SIZE, GFP_KERNEL); 401 + if (!isert_conn->login_buf) { 402 + pr_err("Unable to allocate isert_conn->login_buf\n"); 403 + ret = -ENOMEM; 404 + goto out; 405 + } 406 + 407 + isert_conn->login_req_buf = isert_conn->login_buf; 408 + isert_conn->login_rsp_buf = isert_conn->login_buf + 409 + ISCSI_DEF_MAX_RECV_SEG_LEN; 410 + pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n", 411 + isert_conn->login_buf, isert_conn->login_req_buf, 412 + isert_conn->login_rsp_buf); 413 + 414 + isert_conn->login_req_dma = ib_dma_map_single(ib_dev, 415 + (void *)isert_conn->login_req_buf, 416 + ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE); 417 + 418 + ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); 419 + if (ret) { 420 + pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n", 421 + ret); 422 + isert_conn->login_req_dma = 0; 423 + goto out_login_buf; 424 + } 425 + 426 + isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, 427 + (void *)isert_conn->login_rsp_buf, 428 + ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); 429 + 430 + 
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 431 + if (ret) { 432 + pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n", 433 + ret); 434 + isert_conn->login_rsp_dma = 0; 435 + goto out_req_dma_map; 436 + } 437 + 438 + device = isert_device_find_by_ib_dev(cma_id); 439 + if (IS_ERR(device)) { 440 + ret = PTR_ERR(device); 441 + goto out_rsp_dma_map; 442 + } 443 + 444 + isert_conn->conn_device = device; 445 + isert_conn->conn_pd = device->dev_pd; 446 + isert_conn->conn_mr = device->dev_mr; 447 + 448 + ret = isert_conn_setup_qp(isert_conn, cma_id); 449 + if (ret) 450 + goto out_conn_dev; 451 + 452 + mutex_lock(&isert_np->np_accept_mutex); 453 + list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node); 454 + mutex_unlock(&isert_np->np_accept_mutex); 455 + 456 + pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np); 457 + wake_up(&isert_np->np_accept_wq); 458 + return 0; 459 + 460 + out_conn_dev: 461 + isert_device_try_release(device); 462 + out_rsp_dma_map: 463 + ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 464 + ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); 465 + out_req_dma_map: 466 + ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 467 + ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE); 468 + out_login_buf: 469 + kfree(isert_conn->login_buf); 470 + out: 471 + kfree(isert_conn); 472 + return ret; 473 + } 474 + 475 + static void 476 + isert_connect_release(struct isert_conn *isert_conn) 477 + { 478 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 479 + struct isert_device *device = isert_conn->conn_device; 480 + int cq_index; 481 + 482 + pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 483 + 484 + if (isert_conn->conn_qp) { 485 + cq_index = ((struct isert_cq_desc *) 486 + isert_conn->conn_qp->recv_cq->cq_context)->cq_index; 487 + pr_debug("isert_connect_release: cq_index: %d\n", cq_index); 488 + isert_conn->conn_device->cq_active_qps[cq_index]--; 489 + 490 + 
rdma_destroy_qp(isert_conn->conn_cm_id); 491 + } 492 + 493 + isert_free_rx_descriptors(isert_conn); 494 + rdma_destroy_id(isert_conn->conn_cm_id); 495 + 496 + if (isert_conn->login_buf) { 497 + ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 498 + ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); 499 + ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 500 + ISCSI_DEF_MAX_RECV_SEG_LEN, 501 + DMA_FROM_DEVICE); 502 + kfree(isert_conn->login_buf); 503 + } 504 + kfree(isert_conn); 505 + 506 + if (device) 507 + isert_device_try_release(device); 508 + 509 + pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n"); 510 + } 511 + 512 + static void 513 + isert_connected_handler(struct rdma_cm_id *cma_id) 514 + { 515 + return; 516 + } 517 + 518 + static void 519 + isert_release_conn_kref(struct kref *kref) 520 + { 521 + struct isert_conn *isert_conn = container_of(kref, 522 + struct isert_conn, conn_kref); 523 + 524 + pr_debug("Calling isert_connect_release for final kref %s/%d\n", 525 + current->comm, current->pid); 526 + 527 + isert_connect_release(isert_conn); 528 + } 529 + 530 + static void 531 + isert_put_conn(struct isert_conn *isert_conn) 532 + { 533 + kref_put(&isert_conn->conn_kref, isert_release_conn_kref); 534 + } 535 + 536 + static void 537 + isert_disconnect_work(struct work_struct *work) 538 + { 539 + struct isert_conn *isert_conn = container_of(work, 540 + struct isert_conn, conn_logout_work); 541 + 542 + pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 543 + 544 + isert_conn->state = ISER_CONN_DOWN; 545 + 546 + if (isert_conn->post_recv_buf_count == 0 && 547 + atomic_read(&isert_conn->post_send_buf_count) == 0) { 548 + pr_debug("Calling wake_up(&isert_conn->conn_wait);\n"); 549 + wake_up(&isert_conn->conn_wait); 550 + } 551 + 552 + isert_put_conn(isert_conn); 553 + } 554 + 555 + static void 556 + isert_disconnected_handler(struct rdma_cm_id *cma_id) 557 + { 558 + struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; 
559 + 560 + INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); 561 + schedule_work(&isert_conn->conn_logout_work); 562 + } 563 + 564 + static int 565 + isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 566 + { 567 + int ret = 0; 568 + 569 + pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n", 570 + event->event, event->status, cma_id->context, cma_id); 571 + 572 + switch (event->event) { 573 + case RDMA_CM_EVENT_CONNECT_REQUEST: 574 + pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n"); 575 + ret = isert_connect_request(cma_id, event); 576 + break; 577 + case RDMA_CM_EVENT_ESTABLISHED: 578 + pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n"); 579 + isert_connected_handler(cma_id); 580 + break; 581 + case RDMA_CM_EVENT_DISCONNECTED: 582 + pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n"); 583 + isert_disconnected_handler(cma_id); 584 + break; 585 + case RDMA_CM_EVENT_DEVICE_REMOVAL: 586 + case RDMA_CM_EVENT_ADDR_CHANGE: 587 + break; 588 + case RDMA_CM_EVENT_CONNECT_ERROR: 589 + default: 590 + pr_err("Unknown RDMA CMA event: %d\n", event->event); 591 + break; 592 + } 593 + 594 + if (ret != 0) { 595 + pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", 596 + event->event, ret); 597 + dump_stack(); 598 + } 599 + 600 + return ret; 601 + } 602 + 603 + static int 604 + isert_post_recv(struct isert_conn *isert_conn, u32 count) 605 + { 606 + struct ib_recv_wr *rx_wr, *rx_wr_failed; 607 + int i, ret; 608 + unsigned int rx_head = isert_conn->conn_rx_desc_head; 609 + struct iser_rx_desc *rx_desc; 610 + 611 + for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) { 612 + rx_desc = &isert_conn->conn_rx_descs[rx_head]; 613 + rx_wr->wr_id = (unsigned long)rx_desc; 614 + rx_wr->sg_list = &rx_desc->rx_sg; 615 + rx_wr->num_sge = 1; 616 + rx_wr->next = rx_wr + 1; 617 + rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1); 618 + } 619 + 620 + rx_wr--; 621 + rx_wr->next = NULL; /* mark end 
of work requests list */ 622 + 623 + isert_conn->post_recv_buf_count += count; 624 + ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr, 625 + &rx_wr_failed); 626 + if (ret) { 627 + pr_err("ib_post_recv() failed with ret: %d\n", ret); 628 + isert_conn->post_recv_buf_count -= count; 629 + } else { 630 + pr_debug("isert_post_recv(): Posted %d RX buffers\n", count); 631 + isert_conn->conn_rx_desc_head = rx_head; 632 + } 633 + return ret; 634 + } 635 + 636 + static int 637 + isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) 638 + { 639 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 640 + struct ib_send_wr send_wr, *send_wr_failed; 641 + int ret; 642 + 643 + ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, 644 + ISER_HEADERS_LEN, DMA_TO_DEVICE); 645 + 646 + send_wr.next = NULL; 647 + send_wr.wr_id = (unsigned long)tx_desc; 648 + send_wr.sg_list = tx_desc->tx_sg; 649 + send_wr.num_sge = tx_desc->num_sge; 650 + send_wr.opcode = IB_WR_SEND; 651 + send_wr.send_flags = IB_SEND_SIGNALED; 652 + 653 + atomic_inc(&isert_conn->post_send_buf_count); 654 + 655 + ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed); 656 + if (ret) { 657 + pr_err("ib_post_send() failed, ret: %d\n", ret); 658 + atomic_dec(&isert_conn->post_send_buf_count); 659 + } 660 + 661 + return ret; 662 + } 663 + 664 + static void 665 + isert_create_send_desc(struct isert_conn *isert_conn, 666 + struct isert_cmd *isert_cmd, 667 + struct iser_tx_desc *tx_desc) 668 + { 669 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 670 + 671 + ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, 672 + ISER_HEADERS_LEN, DMA_TO_DEVICE); 673 + 674 + memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr)); 675 + tx_desc->iser_header.flags = ISER_VER; 676 + 677 + tx_desc->num_sge = 1; 678 + tx_desc->isert_cmd = isert_cmd; 679 + 680 + if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) { 681 + tx_desc->tx_sg[0].lkey = 
isert_conn->conn_mr->lkey; 682 + pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc); 683 + } 684 + } 685 + 686 + static int 687 + isert_init_tx_hdrs(struct isert_conn *isert_conn, 688 + struct iser_tx_desc *tx_desc) 689 + { 690 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 691 + u64 dma_addr; 692 + 693 + dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, 694 + ISER_HEADERS_LEN, DMA_TO_DEVICE); 695 + if (ib_dma_mapping_error(ib_dev, dma_addr)) { 696 + pr_err("ib_dma_mapping_error() failed\n"); 697 + return -ENOMEM; 698 + } 699 + 700 + tx_desc->dma_addr = dma_addr; 701 + tx_desc->tx_sg[0].addr = tx_desc->dma_addr; 702 + tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; 703 + tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; 704 + 705 + pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u" 706 + " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr, 707 + tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey); 708 + 709 + return 0; 710 + } 711 + 712 + static void 713 + isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr) 714 + { 715 + isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND; 716 + send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; 717 + send_wr->opcode = IB_WR_SEND; 718 + send_wr->send_flags = IB_SEND_SIGNALED; 719 + send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0]; 720 + send_wr->num_sge = isert_cmd->tx_desc.num_sge; 721 + } 722 + 723 + static int 724 + isert_rdma_post_recvl(struct isert_conn *isert_conn) 725 + { 726 + struct ib_recv_wr rx_wr, *rx_wr_fail; 727 + struct ib_sge sge; 728 + int ret; 729 + 730 + memset(&sge, 0, sizeof(struct ib_sge)); 731 + sge.addr = isert_conn->login_req_dma; 732 + sge.length = ISER_RX_LOGIN_SIZE; 733 + sge.lkey = isert_conn->conn_mr->lkey; 734 + 735 + pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n", 736 + sge.addr, sge.length, sge.lkey); 737 + 738 + memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); 739 + rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf; 740 + rx_wr.sg_list = &sge; 
741 + rx_wr.num_sge = 1; 742 + 743 + isert_conn->post_recv_buf_count++; 744 + ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail); 745 + if (ret) { 746 + pr_err("ib_post_recv() failed: %d\n", ret); 747 + isert_conn->post_recv_buf_count--; 748 + } 749 + 750 + pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n"); 751 + return ret; 752 + } 753 + 754 + static int 755 + isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, 756 + u32 length) 757 + { 758 + struct isert_conn *isert_conn = conn->context; 759 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 760 + struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc; 761 + int ret; 762 + 763 + isert_create_send_desc(isert_conn, NULL, tx_desc); 764 + 765 + memcpy(&tx_desc->iscsi_header, &login->rsp[0], 766 + sizeof(struct iscsi_hdr)); 767 + 768 + isert_init_tx_hdrs(isert_conn, tx_desc); 769 + 770 + if (length > 0) { 771 + struct ib_sge *tx_dsg = &tx_desc->tx_sg[1]; 772 + 773 + ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma, 774 + length, DMA_TO_DEVICE); 775 + 776 + memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length); 777 + 778 + ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma, 779 + length, DMA_TO_DEVICE); 780 + 781 + tx_dsg->addr = isert_conn->login_rsp_dma; 782 + tx_dsg->length = length; 783 + tx_dsg->lkey = isert_conn->conn_mr->lkey; 784 + tx_desc->num_sge = 2; 785 + } 786 + if (!login->login_failed) { 787 + if (login->login_complete) { 788 + ret = isert_alloc_rx_descriptors(isert_conn); 789 + if (ret) 790 + return ret; 791 + 792 + ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX); 793 + if (ret) 794 + return ret; 795 + 796 + isert_conn->state = ISER_CONN_UP; 797 + goto post_send; 798 + } 799 + 800 + ret = isert_rdma_post_recvl(isert_conn); 801 + if (ret) 802 + return ret; 803 + } 804 + post_send: 805 + ret = isert_post_send(isert_conn, tx_desc); 806 + if (ret) 807 + return ret; 808 + 809 + return 0; 810 + } 811 + 812 
+ static void 813 + isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen, 814 + struct isert_conn *isert_conn) 815 + { 816 + struct iscsi_conn *conn = isert_conn->conn; 817 + struct iscsi_login *login = conn->conn_login; 818 + int size; 819 + 820 + if (!login) { 821 + pr_err("conn->conn_login is NULL\n"); 822 + dump_stack(); 823 + return; 824 + } 825 + 826 + if (login->first_request) { 827 + struct iscsi_login_req *login_req = 828 + (struct iscsi_login_req *)&rx_desc->iscsi_header; 829 + /* 830 + * Setup the initial iscsi_login values from the leading 831 + * login request PDU. 832 + */ 833 + login->leading_connection = (!login_req->tsih) ? 1 : 0; 834 + login->current_stage = 835 + (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) 836 + >> 2; 837 + login->version_min = login_req->min_version; 838 + login->version_max = login_req->max_version; 839 + memcpy(login->isid, login_req->isid, 6); 840 + login->cmd_sn = be32_to_cpu(login_req->cmdsn); 841 + login->init_task_tag = login_req->itt; 842 + login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); 843 + login->cid = be16_to_cpu(login_req->cid); 844 + login->tsih = be16_to_cpu(login_req->tsih); 845 + } 846 + 847 + memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); 848 + 849 + size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); 850 + pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n", 851 + size, rx_buflen, MAX_KEY_VALUE_PAIRS); 852 + memcpy(login->req_buf, &rx_desc->data[0], size); 853 + 854 + complete(&isert_conn->conn_login_comp); 855 + } 856 + 857 + static void 858 + isert_release_cmd(struct iscsi_cmd *cmd) 859 + { 860 + struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd, 861 + iscsi_cmd); 862 + 863 + pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd); 864 + 865 + kfree(cmd->buf_ptr); 866 + kfree(cmd->tmr_req); 867 + 868 + kmem_cache_free(isert_cmd_cache, isert_cmd); 869 + } 870 + 871 + static struct iscsi_cmd 872 
+ *isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp) 873 + { 874 + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 875 + struct isert_cmd *isert_cmd; 876 + 877 + isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp); 878 + if (!isert_cmd) { 879 + pr_err("Unable to allocate isert_cmd\n"); 880 + return NULL; 881 + } 882 + isert_cmd->conn = isert_conn; 883 + isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd; 884 + 885 + return &isert_cmd->iscsi_cmd; 886 + } 887 + 888 + static int 889 + isert_handle_scsi_cmd(struct isert_conn *isert_conn, 890 + struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc, 891 + unsigned char *buf) 892 + { 893 + struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 894 + struct iscsi_conn *conn = isert_conn->conn; 895 + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 896 + struct scatterlist *sg; 897 + int imm_data, imm_data_len, unsol_data, sg_nents, rc; 898 + bool dump_payload = false; 899 + 900 + rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 901 + if (rc < 0) 902 + return rc; 903 + 904 + imm_data = cmd->immediate_data; 905 + imm_data_len = cmd->first_burst_len; 906 + unsol_data = cmd->unsolicited_data; 907 + 908 + rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 909 + if (rc < 0) { 910 + return 0; 911 + } else if (rc > 0) { 912 + dump_payload = true; 913 + goto sequence_cmd; 914 + } 915 + 916 + if (!imm_data) 917 + return 0; 918 + 919 + sg = &cmd->se_cmd.t_data_sg[0]; 920 + sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 921 + 922 + pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", 923 + sg, sg_nents, &rx_desc->data[0], imm_data_len); 924 + 925 + sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); 926 + 927 + cmd->write_data_done += imm_data_len; 928 + 929 + if (cmd->write_data_done == cmd->se_cmd.data_length) { 930 + spin_lock_bh(&cmd->istate_lock); 931 + cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 932 + cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 933 + 
spin_unlock_bh(&cmd->istate_lock); 934 + } 935 + 936 + sequence_cmd: 937 + rc = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 938 + 939 + if (!rc && dump_payload == false && unsol_data) 940 + iscsit_set_unsoliticed_dataout(cmd); 941 + 942 + if (rc == CMDSN_ERROR_CANNOT_RECOVER) 943 + return iscsit_add_reject_from_cmd( 944 + ISCSI_REASON_PROTOCOL_ERROR, 945 + 1, 0, (unsigned char *)hdr, cmd); 946 + 947 + return 0; 948 + } 949 + 950 + static int 951 + isert_handle_iscsi_dataout(struct isert_conn *isert_conn, 952 + struct iser_rx_desc *rx_desc, unsigned char *buf) 953 + { 954 + struct scatterlist *sg_start; 955 + struct iscsi_conn *conn = isert_conn->conn; 956 + struct iscsi_cmd *cmd = NULL; 957 + struct iscsi_data *hdr = (struct iscsi_data *)buf; 958 + u32 unsol_data_len = ntoh24(hdr->dlength); 959 + int rc, sg_nents, sg_off, page_off; 960 + 961 + rc = iscsit_check_dataout_hdr(conn, buf, &cmd); 962 + if (rc < 0) 963 + return rc; 964 + else if (!cmd) 965 + return 0; 966 + /* 967 + * FIXME: Unexpected unsolicited_data out 968 + */ 969 + if (!cmd->unsolicited_data) { 970 + pr_err("Received unexpected solicited data payload\n"); 971 + dump_stack(); 972 + return -1; 973 + } 974 + 975 + pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n", 976 + unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length); 977 + 978 + sg_off = cmd->write_data_done / PAGE_SIZE; 979 + sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 980 + sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE)); 981 + page_off = cmd->write_data_done % PAGE_SIZE; 982 + /* 983 + * FIXME: Non page-aligned unsolicited_data out 984 + */ 985 + if (page_off) { 986 + pr_err("Received unexpected non-page aligned data payload\n"); 987 + dump_stack(); 988 + return -1; 989 + } 990 + pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n", 991 + sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len); 992 + 993 + sg_copy_from_buffer(sg_start, sg_nents, 
			    &rx_desc->data[0], unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}

/*
 * Demultiplex a received iSCSI PDU by opcode. For new-command opcodes a
 * core iscsi_cmd is allocated here (via iscsit_allocate_cmd, which routes
 * to isert_alloc_cmd); the iSER read/write stag+va from the iSER header
 * are stashed on the isert_cmd for later RDMA.
 */
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
		/* Save remote RDMA handles for the later READ/WRITE phases */
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd,
					    rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_nop_out(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		/* DataOut references an existing cmd; no allocation here */
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP
						    * HZ);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

/*
 * Parse the iSER header of a received descriptor, extract any remote
 * stag/va advertisements, then hand the embedded iSCSI PDU to
 * isert_rx_opcode().
 */
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		pr_debug("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		pr_err("iSER Hello message\n");
		break;
	default:
		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	/* NOTE(review): rc is currently unused; errors are handled internally */
	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}

/*
 * RX work-completion handler: sync the buffer for CPU access, dispatch
 * login vs. full-feature-phase traffic, then replenish the RX queue once
 * the number of posted buffers drops below the ISERT_MIN_POSTED_RX mark.
 */
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	/* The dedicated login buffer has its own DMA mapping and size */
	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}
	else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf)
		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
				   isert_conn);
	else
		isert_rx_do_work(desc, isert_conn);

	/* Hand the buffer back to the device before it is re-posted */
	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	/* The login buffer is single-use; never re-posted from here */
	if ((char *)desc == isert_conn->login_req_buf)
		return;

	/* Top up the receive queue in batches of ISERT_MIN_POSTED_RX */
	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				     ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}

/*
 * Release the per-command RDMA resources: unmap the data scatterlist
 * and free the RDMA work-request and SGE arrays. Safe to call when the
 * resources were never allocated (kfree(NULL) is a no-op).
 */
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");

	if (wr->sge) {
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
		wr->sge = NULL;
	}

	kfree(wr->send_wr);
	wr->send_wr = NULL;

	kfree(isert_cmd->ib_sge);
	isert_cmd->ib_sge = NULL;
}

/*
 * Final release path for an isert_cmd, dispatched on the original
 * iSCSI opcode. The SCSI_CMD -> TMFUNC and NOOP_OUT -> default
 * fall-throughs below are intentional and share the tail of each arm.
 */
static void
isert_put_cmd(struct isert_cmd *isert_cmd)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn;

	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		conn = isert_conn->conn;

		/* Unlink from the connection's command list, if queued */
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE)
			iscsit_stop_dataout_timer(cmd);

		isert_unmap_cmd(isert_cmd, isert_conn);
		/*
		 * Fall-through
		 */
	case ISCSI_OP_SCSI_TMFUNC:
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
		conn = isert_conn->conn;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		isert_release_cmd(cmd);
		break;
	}
}

/*
 * Unmap a TX descriptor's iSER+iSCSI header DMA mapping; dma_addr == 0
 * doubles as the "not mapped" sentinel.
 */
static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

/*
 * Completion-side teardown: unmap any attached SENSE payload and the TX
 * header, then drop the command. After this returns, isert_cmd must not
 * be touched.
 */
static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev)
{
	if (isert_cmd->sense_buf_dma != 0) {
		pr_debug("Calling ib_dma_unmap_single for isert_cmd->sense_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->sense_buf_dma,
				    isert_cmd->sense_buf_len, DMA_TO_DEVICE);
		isert_cmd->sense_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd);
}

/*
 * RDMA_READ completion for a WRITE command: all WRITE payload has now
 * landed in the se_cmd scatterlist, so release the RDMA resources, mark
 * the DataOut sequence complete, and kick backend execution.
 */
static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;

	iscsit_stop_dataout_timer(cmd);

	if (wr->sge) {
		pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
		wr->sge = NULL;
	}

	if (isert_cmd->ib_sge) {
		pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
		kfree(isert_cmd->ib_sge);
		isert_cmd->ib_sge = NULL;
	}

	cmd->write_data_done =
se_cmd->data_length; 1287 + 1288 + pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n"); 1289 + spin_lock_bh(&cmd->istate_lock); 1290 + cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1291 + cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1292 + spin_unlock_bh(&cmd->istate_lock); 1293 + 1294 + target_execute_cmd(se_cmd); 1295 + } 1296 + 1297 + static void 1298 + isert_do_control_comp(struct work_struct *work) 1299 + { 1300 + struct isert_cmd *isert_cmd = container_of(work, 1301 + struct isert_cmd, comp_work); 1302 + struct isert_conn *isert_conn = isert_cmd->conn; 1303 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1304 + struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1305 + 1306 + switch (cmd->i_state) { 1307 + case ISTATE_SEND_TASKMGTRSP: 1308 + pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n"); 1309 + 1310 + atomic_dec(&isert_conn->post_send_buf_count); 1311 + iscsit_tmr_post_handler(cmd, cmd->conn); 1312 + 1313 + cmd->i_state = ISTATE_SENT_STATUS; 1314 + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev); 1315 + break; 1316 + case ISTATE_SEND_REJECT: 1317 + pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n"); 1318 + atomic_dec(&isert_conn->post_send_buf_count); 1319 + 1320 + cmd->i_state = ISTATE_SENT_STATUS; 1321 + complete(&cmd->reject_comp); 1322 + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev); 1323 + case ISTATE_SEND_LOGOUTRSP: 1324 + pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); 1325 + /* 1326 + * Call atomic_dec(&isert_conn->post_send_buf_count) 1327 + * from isert_free_conn() 1328 + */ 1329 + isert_conn->logout_posted = true; 1330 + iscsit_logout_post_handler(cmd, cmd->conn); 1331 + break; 1332 + default: 1333 + pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state); 1334 + dump_stack(); 1335 + break; 1336 + } 1337 + } 1338 + 1339 + static void 1340 + isert_response_completion(struct iser_tx_desc *tx_desc, 1341 + struct isert_cmd *isert_cmd, 1342 + struct 
			  isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;

	/* TMR and logout responses need process context; punt to workqueue */
	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}
	atomic_dec(&isert_conn->post_send_buf_count);

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev);
}

/*
 * Successful TX completion dispatch, keyed on which RDMA operation the
 * work request performed. A NULL isert_cmd means a bare descriptor
 * (e.g. login response) with nothing further to release.
 */
static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		atomic_dec(&isert_conn->post_send_buf_count);
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
		/* A RECV op must never appear on the send CQ */
		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
		dump_stack();
		break;
	case ISER_IB_SEND:
		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		/* RDMA_WRITEs are unsignaled here; completion not expected */
		pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		dump_stack();
		break;
	case ISER_IB_RDMA_READ:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}

/*
 * Flush/error completion path for either CQ. Releases the failed TX
 * descriptor (if any) and, once both posted RX and TX counts drain to
 * zero, moves the connection to TERMINATING and wakes the waiter.
 */
static void
isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct
		   &cq_desc->cq_tx_work);
}

/*
 * RX completion-queue worker: drain the CQ one WC at a time, dispatch
 * successful receives, and route failures through the error path before
 * re-arming the CQ for the next completion.
 */
static void
isert_cq_rx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
			struct isert_cq_desc, cq_rx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_rx_desc *rx_desc;
	struct ib_wc wc;
	unsigned long xfer_len;

	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
		/* wr_id carries the RX descriptor pointer posted earlier */
		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			xfer_len = (unsigned long)wc.byte_len;
			isert_rx_completion(rx_desc, isert_conn, xfer_len);
		} else {
			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			/* Flush errors during teardown are expected; stay quiet */
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				pr_debug("RX wc.status: 0x%08x\n", wc.status);

			isert_conn->post_recv_buf_count--;
			isert_cq_comp_err(NULL, isert_conn);
		}
	}

	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
}

/* RX CQ interrupt callback: defer all processing to isert_rx_wq */
static void
isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}

/*
 * Post a command's prepared TX descriptor as an IB SEND. The in-flight
 * counter is bumped before posting and rolled back on failure.
 */
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		pr_err("ib_post_send failed with %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
		return ret;
	}
	return ret;
}

/*
 * iscsi_transport put_response hook: build and post a SCSI Response PDU,
 * attaching SENSE data as a second SGE when the backend reported it.
 */
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, sense_len;

		/* iSCSI sense payload is prefixed with a be16 length field */
		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		/* Pad DataSegment to a 4-byte boundary per RFC 3720 framing */
		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		sense_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, sense_len,
				DMA_TO_DEVICE);

		isert_cmd->sense_buf_len = sense_len;
		tx_dsg->addr = isert_cmd->sense_buf_dma;
		tx_dsg->length = sense_len;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_nopin(struct
iscsi_cmd *cmd, struct iscsi_conn *conn, 1576 + bool nopout_response) 1577 + { 1578 + struct isert_cmd *isert_cmd = container_of(cmd, 1579 + struct isert_cmd, iscsi_cmd); 1580 + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1581 + struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1582 + 1583 + isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1584 + iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *) 1585 + &isert_cmd->tx_desc.iscsi_header, 1586 + nopout_response); 1587 + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1588 + isert_init_send_wr(isert_cmd, send_wr); 1589 + 1590 + pr_debug("Posting NOPIN Reponse IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 1591 + 1592 + return isert_post_response(isert_conn, isert_cmd); 1593 + } 1594 + 1595 + static int 1596 + isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1597 + { 1598 + struct isert_cmd *isert_cmd = container_of(cmd, 1599 + struct isert_cmd, iscsi_cmd); 1600 + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1601 + struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1602 + 1603 + isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1604 + iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) 1605 + &isert_cmd->tx_desc.iscsi_header); 1606 + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1607 + isert_init_send_wr(isert_cmd, send_wr); 1608 + 1609 + pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 1610 + 1611 + return isert_post_response(isert_conn, isert_cmd); 1612 + } 1613 + 1614 + static int 1615 + isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1616 + { 1617 + struct isert_cmd *isert_cmd = container_of(cmd, 1618 + struct isert_cmd, iscsi_cmd); 1619 + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1620 + struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1621 + 1622 + isert_create_send_desc(isert_conn, isert_cmd, 
&isert_cmd->tx_desc); 1623 + iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) 1624 + &isert_cmd->tx_desc.iscsi_header); 1625 + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1626 + isert_init_send_wr(isert_cmd, send_wr); 1627 + 1628 + pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 1629 + 1630 + return isert_post_response(isert_conn, isert_cmd); 1631 + } 1632 + 1633 + static int 1634 + isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1635 + { 1636 + struct isert_cmd *isert_cmd = container_of(cmd, 1637 + struct isert_cmd, iscsi_cmd); 1638 + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1639 + struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1640 + 1641 + isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1642 + iscsit_build_reject(cmd, conn, (struct iscsi_reject *) 1643 + &isert_cmd->tx_desc.iscsi_header); 1644 + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1645 + isert_init_send_wr(isert_cmd, send_wr); 1646 + 1647 + pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 1648 + 1649 + return isert_post_response(isert_conn, isert_cmd); 1650 + } 1651 + 1652 + static int 1653 + isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1654 + struct ib_sge *ib_sge, struct ib_send_wr *send_wr, 1655 + u32 data_left, u32 offset) 1656 + { 1657 + struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1658 + struct scatterlist *sg_start, *tmp_sg; 1659 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1660 + u32 sg_off, page_off; 1661 + int i = 0, sg_nents; 1662 + 1663 + sg_off = offset / PAGE_SIZE; 1664 + sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1665 + sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge); 1666 + page_off = offset % PAGE_SIZE; 1667 + 1668 + send_wr->sg_list = ib_sge; 1669 + send_wr->num_sge = sg_nents; 1670 + send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; 1671 + /* 1672 + * 
Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		/* Clamp the final SGE so the WR never exceeds data_left */
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n",
			 ib_sge->addr, ib_sge->length);
		/* Only the first SGE may start at a non-zero page offset */
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}

/*
 * iscsi_transport put_datain hook for READ commands: DMA-map the data
 * scatterlist, build a chain of RDMA_WRITE work requests that push the
 * payload to the initiator's advertised read stag/va, then append the
 * SCSI Response SEND and post the whole chain.
 */
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *ib_sge;
	struct scatterlist *sg;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;

	pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);

	sg = &se_cmd->t_data_sg[0];
	sg_nents = se_cmd->t_data_nents;

	count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map put_datain SGs\n");
		return -EINVAL;
	}
	wr->sge = sg;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
		 count, sg, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate datain ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
		 ib_sge, se_cmd->t_data_nents);

	/* One WR per max_sge-sized slice of the scatterlist */
	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		pr_err("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	cmd->stat_sn = conn->stat_sn++;

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	data_left = se_cmd->data_length;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_WRITE;
		/* Unsignaled: completion is tracked via the trailing SEND */
		send_wr->send_flags = 0;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		/* Chain the SCSI Response SEND after the last RDMA_WRITE */
		if (i + 1 == wr->send_wr_num)
			send_wr->next = &isert_cmd->tx_desc.send_wr;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		data_left -= data_len;
	}
	/*
	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
	 */
	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
			     &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
	return 1;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	return ret;
}

/*
 * iscsi_transport get_dataout hook for WRITE commands: build and post a
 * chain of RDMA_READ work requests that pull the remaining payload from
 * the initiator's advertised write stag/va into the se_cmd scatterlist,
 * resuming at write_data_done.
 */
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_sge *ib_sge;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct scatterlist *sg_start;
	u32 sg_off, sg_nents, page_off, va_offset = 0;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, i, ib_sge_cnt;

	pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
		 se_cmd->data_length, cmd->write_data_done);

	/* Resume from where unsolicited/immediate data left off */
	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	page_off = cmd->write_data_done % PAGE_SIZE;

	pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
		 sg_off, sg_start, page_off);

	data_left = se_cmd->data_length - cmd->write_data_done;
	sg_nents = se_cmd->t_data_nents - sg_off;

	pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
		 data_left, sg_nents);

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map get_dataout SGs\n");
		return -EINVAL;
	}
	wr->sge = sg_start;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
		 count, sg_start, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate dataout ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n",
		 ib_sge, sg_nents);

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		pr_debug("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	wr->iser_ib_op = ISER_IB_RDMA_READ;
	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	offset = cmd->write_data_done;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_READ;
		/* va_offset starts at 0: remote buffer excludes data already received */
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		/* Only the final RDMA_READ signals completion */
		if (i + 1 == wr->send_wr_num)
			send_wr->send_flags = IB_SEND_SIGNALED;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n");
	return 0;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	return ret;
}

/*
 * iscsi_transport immediate_queue hook: only NOPIN-want-response is
 * handled immediately by this transport.
 */
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		pr_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * iscsi_transport response_queue hook: dispatch a queued response state
 * to the matching PDU builder/poster.
 */
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret) {
			/* Tell the caller the TX thread must stop after logout */
			pr_debug("Returning iSER Logout -EAGAIN\n");
			ret = -EAGAIN;
		}
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd excecution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * iscsi_transport setup_np hook: allocate the iSER network portal state
 * and create + bind + listen on an RDMA-CM listener for the portal
 * address.
 */
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	struct sockaddr *sa;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	init_waitqueue_head(&isert_np->np_accept_wq);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);

	sa = (struct sockaddr *)ksockaddr;
	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
1990 + */ 1991 + memcpy(&np->np_sockaddr, ksockaddr, 1992 + sizeof(struct __kernel_sockaddr_storage)); 1993 + 1994 + isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP, 1995 + IB_QPT_RC); 1996 + if (IS_ERR(isert_lid)) { 1997 + pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n", 1998 + PTR_ERR(isert_lid)); 1999 + ret = PTR_ERR(isert_lid); 2000 + goto out; 2001 + } 2002 + 2003 + ret = rdma_bind_addr(isert_lid, sa); 2004 + if (ret) { 2005 + pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret); 2006 + goto out_lid; 2007 + } 2008 + 2009 + ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG); 2010 + if (ret) { 2011 + pr_err("rdma_listen() for isert_lid failed: %d\n", ret); 2012 + goto out_lid; 2013 + } 2014 + 2015 + isert_np->np_cm_id = isert_lid; 2016 + np->np_context = isert_np; 2017 + pr_debug("Setup isert_lid->context: %p\n", isert_lid->context); 2018 + 2019 + return 0; 2020 + 2021 + out_lid: 2022 + rdma_destroy_id(isert_lid); 2023 + out: 2024 + kfree(isert_np); 2025 + return ret; 2026 + } 2027 + 2028 + static int 2029 + isert_check_accept_queue(struct isert_np *isert_np) 2030 + { 2031 + int empty; 2032 + 2033 + mutex_lock(&isert_np->np_accept_mutex); 2034 + empty = list_empty(&isert_np->np_accept_list); 2035 + mutex_unlock(&isert_np->np_accept_mutex); 2036 + 2037 + return empty; 2038 + } 2039 + 2040 + static int 2041 + isert_rdma_accept(struct isert_conn *isert_conn) 2042 + { 2043 + struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; 2044 + struct rdma_conn_param cp; 2045 + int ret; 2046 + 2047 + memset(&cp, 0, sizeof(struct rdma_conn_param)); 2048 + cp.responder_resources = isert_conn->responder_resources; 2049 + cp.initiator_depth = isert_conn->initiator_depth; 2050 + cp.retry_count = 7; 2051 + cp.rnr_retry_count = 7; 2052 + 2053 + pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n"); 2054 + 2055 + ret = rdma_accept(cm_id, &cp); 2056 + if (ret) { 2057 + pr_err("rdma_accept() failed with: %d\n", ret); 2058 + return ret; 2059 + } 
2060 + 2061 + pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n"); 2062 + 2063 + return 0; 2064 + } 2065 + 2066 + static int 2067 + isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) 2068 + { 2069 + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2070 + int ret; 2071 + 2072 + pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn); 2073 + 2074 + ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); 2075 + if (ret) 2076 + return ret; 2077 + 2078 + pr_debug("isert_get_login_rx processing login->req: %p\n", login->req); 2079 + return 0; 2080 + } 2081 + 2082 + static void 2083 + isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, 2084 + struct isert_conn *isert_conn) 2085 + { 2086 + struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; 2087 + struct rdma_route *cm_route = &cm_id->route; 2088 + struct sockaddr_in *sock_in; 2089 + struct sockaddr_in6 *sock_in6; 2090 + 2091 + conn->login_family = np->np_sockaddr.ss_family; 2092 + 2093 + if (np->np_sockaddr.ss_family == AF_INET6) { 2094 + sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr; 2095 + snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", 2096 + &sock_in6->sin6_addr.in6_u); 2097 + conn->login_port = ntohs(sock_in6->sin6_port); 2098 + 2099 + sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr; 2100 + snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c", 2101 + &sock_in6->sin6_addr.in6_u); 2102 + conn->local_port = ntohs(sock_in6->sin6_port); 2103 + } else { 2104 + sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr; 2105 + sprintf(conn->login_ip, "%pI4", 2106 + &sock_in->sin_addr.s_addr); 2107 + conn->login_port = ntohs(sock_in->sin_port); 2108 + 2109 + sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr; 2110 + sprintf(conn->local_ip, "%pI4", 2111 + &sock_in->sin_addr.s_addr); 2112 + conn->local_port = ntohs(sock_in->sin_port); 2113 + } 2114 + } 2115 + 2116 + static int 2117 + 
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) 2118 + { 2119 + struct isert_np *isert_np = (struct isert_np *)np->np_context; 2120 + struct isert_conn *isert_conn; 2121 + int max_accept = 0, ret; 2122 + 2123 + accept_wait: 2124 + ret = wait_event_interruptible(isert_np->np_accept_wq, 2125 + !isert_check_accept_queue(isert_np) || 2126 + np->np_thread_state == ISCSI_NP_THREAD_RESET); 2127 + if (max_accept > 5) 2128 + return -ENODEV; 2129 + 2130 + spin_lock_bh(&np->np_thread_lock); 2131 + if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 2132 + spin_unlock_bh(&np->np_thread_lock); 2133 + pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); 2134 + return -ENODEV; 2135 + } 2136 + spin_unlock_bh(&np->np_thread_lock); 2137 + 2138 + mutex_lock(&isert_np->np_accept_mutex); 2139 + if (list_empty(&isert_np->np_accept_list)) { 2140 + mutex_unlock(&isert_np->np_accept_mutex); 2141 + max_accept++; 2142 + goto accept_wait; 2143 + } 2144 + isert_conn = list_first_entry(&isert_np->np_accept_list, 2145 + struct isert_conn, conn_accept_node); 2146 + list_del_init(&isert_conn->conn_accept_node); 2147 + mutex_unlock(&isert_np->np_accept_mutex); 2148 + 2149 + conn->context = isert_conn; 2150 + isert_conn->conn = conn; 2151 + max_accept = 0; 2152 + 2153 + ret = isert_rdma_post_recvl(isert_conn); 2154 + if (ret) 2155 + return ret; 2156 + 2157 + ret = isert_rdma_accept(isert_conn); 2158 + if (ret) 2159 + return ret; 2160 + 2161 + isert_set_conn_info(np, conn, isert_conn); 2162 + 2163 + pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn); 2164 + return 0; 2165 + } 2166 + 2167 + static void 2168 + isert_free_np(struct iscsi_np *np) 2169 + { 2170 + struct isert_np *isert_np = (struct isert_np *)np->np_context; 2171 + 2172 + rdma_destroy_id(isert_np->np_cm_id); 2173 + 2174 + np->np_context = NULL; 2175 + kfree(isert_np); 2176 + } 2177 + 2178 + static void isert_free_conn(struct iscsi_conn *conn) 2179 + { 2180 + struct isert_conn *isert_conn = conn->context; 
2181 + 2182 + pr_debug("isert_free_conn: Starting \n"); 2183 + /* 2184 + * Decrement post_send_buf_count for special case when called 2185 + * from isert_do_control_comp() -> iscsit_logout_post_handler() 2186 + */ 2187 + if (isert_conn->logout_posted) 2188 + atomic_dec(&isert_conn->post_send_buf_count); 2189 + 2190 + if (isert_conn->conn_cm_id) 2191 + rdma_disconnect(isert_conn->conn_cm_id); 2192 + /* 2193 + * Only wait for conn_wait_comp_err if the isert_conn made it 2194 + * into full feature phase.. 2195 + */ 2196 + if (isert_conn->state > ISER_CONN_INIT) { 2197 + pr_debug("isert_free_conn: Before wait_event comp_err %d\n", 2198 + isert_conn->state); 2199 + wait_event(isert_conn->conn_wait_comp_err, 2200 + isert_conn->state == ISER_CONN_TERMINATING); 2201 + pr_debug("isert_free_conn: After wait_event #1 >>>>>>>>>>>>\n"); 2202 + } 2203 + 2204 + pr_debug("isert_free_conn: wait_event conn_wait %d\n", isert_conn->state); 2205 + wait_event(isert_conn->conn_wait, isert_conn->state == ISER_CONN_DOWN); 2206 + pr_debug("isert_free_conn: After wait_event #2 >>>>>>>>>>>>>>>>>>>>\n"); 2207 + 2208 + isert_put_conn(isert_conn); 2209 + } 2210 + 2211 + static struct iscsit_transport iser_target_transport = { 2212 + .name = "IB/iSER", 2213 + .transport_type = ISCSI_INFINIBAND, 2214 + .owner = THIS_MODULE, 2215 + .iscsit_setup_np = isert_setup_np, 2216 + .iscsit_accept_np = isert_accept_np, 2217 + .iscsit_free_np = isert_free_np, 2218 + .iscsit_free_conn = isert_free_conn, 2219 + .iscsit_alloc_cmd = isert_alloc_cmd, 2220 + .iscsit_get_login_rx = isert_get_login_rx, 2221 + .iscsit_put_login_tx = isert_put_login_tx, 2222 + .iscsit_immediate_queue = isert_immediate_queue, 2223 + .iscsit_response_queue = isert_response_queue, 2224 + .iscsit_get_dataout = isert_get_dataout, 2225 + .iscsit_queue_data_in = isert_put_datain, 2226 + .iscsit_queue_status = isert_put_response, 2227 + }; 2228 + 2229 + static int __init isert_init(void) 2230 + { 2231 + int ret; 2232 + 2233 + isert_rx_wq = 
alloc_workqueue("isert_rx_wq", 0, 0); 2234 + if (!isert_rx_wq) { 2235 + pr_err("Unable to allocate isert_rx_wq\n"); 2236 + return -ENOMEM; 2237 + } 2238 + 2239 + isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0); 2240 + if (!isert_comp_wq) { 2241 + pr_err("Unable to allocate isert_comp_wq\n"); 2242 + ret = -ENOMEM; 2243 + goto destroy_rx_wq; 2244 + } 2245 + 2246 + isert_cmd_cache = kmem_cache_create("isert_cmd_cache", 2247 + sizeof(struct isert_cmd), __alignof__(struct isert_cmd), 2248 + 0, NULL); 2249 + if (!isert_cmd_cache) { 2250 + pr_err("Unable to create isert_cmd_cache\n"); 2251 + ret = -ENOMEM; 2252 + goto destroy_tx_cq; 2253 + } 2254 + 2255 + iscsit_register_transport(&iser_target_transport); 2256 + pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n"); 2257 + return 0; 2258 + 2259 + destroy_tx_cq: 2260 + destroy_workqueue(isert_comp_wq); 2261 + destroy_rx_wq: 2262 + destroy_workqueue(isert_rx_wq); 2263 + return ret; 2264 + } 2265 + 2266 + static void __exit isert_exit(void) 2267 + { 2268 + kmem_cache_destroy(isert_cmd_cache); 2269 + destroy_workqueue(isert_comp_wq); 2270 + destroy_workqueue(isert_rx_wq); 2271 + iscsit_unregister_transport(&iser_target_transport); 2272 + pr_debug("iSER_TARGET[0] - Released iser_target_transport\n"); 2273 + } 2274 + 2275 + MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); 2276 + MODULE_VERSION("0.1"); 2277 + MODULE_AUTHOR("nab@Linux-iSCSI.org"); 2278 + MODULE_LICENSE("GPL"); 2279 + 2280 + module_init(isert_init); 2281 + module_exit(isert_exit);
+138
drivers/infiniband/ulp/isert/ib_isert.h
··· 1 + #include <linux/socket.h> 2 + #include <linux/in.h> 3 + #include <linux/in6.h> 4 + #include <rdma/ib_verbs.h> 5 + #include <rdma/rdma_cm.h> 6 + 7 + #define ISERT_RDMA_LISTEN_BACKLOG 10 8 + 9 + enum isert_desc_type { 10 + ISCSI_TX_CONTROL, 11 + ISCSI_TX_DATAIN 12 + }; 13 + 14 + enum iser_ib_op_code { 15 + ISER_IB_RECV, 16 + ISER_IB_SEND, 17 + ISER_IB_RDMA_WRITE, 18 + ISER_IB_RDMA_READ, 19 + }; 20 + 21 + enum iser_conn_state { 22 + ISER_CONN_INIT, 23 + ISER_CONN_UP, 24 + ISER_CONN_TERMINATING, 25 + ISER_CONN_DOWN, 26 + }; 27 + 28 + struct iser_rx_desc { 29 + struct iser_hdr iser_header; 30 + struct iscsi_hdr iscsi_header; 31 + char data[ISER_RECV_DATA_SEG_LEN]; 32 + u64 dma_addr; 33 + struct ib_sge rx_sg; 34 + char pad[ISER_RX_PAD_SIZE]; 35 + } __packed; 36 + 37 + struct iser_tx_desc { 38 + struct iser_hdr iser_header; 39 + struct iscsi_hdr iscsi_header; 40 + enum isert_desc_type type; 41 + u64 dma_addr; 42 + struct ib_sge tx_sg[2]; 43 + int num_sge; 44 + struct isert_cmd *isert_cmd; 45 + struct ib_send_wr send_wr; 46 + } __packed; 47 + 48 + struct isert_rdma_wr { 49 + struct list_head wr_list; 50 + struct isert_cmd *isert_cmd; 51 + enum iser_ib_op_code iser_ib_op; 52 + struct ib_sge *ib_sge; 53 + int num_sge; 54 + struct scatterlist *sge; 55 + int send_wr_num; 56 + struct ib_send_wr *send_wr; 57 + }; 58 + 59 + struct isert_cmd { 60 + uint32_t read_stag; 61 + uint32_t write_stag; 62 + uint64_t read_va; 63 + uint64_t write_va; 64 + u64 sense_buf_dma; 65 + u32 sense_buf_len; 66 + u32 read_va_off; 67 + u32 write_va_off; 68 + u32 rdma_wr_num; 69 + struct isert_conn *conn; 70 + struct iscsi_cmd iscsi_cmd; 71 + struct ib_sge *ib_sge; 72 + struct iser_tx_desc tx_desc; 73 + struct isert_rdma_wr rdma_wr; 74 + struct work_struct comp_work; 75 + }; 76 + 77 + struct isert_device; 78 + 79 + struct isert_conn { 80 + enum iser_conn_state state; 81 + bool logout_posted; 82 + int post_recv_buf_count; 83 + atomic_t post_send_buf_count; 84 + u32 responder_resources; 85 + u32 
initiator_depth; 86 + u32 max_sge; 87 + char *login_buf; 88 + char *login_req_buf; 89 + char *login_rsp_buf; 90 + u64 login_req_dma; 91 + u64 login_rsp_dma; 92 + unsigned int conn_rx_desc_head; 93 + struct iser_rx_desc *conn_rx_descs; 94 + struct ib_recv_wr conn_rx_wr[ISERT_MIN_POSTED_RX]; 95 + struct iscsi_conn *conn; 96 + struct list_head conn_accept_node; 97 + struct completion conn_login_comp; 98 + struct iser_tx_desc conn_login_tx_desc; 99 + struct rdma_cm_id *conn_cm_id; 100 + struct ib_pd *conn_pd; 101 + struct ib_mr *conn_mr; 102 + struct ib_qp *conn_qp; 103 + struct isert_device *conn_device; 104 + struct work_struct conn_logout_work; 105 + wait_queue_head_t conn_wait; 106 + wait_queue_head_t conn_wait_comp_err; 107 + struct kref conn_kref; 108 + }; 109 + 110 + #define ISERT_MAX_CQ 64 111 + 112 + struct isert_cq_desc { 113 + struct isert_device *device; 114 + int cq_index; 115 + struct work_struct cq_rx_work; 116 + struct work_struct cq_tx_work; 117 + }; 118 + 119 + struct isert_device { 120 + int cqs_used; 121 + int refcount; 122 + int cq_active_qps[ISERT_MAX_CQ]; 123 + struct ib_device *ib_device; 124 + struct ib_pd *dev_pd; 125 + struct ib_mr *dev_mr; 126 + struct ib_cq *dev_rx_cq[ISERT_MAX_CQ]; 127 + struct ib_cq *dev_tx_cq[ISERT_MAX_CQ]; 128 + struct isert_cq_desc *cq_desc; 129 + struct list_head dev_node; 130 + }; 131 + 132 + struct isert_np { 133 + wait_queue_head_t np_accept_wq; 134 + struct rdma_cm_id *np_cm_id; 135 + struct mutex np_accept_mutex; 136 + struct list_head np_accept_list; 137 + struct completion np_login_comp; 138 + };
+47
drivers/infiniband/ulp/isert/isert_proto.h
··· 1 + /* From iscsi_iser.h */ 2 + 3 + struct iser_hdr { 4 + u8 flags; 5 + u8 rsvd[3]; 6 + __be32 write_stag; /* write rkey */ 7 + __be64 write_va; 8 + __be32 read_stag; /* read rkey */ 9 + __be64 read_va; 10 + } __packed; 11 + 12 + /*Constant PDU lengths calculations */ 13 + #define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr)) 14 + 15 + #define ISER_RECV_DATA_SEG_LEN 8192 16 + #define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN) 17 + #define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN) 18 + 19 + /* QP settings */ 20 + /* Maximal bounds on received asynchronous PDUs */ 21 + #define ISERT_MAX_TX_MISC_PDUS 4 /* NOOP_IN(2) , ASYNC_EVENT(2) */ 22 + 23 + #define ISERT_MAX_RX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), * 24 + * SCSI_TMFUNC(2), LOGOUT(1) */ 25 + 26 + #define ISCSI_DEF_XMIT_CMDS_MAX 128 /* from libiscsi.h, must be power of 2 */ 27 + 28 + #define ISERT_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX) 29 + 30 + #define ISERT_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2) 31 + 32 + #define ISERT_INFLIGHT_DATAOUTS 8 33 + 34 + #define ISERT_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \ 35 + (1 + ISERT_INFLIGHT_DATAOUTS) + \ 36 + ISERT_MAX_TX_MISC_PDUS + \ 37 + ISERT_MAX_RX_MISC_PDUS) 38 + 39 + #define ISER_RX_PAD_SIZE (ISER_RECV_DATA_SEG_LEN + 4096 - \ 40 + (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge))) 41 + 42 + #define ISER_VER 0x10 43 + #define ISER_WSV 0x08 44 + #define ISER_RSV 0x04 45 + #define ISCSI_CTRL 0x10 46 + #define ISER_HELLO 0x20 47 + #define ISER_HELLORPLY 0x30
-19
drivers/scsi/qla2xxx/qla_target.c
··· 2585 2585 ha->tgt.tgt_ops->free_cmd(cmd); 2586 2586 } 2587 2587 2588 - /* ha->hardware_lock supposed to be held on entry */ 2589 - /* called via callback from qla2xxx */ 2590 - void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle) 2591 - { 2592 - struct qla_hw_data *ha = vha->hw; 2593 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 2594 - 2595 - if (likely(tgt == NULL)) { 2596 - ql_dbg(ql_dbg_tgt, vha, 0xe021, 2597 - "CTIO, but target mode not enabled" 2598 - " (ha %d %p handle %#x)", vha->vp_idx, ha, handle); 2599 - return; 2600 - } 2601 - 2602 - tgt->irq_cmd_count++; 2603 - qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL); 2604 - tgt->irq_cmd_count--; 2605 - } 2606 - 2607 2588 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, 2608 2589 uint8_t task_codes) 2609 2590 {
-1
drivers/scsi/qla2xxx/qla_target.h
··· 980 980 extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); 981 981 extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); 982 982 extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); 983 - extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t); 984 983 extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *); 985 984 extern void qlt_enable_vha(struct scsi_qla_host *); 986 985 extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
+2 -1
drivers/target/iscsi/Makefile
··· 15 15 iscsi_target_util.o \ 16 16 iscsi_target.o \ 17 17 iscsi_target_configfs.o \ 18 - iscsi_target_stat.o 18 + iscsi_target_stat.o \ 19 + iscsi_target_transport.o 19 20 20 21 obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o
+708 -484
drivers/target/iscsi/iscsi_target.c
··· 49 49 #include "iscsi_target_device.h" 50 50 #include "iscsi_target_stat.h" 51 51 52 + #include <target/iscsi/iscsi_transport.h> 53 + 52 54 static LIST_HEAD(g_tiqn_list); 53 55 static LIST_HEAD(g_np_list); 54 56 static DEFINE_SPINLOCK(tiqn_lock); ··· 70 68 struct kmem_cache *lio_r2t_cache; 71 69 72 70 static int iscsit_handle_immediate_data(struct iscsi_cmd *, 73 - unsigned char *buf, u32); 74 - static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *); 71 + struct iscsi_scsi_req *, u32); 75 72 76 73 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf) 77 74 { ··· 402 401 spin_unlock_bh(&np_lock); 403 402 404 403 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n", 405 - np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ? 406 - "TCP" : "SCTP"); 404 + np->np_ip, np->np_port, np->np_transport->name); 407 405 408 406 return np; 409 407 } ··· 441 441 return 0; 442 442 } 443 443 444 - static int iscsit_del_np_comm(struct iscsi_np *np) 444 + static void iscsit_free_np(struct iscsi_np *np) 445 445 { 446 446 if (np->np_socket) 447 447 sock_release(np->np_socket); 448 - return 0; 449 448 } 450 449 451 450 int iscsit_del_np(struct iscsi_np *np) ··· 466 467 send_sig(SIGINT, np->np_thread, 1); 467 468 kthread_stop(np->np_thread); 468 469 } 469 - iscsit_del_np_comm(np); 470 + 471 + np->np_transport->iscsit_free_np(np); 470 472 471 473 spin_lock_bh(&np_lock); 472 474 list_del(&np->np_list); 473 475 spin_unlock_bh(&np_lock); 474 476 475 477 pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n", 476 - np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ? 
477 - "TCP" : "SCTP"); 478 + np->np_ip, np->np_port, np->np_transport->name); 478 479 480 + iscsit_put_transport(np->np_transport); 479 481 kfree(np); 480 482 return 0; 481 483 } 484 + 485 + static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int); 486 + static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int); 487 + 488 + static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 489 + { 490 + iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 491 + return 0; 492 + } 493 + 494 + static struct iscsit_transport iscsi_target_transport = { 495 + .name = "iSCSI/TCP", 496 + .transport_type = ISCSI_TCP, 497 + .owner = NULL, 498 + .iscsit_setup_np = iscsit_setup_np, 499 + .iscsit_accept_np = iscsit_accept_np, 500 + .iscsit_free_np = iscsit_free_np, 501 + .iscsit_alloc_cmd = iscsit_alloc_cmd, 502 + .iscsit_get_login_rx = iscsit_get_login_rx, 503 + .iscsit_put_login_tx = iscsit_put_login_tx, 504 + .iscsit_get_dataout = iscsit_build_r2ts_for_cmd, 505 + .iscsit_immediate_queue = iscsit_immediate_queue, 506 + .iscsit_response_queue = iscsit_response_queue, 507 + .iscsit_queue_data_in = iscsit_queue_rsp, 508 + .iscsit_queue_status = iscsit_queue_rsp, 509 + }; 482 510 483 511 static int __init iscsi_target_init_module(void) 484 512 { ··· 583 557 goto ooo_out; 584 558 } 585 559 560 + iscsit_register_transport(&iscsi_target_transport); 561 + 586 562 if (iscsit_load_discovery_tpg() < 0) 587 563 goto r2t_out; 588 564 ··· 615 587 iscsi_deallocate_thread_sets(); 616 588 iscsi_thread_set_free(); 617 589 iscsit_release_discovery_tpg(); 590 + iscsit_unregister_transport(&iscsi_target_transport); 618 591 kmem_cache_destroy(lio_cmd_cache); 619 592 kmem_cache_destroy(lio_qr_cache); 620 593 kmem_cache_destroy(lio_dr_cache); ··· 711 682 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 712 683 713 684 ret = wait_for_completion_interruptible(&cmd->reject_comp); 685 + /* 686 + * Perform the kref_put now if 
se_cmd has already been setup by 687 + * scsit_setup_scsi_cmd() 688 + */ 689 + if (cmd->se_cmd.se_tfo != NULL) { 690 + pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n"); 691 + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); 692 + } 714 693 if (ret != 0) 715 694 return -1; 716 695 717 696 return (!fail_conn) ? 0 : -1; 718 697 } 698 + EXPORT_SYMBOL(iscsit_add_reject_from_cmd); 719 699 720 700 /* 721 701 * Map some portion of the allocated scatterlist to an iovec, suitable for ··· 783 745 784 746 conn->exp_statsn = exp_statsn; 785 747 748 + if (conn->sess->sess_ops->RDMAExtensions) 749 + return; 750 + 786 751 spin_lock_bh(&conn->cmd_lock); 787 752 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { 788 753 spin_lock(&cmd->istate_lock); ··· 818 777 return 0; 819 778 } 820 779 821 - static int iscsit_handle_scsi_cmd( 822 - struct iscsi_conn *conn, 823 - unsigned char *buf) 780 + int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 781 + unsigned char *buf) 824 782 { 825 - int data_direction, payload_length, cmdsn_ret = 0, immed_ret; 826 - struct iscsi_cmd *cmd = NULL; 783 + int data_direction, payload_length; 827 784 struct iscsi_scsi_req *hdr; 828 785 int iscsi_task_attr; 829 786 int sam_task_attr; ··· 844 805 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) { 845 806 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL" 846 807 " not set. Bad iSCSI Initiator.\n"); 847 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 848 - buf, conn); 808 + return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID, 809 + 1, 1, buf, cmd); 849 810 } 850 811 851 812 if (((hdr->flags & ISCSI_FLAG_CMD_READ) || ··· 865 826 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" 866 827 " set when Expected Data Transfer Length is 0 for" 867 828 " CDB: 0x%02x. 
Bad iSCSI Initiator.\n", hdr->cdb[0]); 868 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 869 - buf, conn); 829 + return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID, 830 + 1, 1, buf, cmd); 870 831 } 871 832 done: 872 833 ··· 875 836 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE" 876 837 " MUST be set if Expected Data Transfer Length is not 0." 877 838 " Bad iSCSI Initiator\n"); 878 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 879 - buf, conn); 839 + return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID, 840 + 1, 1, buf, cmd); 880 841 } 881 842 882 843 if ((hdr->flags & ISCSI_FLAG_CMD_READ) && 883 844 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) { 884 845 pr_err("Bidirectional operations not supported!\n"); 885 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 886 - buf, conn); 846 + return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID, 847 + 1, 1, buf, cmd); 887 848 } 888 849 889 850 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 890 851 pr_err("Illegally set Immediate Bit in iSCSI Initiator" 891 852 " Scsi Command PDU.\n"); 892 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 893 - buf, conn); 853 + return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID, 854 + 1, 1, buf, cmd); 894 855 } 895 856 896 857 if (payload_length && !conn->sess->sess_ops->ImmediateData) { 897 858 pr_err("ImmediateData=No but DataSegmentLength=%u," 898 859 " protocol error.\n", payload_length); 899 - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 900 - buf, conn); 860 + return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR, 861 + 1, 1, buf, cmd); 901 862 } 902 863 903 864 if ((be32_to_cpu(hdr->data_length )== payload_length) && ··· 905 866 pr_err("Expected Data Transfer Length and Length of" 906 867 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL" 907 868 " bit is not set protocol error\n"); 908 - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 909 - buf, conn); 869 
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR, 870 + 1, 1, buf, cmd); 910 871 } 911 872 912 873 if (payload_length > be32_to_cpu(hdr->data_length)) { 913 874 pr_err("DataSegmentLength: %u is greater than" 914 875 " EDTL: %u, protocol error.\n", payload_length, 915 876 hdr->data_length); 916 - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 917 - buf, conn); 877 + return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR, 878 + 1, 1, buf, cmd); 918 879 } 919 880 920 881 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { 921 882 pr_err("DataSegmentLength: %u is greater than" 922 883 " MaxXmitDataSegmentLength: %u, protocol error.\n", 923 884 payload_length, conn->conn_ops->MaxXmitDataSegmentLength); 924 - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 925 - buf, conn); 885 + return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR, 886 + 1, 1, buf, cmd); 926 887 } 927 888 928 889 if (payload_length > conn->sess->sess_ops->FirstBurstLength) { 929 890 pr_err("DataSegmentLength: %u is greater than" 930 891 " FirstBurstLength: %u, protocol error.\n", 931 892 payload_length, conn->sess->sess_ops->FirstBurstLength); 932 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 933 - buf, conn); 893 + return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID, 894 + 1, 1, buf, cmd); 934 895 } 935 896 936 897 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE : 937 898 (hdr->flags & ISCSI_FLAG_CMD_READ) ? 
DMA_FROM_DEVICE : 938 899 DMA_NONE; 939 - 940 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 941 - if (!cmd) 942 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, 943 - buf, conn); 944 900 945 901 cmd->data_direction = data_direction; 946 902 iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK; ··· 979 945 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 980 946 cmd->first_burst_len = payload_length; 981 947 982 - if (cmd->data_direction == DMA_FROM_DEVICE) { 948 + if (!conn->sess->sess_ops->RDMAExtensions && 949 + cmd->data_direction == DMA_FROM_DEVICE) { 983 950 struct iscsi_datain_req *dr; 984 951 985 952 dr = iscsit_allocate_datain_req(); ··· 1002 967 1003 968 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," 1004 969 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, 1005 - hdr->cmdsn, hdr->data_length, payload_length, conn->cid); 970 + hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length, 971 + conn->cid); 972 + 973 + target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true); 1006 974 1007 975 cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd, 1008 976 scsilun_to_int(&hdr->lun)); ··· 1039 1001 */ 1040 1002 core_alua_check_nonop_delay(&cmd->se_cmd); 1041 1003 1042 - if (iscsit_allocate_iovecs(cmd) < 0) { 1043 - return iscsit_add_reject_from_cmd( 1044 - ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1045 - 1, 0, buf, cmd); 1046 - } 1004 + return 0; 1005 + } 1006 + EXPORT_SYMBOL(iscsit_setup_scsi_cmd); 1047 1007 1008 + void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd) 1009 + { 1010 + iscsit_set_dataout_sequence_values(cmd); 1011 + 1012 + spin_lock_bh(&cmd->dataout_timeout_lock); 1013 + iscsit_start_dataout_timer(cmd, cmd->conn); 1014 + spin_unlock_bh(&cmd->dataout_timeout_lock); 1015 + } 1016 + EXPORT_SYMBOL(iscsit_set_unsoliticed_dataout); 1017 + 1018 + int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1019 + struct iscsi_scsi_req *hdr) 1020 + { 1021 + int cmdsn_ret = 0; 1048 1022 /* 1049 1023 
* Check the CmdSN against ExpCmdSN/MaxCmdSN here if 1050 1024 * the Immediate Bit is not set, and no Immediate ··· 1069 1019 */ 1070 1020 if (!cmd->immediate_data) { 1071 1021 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1072 - if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) 1022 + if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { 1023 + if (!cmd->sense_reason) 1024 + return 0; 1025 + 1026 + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); 1073 1027 return 0; 1074 - else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1028 + } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) { 1075 1029 return iscsit_add_reject_from_cmd( 1076 1030 ISCSI_REASON_PROTOCOL_ERROR, 1077 - 1, 0, buf, cmd); 1031 + 1, 0, (unsigned char *)hdr, cmd); 1032 + } 1078 1033 } 1079 1034 1080 1035 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); ··· 1088 1033 * If no Immediate Data is attached, it's OK to return now. 1089 1034 */ 1090 1035 if (!cmd->immediate_data) { 1091 - if (!cmd->sense_reason && cmd->unsolicited_data) { 1092 - iscsit_set_dataout_sequence_values(cmd); 1036 + if (!cmd->sense_reason && cmd->unsolicited_data) 1037 + iscsit_set_unsoliticed_dataout(cmd); 1038 + if (!cmd->sense_reason) 1039 + return 0; 1093 1040 1094 - spin_lock_bh(&cmd->dataout_timeout_lock); 1095 - iscsit_start_dataout_timer(cmd, cmd->conn); 1096 - spin_unlock_bh(&cmd->dataout_timeout_lock); 1097 - } 1098 - 1041 + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); 1099 1042 return 0; 1100 1043 } 1101 1044 1102 1045 /* 1103 - * Early CHECK_CONDITIONs never make it to the transport processing 1104 - * thread. They are processed in CmdSN order by 1105 - * iscsit_check_received_cmdsn() below. 1046 + * Early CHECK_CONDITIONs with ImmediateData never make it to command 1047 + * execution. These exceptions are processed in CmdSN order using 1048 + * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below. 
1106 1049 */ 1107 1050 if (cmd->sense_reason) { 1108 - immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1109 - goto after_immediate_data; 1051 + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); 1052 + return 1; 1110 1053 } 1111 1054 /* 1112 1055 * Call directly into transport_generic_new_cmd() to perform ··· 1112 1059 */ 1113 1060 cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd); 1114 1061 if (cmd->sense_reason) { 1115 - immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1116 - goto after_immediate_data; 1062 + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); 1063 + return 1; 1117 1064 } 1118 1065 1119 - immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length); 1066 + return 0; 1067 + } 1068 + EXPORT_SYMBOL(iscsit_process_scsi_cmd); 1069 + 1070 + static int 1071 + iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, 1072 + bool dump_payload) 1073 + { 1074 + int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1075 + /* 1076 + * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes. 1077 + */ 1078 + if (dump_payload == true) 1079 + goto after_immediate_data; 1080 + 1081 + immed_ret = iscsit_handle_immediate_data(cmd, hdr, 1082 + cmd->first_burst_len); 1120 1083 after_immediate_data: 1121 1084 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) { 1122 1085 /* ··· 1140 1071 * DataCRC, check against ExpCmdSN/MaxCmdSN if 1141 1072 * Immediate Bit is not set. 1142 1073 */ 1143 - cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1144 - /* 1145 - * Special case for Unsupported SAM WRITE Opcodes 1146 - * and ImmediateData=Yes. 
1147 - */ 1148 - if (cmd->sense_reason) { 1149 - if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) 1150 - return -1; 1151 - } else if (cmd->unsolicited_data) { 1152 - iscsit_set_dataout_sequence_values(cmd); 1074 + cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd, hdr->cmdsn); 1153 1075 1154 - spin_lock_bh(&cmd->dataout_timeout_lock); 1155 - iscsit_start_dataout_timer(cmd, cmd->conn); 1156 - spin_unlock_bh(&cmd->dataout_timeout_lock); 1157 - } 1076 + if (cmd->sense_reason) { 1077 + if (iscsit_dump_data_payload(cmd->conn, 1078 + cmd->first_burst_len, 1) < 0) 1079 + return -1; 1080 + } else if (cmd->unsolicited_data) 1081 + iscsit_set_unsoliticed_dataout(cmd); 1158 1082 1159 1083 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1160 1084 return iscsit_add_reject_from_cmd( 1161 1085 ISCSI_REASON_PROTOCOL_ERROR, 1162 - 1, 0, buf, cmd); 1086 + 1, 0, (unsigned char *)hdr, cmd); 1163 1087 1164 1088 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) { 1165 1089 /* ··· 1167 1105 * CmdSN and issue a retry to plug the sequence. 1168 1106 */ 1169 1107 cmd->i_state = ISTATE_REMOVE; 1170 - iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); 1108 + iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state); 1171 1109 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */ 1172 1110 return -1; 1173 1111 1174 1112 return 0; 1113 + } 1114 + 1115 + static int 1116 + iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1117 + unsigned char *buf) 1118 + { 1119 + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1120 + int rc, immed_data; 1121 + bool dump_payload = false; 1122 + 1123 + rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 1124 + if (rc < 0) 1125 + return rc; 1126 + /* 1127 + * Allocation iovecs needed for struct socket operations for 1128 + * traditional iSCSI block I/O. 
1129 + */ 1130 + if (iscsit_allocate_iovecs(cmd) < 0) { 1131 + return iscsit_add_reject_from_cmd( 1132 + ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1133 + 1, 0, buf, cmd); 1134 + } 1135 + immed_data = cmd->immediate_data; 1136 + 1137 + rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 1138 + if (rc < 0) 1139 + return rc; 1140 + else if (rc > 0) 1141 + dump_payload = true; 1142 + 1143 + if (!immed_data) 1144 + return 0; 1145 + 1146 + return iscsit_get_immediate_data(cmd, hdr, dump_payload); 1175 1147 } 1176 1148 1177 1149 static u32 iscsit_do_crypto_hash_sg( ··· 1270 1174 crypto_hash_final(hash, data_crc); 1271 1175 } 1272 1176 1273 - static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) 1177 + int 1178 + iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, 1179 + struct iscsi_cmd **out_cmd) 1274 1180 { 1275 - int iov_ret, ooo_cmdsn = 0, ret; 1276 - u8 data_crc_failed = 0; 1277 - u32 checksum, iov_count = 0, padding = 0, rx_got = 0; 1278 - u32 rx_size = 0, payload_length; 1181 + struct iscsi_data *hdr = (struct iscsi_data *)buf; 1279 1182 struct iscsi_cmd *cmd = NULL; 1280 1183 struct se_cmd *se_cmd; 1281 - struct iscsi_data *hdr; 1282 - struct kvec *iov; 1283 1184 unsigned long flags; 1284 - 1285 - hdr = (struct iscsi_data *) buf; 1286 - payload_length = ntoh24(hdr->dlength); 1185 + u32 payload_length = ntoh24(hdr->dlength); 1186 + int rc; 1287 1187 1288 1188 if (!payload_length) { 1289 1189 pr_err("DataOUT payload is ZERO, protocol error.\n"); ··· 1312 1220 1313 1221 pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x," 1314 1222 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", 1315 - hdr->itt, hdr->ttt, hdr->datasn, hdr->offset, 1223 + hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset), 1316 1224 payload_length, conn->cid); 1317 1225 1318 1226 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { ··· 1404 1312 * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and 1405 1313 * within-command recovery checks before receiving the payload. 
1406 1314 */ 1407 - ret = iscsit_check_pre_dataout(cmd, buf); 1408 - if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY) 1315 + rc = iscsit_check_pre_dataout(cmd, buf); 1316 + if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY) 1409 1317 return 0; 1410 - else if (ret == DATAOUT_CANNOT_RECOVER) 1318 + else if (rc == DATAOUT_CANNOT_RECOVER) 1411 1319 return -1; 1320 + 1321 + *out_cmd = cmd; 1322 + return 0; 1323 + } 1324 + EXPORT_SYMBOL(iscsit_check_dataout_hdr); 1325 + 1326 + static int 1327 + iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1328 + struct iscsi_data *hdr) 1329 + { 1330 + struct kvec *iov; 1331 + u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0; 1332 + u32 payload_length = ntoh24(hdr->dlength); 1333 + int iov_ret, data_crc_failed = 0; 1412 1334 1413 1335 rx_size += payload_length; 1414 1336 iov = &cmd->iov_data[0]; ··· 1476 1370 payload_length); 1477 1371 } 1478 1372 } 1373 + 1374 + return data_crc_failed; 1375 + } 1376 + 1377 + int 1378 + iscsit_check_dataout_payload(struct iscsi_cmd *cmd, struct iscsi_data *hdr, 1379 + bool data_crc_failed) 1380 + { 1381 + struct iscsi_conn *conn = cmd->conn; 1382 + int rc, ooo_cmdsn; 1479 1383 /* 1480 1384 * Increment post receive data and CRC values or perform 1481 1385 * within-command recovery. 
1482 1386 */ 1483 - ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed); 1484 - if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)) 1387 + rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed); 1388 + if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)) 1485 1389 return 0; 1486 - else if (ret == DATAOUT_SEND_R2T) { 1390 + else if (rc == DATAOUT_SEND_R2T) { 1487 1391 iscsit_set_dataout_sequence_values(cmd); 1488 - iscsit_build_r2ts_for_cmd(cmd, conn, false); 1489 - } else if (ret == DATAOUT_SEND_TO_TRANSPORT) { 1392 + conn->conn_transport->iscsit_get_dataout(conn, cmd, false); 1393 + } else if (rc == DATAOUT_SEND_TO_TRANSPORT) { 1490 1394 /* 1491 1395 * Handle extra special case for out of order 1492 1396 * Unsolicited Data Out. ··· 1517 1401 1518 1402 return 0; 1519 1403 } 1404 + EXPORT_SYMBOL(iscsit_check_dataout_payload); 1520 1405 1521 - static int iscsit_handle_nop_out( 1522 - struct iscsi_conn *conn, 1523 - unsigned char *buf) 1406 + static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) 1407 + { 1408 + struct iscsi_cmd *cmd; 1409 + struct iscsi_data *hdr = (struct iscsi_data *)buf; 1410 + int rc; 1411 + bool data_crc_failed = false; 1412 + 1413 + rc = iscsit_check_dataout_hdr(conn, buf, &cmd); 1414 + if (rc < 0) 1415 + return rc; 1416 + else if (!cmd) 1417 + return 0; 1418 + 1419 + rc = iscsit_get_dataout(conn, cmd, hdr); 1420 + if (rc < 0) 1421 + return rc; 1422 + else if (rc > 0) 1423 + data_crc_failed = true; 1424 + 1425 + return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed); 1426 + } 1427 + 1428 + int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1429 + unsigned char *buf) 1524 1430 { 1525 1431 unsigned char *ping_data = NULL; 1526 1432 int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size; 1527 1433 u32 checksum, data_crc, padding = 0, payload_length; 1528 - struct iscsi_cmd *cmd = NULL; 1434 + struct iscsi_cmd *cmd_p = NULL; 1529 1435 
struct kvec *iov = NULL; 1530 1436 struct iscsi_nopout *hdr; 1531 1437 ··· 1570 1432 buf, conn); 1571 1433 } 1572 1434 1573 - pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x," 1435 + pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x," 1574 1436 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n", 1575 1437 hdr->itt == RESERVED_ITT ? "Response" : "Request", 1576 1438 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn, ··· 1583 1445 * can contain ping data. 1584 1446 */ 1585 1447 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { 1586 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1587 1448 if (!cmd) 1588 1449 return iscsit_add_reject( 1589 1450 ISCSI_REASON_BOOKMARK_NO_RESOURCES, ··· 1717 1580 /* 1718 1581 * This was a response to a unsolicited NOPIN ping. 1719 1582 */ 1720 - cmd = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt)); 1721 - if (!cmd) 1583 + cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt)); 1584 + if (!cmd_p) 1722 1585 return -1; 1723 1586 1724 1587 iscsit_stop_nopin_response_timer(conn); 1725 1588 1726 - cmd->i_state = ISTATE_REMOVE; 1727 - iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); 1589 + cmd_p->i_state = ISTATE_REMOVE; 1590 + iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state); 1728 1591 iscsit_start_nopin_timer(conn); 1729 1592 } else { 1730 1593 /* ··· 1748 1611 kfree(ping_data); 1749 1612 return ret; 1750 1613 } 1614 + EXPORT_SYMBOL(iscsit_handle_nop_out); 1751 1615 1752 - static int iscsit_handle_task_mgt_cmd( 1753 - struct iscsi_conn *conn, 1754 - unsigned char *buf) 1616 + int 1617 + iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1618 + unsigned char *buf) 1755 1619 { 1756 - struct iscsi_cmd *cmd; 1757 1620 struct se_tmr_req *se_tmr; 1758 1621 struct iscsi_tmr_req *tmr_req; 1759 1622 struct iscsi_tm *hdr; ··· 1782 1645 pr_err("Task Management Request TASK_REASSIGN not" 1783 1646 " issued as immediate command, bad iSCSI Initiator" 1784 1647 "implementation\n"); 1785 - return 
iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1786 - buf, conn); 1648 + return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR, 1649 + 1, 1, buf, cmd); 1787 1650 } 1788 1651 if ((function != ISCSI_TM_FUNC_ABORT_TASK) && 1789 1652 be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG) 1790 1653 hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG); 1791 - 1792 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1793 - if (!cmd) 1794 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1795 - 1, buf, conn); 1796 1654 1797 1655 cmd->data_direction = DMA_NONE; 1798 1656 ··· 1959 1827 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 1960 1828 return 0; 1961 1829 } 1830 + EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd); 1962 1831 1963 1832 /* #warning FIXME: Support Text Command parameters besides SendTargets */ 1964 1833 static int iscsit_handle_text_cmd( ··· 2222 2089 return 0; 2223 2090 } 2224 2091 2225 - static int iscsit_handle_logout_cmd( 2226 - struct iscsi_conn *conn, 2227 - unsigned char *buf) 2092 + int 2093 + iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 2094 + unsigned char *buf) 2228 2095 { 2229 2096 int cmdsn_ret, logout_remove = 0; 2230 2097 u8 reason_code = 0; 2231 - struct iscsi_cmd *cmd; 2232 2098 struct iscsi_logout *hdr; 2233 2099 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn); 2234 2100 ··· 2251 2119 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { 2252 2120 pr_err("Received logout request on connection that" 2253 2121 " is not in logged in state, ignoring request.\n"); 2122 + iscsit_release_cmd(cmd); 2254 2123 return 0; 2255 2124 } 2256 - 2257 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 2258 - if (!cmd) 2259 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, 2260 - buf, conn); 2261 2125 2262 2126 cmd->iscsi_opcode = ISCSI_OP_LOGOUT; 2263 2127 cmd->i_state = ISTATE_SEND_LOGOUTRSP; ··· 2304 2176 2305 2177 return logout_remove; 2306 2178 } 2179 + EXPORT_SYMBOL(iscsit_handle_logout_cmd); 
2307 2180 2308 2181 static int iscsit_handle_snack( 2309 2182 struct iscsi_conn *conn, ··· 2372 2243 2373 2244 static int iscsit_handle_immediate_data( 2374 2245 struct iscsi_cmd *cmd, 2375 - unsigned char *buf, 2246 + struct iscsi_scsi_req *hdr, 2376 2247 u32 length) 2377 2248 { 2378 2249 int iov_ret, rx_got = 0, rx_size = 0; ··· 2428 2299 " in ERL=0.\n"); 2429 2300 iscsit_add_reject_from_cmd( 2430 2301 ISCSI_REASON_DATA_DIGEST_ERROR, 2431 - 1, 0, buf, cmd); 2302 + 1, 0, (unsigned char *)hdr, cmd); 2432 2303 return IMMEDIATE_DATA_CANNOT_RECOVER; 2433 2304 } else { 2434 2305 iscsit_add_reject_from_cmd( 2435 2306 ISCSI_REASON_DATA_DIGEST_ERROR, 2436 - 0, 0, buf, cmd); 2307 + 0, 0, (unsigned char *)hdr, cmd); 2437 2308 return IMMEDIATE_DATA_ERL1_CRC_FAILURE; 2438 2309 } 2439 2310 } else { ··· 2553 2424 } 2554 2425 } 2555 2426 2556 - static int iscsit_send_data_in( 2557 - struct iscsi_cmd *cmd, 2558 - struct iscsi_conn *conn) 2427 + static void 2428 + iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 2429 + struct iscsi_datain *datain, struct iscsi_data_rsp *hdr, 2430 + bool set_statsn) 2559 2431 { 2560 - int iov_ret = 0, set_statsn = 0; 2561 - u32 iov_count = 0, tx_size = 0; 2432 + hdr->opcode = ISCSI_OP_SCSI_DATA_IN; 2433 + hdr->flags = datain->flags; 2434 + if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { 2435 + if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 2436 + hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW; 2437 + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 2438 + } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 2439 + hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW; 2440 + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 2441 + } 2442 + } 2443 + hton24(hdr->dlength, datain->length); 2444 + if (hdr->flags & ISCSI_FLAG_DATA_ACK) 2445 + int_to_scsilun(cmd->se_cmd.orig_fe_lun, 2446 + (struct scsi_lun *)&hdr->lun); 2447 + else 2448 + put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2449 + 2450 + hdr->itt = 
cmd->init_task_tag; 2451 + 2452 + if (hdr->flags & ISCSI_FLAG_DATA_ACK) 2453 + hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2454 + else 2455 + hdr->ttt = cpu_to_be32(0xFFFFFFFF); 2456 + if (set_statsn) 2457 + hdr->statsn = cpu_to_be32(cmd->stat_sn); 2458 + else 2459 + hdr->statsn = cpu_to_be32(0xFFFFFFFF); 2460 + 2461 + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2462 + hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2463 + hdr->datasn = cpu_to_be32(datain->data_sn); 2464 + hdr->offset = cpu_to_be32(datain->offset); 2465 + 2466 + pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x," 2467 + " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", 2468 + cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), 2469 + ntohl(hdr->offset), datain->length, conn->cid); 2470 + } 2471 + 2472 + static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2473 + { 2474 + struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0]; 2562 2475 struct iscsi_datain datain; 2563 2476 struct iscsi_datain_req *dr; 2564 - struct iscsi_data_rsp *hdr; 2565 2477 struct kvec *iov; 2566 - int eodr = 0; 2567 - int ret; 2478 + u32 iov_count = 0, tx_size = 0; 2479 + int eodr = 0, ret, iov_ret; 2480 + bool set_statsn = false; 2568 2481 2569 2482 memset(&datain, 0, sizeof(struct iscsi_datain)); 2570 2483 dr = iscsit_get_datain_values(cmd, &datain); ··· 2615 2444 cmd->init_task_tag); 2616 2445 return -1; 2617 2446 } 2618 - 2619 2447 /* 2620 2448 * Be paranoid and double check the logic for now. 
2621 2449 */ ··· 2622 2452 pr_err("Command ITT: 0x%08x, datain.offset: %u and" 2623 2453 " datain.length: %u exceeds cmd->data_length: %u\n", 2624 2454 cmd->init_task_tag, datain.offset, datain.length, 2625 - cmd->se_cmd.data_length); 2455 + cmd->se_cmd.data_length); 2626 2456 return -1; 2627 2457 } 2628 2458 ··· 2646 2476 (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) { 2647 2477 iscsit_increment_maxcmdsn(cmd, conn->sess); 2648 2478 cmd->stat_sn = conn->stat_sn++; 2649 - set_statsn = 1; 2479 + set_statsn = true; 2650 2480 } else if (dr->dr_complete == 2651 - DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY) 2652 - set_statsn = 1; 2481 + DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY) 2482 + set_statsn = true; 2653 2483 } 2654 2484 2655 - hdr = (struct iscsi_data_rsp *) cmd->pdu; 2656 - memset(hdr, 0, ISCSI_HDR_LEN); 2657 - hdr->opcode = ISCSI_OP_SCSI_DATA_IN; 2658 - hdr->flags = datain.flags; 2659 - if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { 2660 - if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 2661 - hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW; 2662 - hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 2663 - } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 2664 - hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW; 2665 - hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 2666 - } 2667 - } 2668 - hton24(hdr->dlength, datain.length); 2669 - if (hdr->flags & ISCSI_FLAG_DATA_ACK) 2670 - int_to_scsilun(cmd->se_cmd.orig_fe_lun, 2671 - (struct scsi_lun *)&hdr->lun); 2672 - else 2673 - put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2674 - 2675 - hdr->itt = cmd->init_task_tag; 2676 - 2677 - if (hdr->flags & ISCSI_FLAG_DATA_ACK) 2678 - hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2679 - else 2680 - hdr->ttt = cpu_to_be32(0xFFFFFFFF); 2681 - if (set_statsn) 2682 - hdr->statsn = cpu_to_be32(cmd->stat_sn); 2683 - else 2684 - hdr->statsn = cpu_to_be32(0xFFFFFFFF); 2685 - 2686 - hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2687 - hdr->max_cmdsn = 
cpu_to_be32(conn->sess->max_cmd_sn); 2688 - hdr->datasn = cpu_to_be32(datain.data_sn); 2689 - hdr->offset = cpu_to_be32(datain.offset); 2485 + iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn); 2690 2486 2691 2487 iov = &cmd->iov_data[0]; 2692 2488 iov[iov_count].iov_base = cmd->pdu; ··· 2663 2527 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2664 2528 2665 2529 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2666 - (unsigned char *)hdr, ISCSI_HDR_LEN, 2530 + (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, 2667 2531 0, NULL, (u8 *)header_digest); 2668 2532 2669 2533 iov[0].iov_len += ISCSI_CRC_LEN; ··· 2673 2537 " for DataIN PDU 0x%08x\n", *header_digest); 2674 2538 } 2675 2539 2676 - iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length); 2540 + iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], 2541 + datain.offset, datain.length); 2677 2542 if (iov_ret < 0) 2678 2543 return -1; 2679 2544 ··· 2705 2568 cmd->iov_data_count = iov_count; 2706 2569 cmd->tx_size = tx_size; 2707 2570 2708 - pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x," 2709 - " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", 2710 - cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), 2711 - ntohl(hdr->offset), datain.length, conn->cid); 2712 - 2713 2571 /* sendpage is preferred but can't insert markers */ 2714 2572 if (!conn->conn_ops->IFMarker) 2715 2573 ret = iscsit_fe_sendpage_sg(cmd, conn); ··· 2727 2595 return eodr; 2728 2596 } 2729 2597 2730 - static int iscsit_send_logout_response( 2731 - struct iscsi_cmd *cmd, 2732 - struct iscsi_conn *conn) 2598 + int 2599 + iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 2600 + struct iscsi_logout_rsp *hdr) 2733 2601 { 2734 - int niov = 0, tx_size; 2735 2602 struct iscsi_conn *logout_conn = NULL; 2736 2603 struct iscsi_conn_recovery *cr = NULL; 2737 2604 struct iscsi_session *sess = conn->sess; 2738 - struct kvec *iov; 2739 - struct iscsi_logout_rsp *hdr; 2740 2605 /* 2741 2606 * 
The actual shutting down of Sessions and/or Connections 2742 2607 * for CLOSESESSION and CLOSECONNECTION Logout Requests ··· 2802 2673 return -1; 2803 2674 } 2804 2675 2805 - tx_size = ISCSI_HDR_LEN; 2806 - hdr = (struct iscsi_logout_rsp *)cmd->pdu; 2807 - memset(hdr, 0, ISCSI_HDR_LEN); 2808 2676 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 2809 2677 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2810 2678 hdr->response = cmd->logout_response; ··· 2813 2687 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2814 2688 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2815 2689 2690 + pr_debug("Built Logout Response ITT: 0x%08x StatSN:" 2691 + " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n", 2692 + cmd->init_task_tag, cmd->stat_sn, hdr->response, 2693 + cmd->logout_cid, conn->cid); 2694 + 2695 + return 0; 2696 + } 2697 + EXPORT_SYMBOL(iscsit_build_logout_rsp); 2698 + 2699 + static int 2700 + iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2701 + { 2702 + struct kvec *iov; 2703 + int niov = 0, tx_size, rc; 2704 + 2705 + rc = iscsit_build_logout_rsp(cmd, conn, 2706 + (struct iscsi_logout_rsp *)&cmd->pdu[0]); 2707 + if (rc < 0) 2708 + return rc; 2709 + 2710 + tx_size = ISCSI_HDR_LEN; 2816 2711 iov = &cmd->iov_misc[0]; 2817 2712 iov[niov].iov_base = cmd->pdu; 2818 2713 iov[niov++].iov_len = ISCSI_HDR_LEN; ··· 2842 2695 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2843 2696 2844 2697 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2845 - (unsigned char *)hdr, ISCSI_HDR_LEN, 2698 + (unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN, 2846 2699 0, NULL, (u8 *)header_digest); 2847 2700 2848 2701 iov[0].iov_len += ISCSI_CRC_LEN; ··· 2853 2706 cmd->iov_misc_count = niov; 2854 2707 cmd->tx_size = tx_size; 2855 2708 2856 - pr_debug("Sending Logout Response ITT: 0x%08x StatSN:" 2857 - " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n", 2858 - cmd->init_task_tag, cmd->stat_sn, hdr->response, 2859 - cmd->logout_cid, conn->cid); 2860 - 2861 2709 return 0; 2862 2710 } 2711 + 
2712 + void 2713 + iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 2714 + struct iscsi_nopin *hdr, bool nopout_response) 2715 + { 2716 + hdr->opcode = ISCSI_OP_NOOP_IN; 2717 + hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2718 + hton24(hdr->dlength, cmd->buf_ptr_size); 2719 + if (nopout_response) 2720 + put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2721 + hdr->itt = cmd->init_task_tag; 2722 + hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2723 + cmd->stat_sn = (nopout_response) ? conn->stat_sn++ : 2724 + conn->stat_sn; 2725 + hdr->statsn = cpu_to_be32(cmd->stat_sn); 2726 + 2727 + if (nopout_response) 2728 + iscsit_increment_maxcmdsn(cmd, conn->sess); 2729 + 2730 + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2731 + hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2732 + 2733 + pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x," 2734 + " StatSN: 0x%08x, Length %u\n", (nopout_response) ? 2735 + "Solicitied" : "Unsolicitied", cmd->init_task_tag, 2736 + cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size); 2737 + } 2738 + EXPORT_SYMBOL(iscsit_build_nopin_rsp); 2863 2739 2864 2740 /* 2865 2741 * Unsolicited NOPIN, either requesting a response or not. 
··· 2892 2722 struct iscsi_conn *conn, 2893 2723 int want_response) 2894 2724 { 2895 - int tx_size = ISCSI_HDR_LEN; 2896 - struct iscsi_nopin *hdr; 2897 - int ret; 2725 + struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; 2726 + int tx_size = ISCSI_HDR_LEN, ret; 2898 2727 2899 - hdr = (struct iscsi_nopin *) cmd->pdu; 2900 - memset(hdr, 0, ISCSI_HDR_LEN); 2901 - hdr->opcode = ISCSI_OP_NOOP_IN; 2902 - hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2903 - hdr->itt = cmd->init_task_tag; 2904 - hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2905 - cmd->stat_sn = conn->stat_sn; 2906 - hdr->statsn = cpu_to_be32(cmd->stat_sn); 2907 - hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2908 - hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2728 + iscsit_build_nopin_rsp(cmd, conn, hdr, false); 2909 2729 2910 2730 if (conn->conn_ops->HeaderDigest) { 2911 2731 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; ··· 2931 2771 return 0; 2932 2772 } 2933 2773 2934 - static int iscsit_send_nopin_response( 2935 - struct iscsi_cmd *cmd, 2936 - struct iscsi_conn *conn) 2774 + static int 2775 + iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2937 2776 { 2938 - int niov = 0, tx_size; 2939 - u32 padding = 0; 2777 + struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; 2940 2778 struct kvec *iov; 2941 - struct iscsi_nopin *hdr; 2779 + u32 padding = 0; 2780 + int niov = 0, tx_size; 2781 + 2782 + iscsit_build_nopin_rsp(cmd, conn, hdr, true); 2942 2783 2943 2784 tx_size = ISCSI_HDR_LEN; 2944 - hdr = (struct iscsi_nopin *) cmd->pdu; 2945 - memset(hdr, 0, ISCSI_HDR_LEN); 2946 - hdr->opcode = ISCSI_OP_NOOP_IN; 2947 - hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2948 - hton24(hdr->dlength, cmd->buf_ptr_size); 2949 - put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2950 - hdr->itt = cmd->init_task_tag; 2951 - hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2952 - cmd->stat_sn = conn->stat_sn++; 2953 - hdr->statsn = cpu_to_be32(cmd->stat_sn); 2954 - 2955 - 
iscsit_increment_maxcmdsn(cmd, conn->sess); 2956 - hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2957 - hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2958 - 2959 2785 iov = &cmd->iov_misc[0]; 2960 2786 iov[niov].iov_base = cmd->pdu; 2961 2787 iov[niov++].iov_len = ISCSI_HDR_LEN; ··· 2996 2850 2997 2851 cmd->iov_misc_count = niov; 2998 2852 cmd->tx_size = tx_size; 2999 - 3000 - pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:" 3001 - " 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag, 3002 - cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size); 3003 2853 3004 2854 return 0; 3005 2855 } ··· 3081 2939 * connection recovery. 3082 2940 */ 3083 2941 int iscsit_build_r2ts_for_cmd( 3084 - struct iscsi_cmd *cmd, 3085 2942 struct iscsi_conn *conn, 2943 + struct iscsi_cmd *cmd, 3086 2944 bool recovery) 3087 2945 { 3088 2946 int first_r2t = 1; ··· 3157 3015 return 0; 3158 3016 } 3159 3017 3160 - static int iscsit_send_status( 3161 - struct iscsi_cmd *cmd, 3162 - struct iscsi_conn *conn) 3018 + void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 3019 + bool inc_stat_sn, struct iscsi_scsi_rsp *hdr) 3163 3020 { 3164 - u8 iov_count = 0, recovery; 3165 - u32 padding = 0, tx_size = 0; 3166 - struct iscsi_scsi_rsp *hdr; 3167 - struct kvec *iov; 3168 - 3169 - recovery = (cmd->i_state != ISTATE_SEND_STATUS); 3170 - if (!recovery) 3021 + if (inc_stat_sn) 3171 3022 cmd->stat_sn = conn->stat_sn++; 3172 3023 3173 3024 spin_lock_bh(&conn->sess->session_stats_lock); 3174 3025 conn->sess->rsp_pdus++; 3175 3026 spin_unlock_bh(&conn->sess->session_stats_lock); 3176 3027 3177 - hdr = (struct iscsi_scsi_rsp *) cmd->pdu; 3178 3028 memset(hdr, 0, ISCSI_HDR_LEN); 3179 3029 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP; 3180 3030 hdr->flags |= ISCSI_FLAG_CMD_FINAL; ··· 3185 3051 iscsit_increment_maxcmdsn(cmd, conn->sess); 3186 3052 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3187 3053 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3054 + 3055 + 
pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x," 3056 + " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n", 3057 + cmd->init_task_tag, cmd->stat_sn, cmd->se_cmd.scsi_status, 3058 + cmd->se_cmd.scsi_status, conn->cid); 3059 + } 3060 + EXPORT_SYMBOL(iscsit_build_rsp_pdu); 3061 + 3062 + static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 3063 + { 3064 + struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0]; 3065 + struct kvec *iov; 3066 + u32 padding = 0, tx_size = 0; 3067 + int iov_count = 0; 3068 + bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS); 3069 + 3070 + iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr); 3188 3071 3189 3072 iov = &cmd->iov_misc[0]; 3190 3073 iov[iov_count].iov_base = cmd->pdu; ··· 3257 3106 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3258 3107 3259 3108 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3260 - (unsigned char *)hdr, ISCSI_HDR_LEN, 3109 + (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, 3261 3110 0, NULL, (u8 *)header_digest); 3262 3111 3263 3112 iov[0].iov_len += ISCSI_CRC_LEN; ··· 3268 3117 3269 3118 cmd->iov_misc_count = iov_count; 3270 3119 cmd->tx_size = tx_size; 3271 - 3272 - pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x," 3273 - " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n", 3274 - (!recovery) ? 
"" : "Recovery ", cmd->init_task_tag, 3275 - cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid); 3276 3120 3277 3121 return 0; 3278 3122 } ··· 3291 3145 } 3292 3146 } 3293 3147 3294 - static int iscsit_send_task_mgt_rsp( 3295 - struct iscsi_cmd *cmd, 3296 - struct iscsi_conn *conn) 3148 + void 3149 + iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 3150 + struct iscsi_tm_rsp *hdr) 3297 3151 { 3298 3152 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; 3299 - struct iscsi_tm_rsp *hdr; 3300 - u32 tx_size = 0; 3301 3153 3302 - hdr = (struct iscsi_tm_rsp *) cmd->pdu; 3303 - memset(hdr, 0, ISCSI_HDR_LEN); 3304 3154 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 3305 3155 hdr->flags = ISCSI_FLAG_CMD_FINAL; 3306 3156 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); ··· 3307 3165 iscsit_increment_maxcmdsn(cmd, conn->sess); 3308 3166 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3309 3167 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3168 + 3169 + pr_debug("Built Task Management Response ITT: 0x%08x," 3170 + " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n", 3171 + cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid); 3172 + } 3173 + EXPORT_SYMBOL(iscsit_build_task_mgt_rsp); 3174 + 3175 + static int 3176 + iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 3177 + { 3178 + struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0]; 3179 + u32 tx_size = 0; 3180 + 3181 + iscsit_build_task_mgt_rsp(cmd, conn, hdr); 3310 3182 3311 3183 cmd->iov_misc[0].iov_base = cmd->pdu; 3312 3184 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; ··· 3341 3185 3342 3186 cmd->iov_misc_count = 1; 3343 3187 cmd->tx_size = tx_size; 3344 - 3345 - pr_debug("Built Task Management Response ITT: 0x%08x," 3346 - " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n", 3347 - cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid); 3348 3188 3349 3189 return 0; 3350 3190 } ··· 3537 3385 return 0; 3538 3386 } 3539 3387 3388 + void 3389 + 
iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 3390 + struct iscsi_reject *hdr) 3391 + { 3392 + hdr->opcode = ISCSI_OP_REJECT; 3393 + hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3394 + hton24(hdr->dlength, ISCSI_HDR_LEN); 3395 + hdr->ffffffff = cpu_to_be32(0xffffffff); 3396 + cmd->stat_sn = conn->stat_sn++; 3397 + hdr->statsn = cpu_to_be32(cmd->stat_sn); 3398 + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3399 + hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3400 + 3401 + } 3402 + EXPORT_SYMBOL(iscsit_build_reject); 3403 + 3540 3404 static int iscsit_send_reject( 3541 3405 struct iscsi_cmd *cmd, 3542 3406 struct iscsi_conn *conn) ··· 3561 3393 struct iscsi_reject *hdr; 3562 3394 struct kvec *iov; 3563 3395 3564 - hdr = (struct iscsi_reject *) cmd->pdu; 3565 - hdr->opcode = ISCSI_OP_REJECT; 3566 - hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3567 - hton24(hdr->dlength, ISCSI_HDR_LEN); 3568 - hdr->ffffffff = cpu_to_be32(0xffffffff); 3569 - cmd->stat_sn = conn->stat_sn++; 3570 - hdr->statsn = cpu_to_be32(cmd->stat_sn); 3571 - hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3572 - hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3396 + iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]); 3573 3397 3574 3398 iov = &cmd->iov_misc[0]; 3575 - 3576 3399 iov[iov_count].iov_base = cmd->pdu; 3577 3400 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3578 3401 iov[iov_count].iov_base = cmd->buf_ptr; ··· 3660 3501 set_cpus_allowed_ptr(p, conn->conn_cpumask); 3661 3502 } 3662 3503 3663 - static int handle_immediate_queue(struct iscsi_conn *conn) 3504 + static int 3505 + iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 3664 3506 { 3507 + int ret; 3508 + 3509 + switch (state) { 3510 + case ISTATE_SEND_R2T: 3511 + ret = iscsit_send_r2t(cmd, conn); 3512 + if (ret < 0) 3513 + goto err; 3514 + break; 3515 + case ISTATE_REMOVE: 3516 + spin_lock_bh(&conn->cmd_lock); 3517 + list_del(&cmd->i_conn_node); 3518 + 
spin_unlock_bh(&conn->cmd_lock); 3519 + 3520 + iscsit_free_cmd(cmd); 3521 + break; 3522 + case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3523 + iscsit_mod_nopin_response_timer(conn); 3524 + ret = iscsit_send_unsolicited_nopin(cmd, conn, 1); 3525 + if (ret < 0) 3526 + goto err; 3527 + break; 3528 + case ISTATE_SEND_NOPIN_NO_RESPONSE: 3529 + ret = iscsit_send_unsolicited_nopin(cmd, conn, 0); 3530 + if (ret < 0) 3531 + goto err; 3532 + break; 3533 + default: 3534 + pr_err("Unknown Opcode: 0x%02x ITT:" 3535 + " 0x%08x, i_state: %d on CID: %hu\n", 3536 + cmd->iscsi_opcode, cmd->init_task_tag, state, 3537 + conn->cid); 3538 + goto err; 3539 + } 3540 + 3541 + return 0; 3542 + 3543 + err: 3544 + return -1; 3545 + } 3546 + 3547 + static int 3548 + iscsit_handle_immediate_queue(struct iscsi_conn *conn) 3549 + { 3550 + struct iscsit_transport *t = conn->conn_transport; 3665 3551 struct iscsi_queue_req *qr; 3666 3552 struct iscsi_cmd *cmd; 3667 3553 u8 state; ··· 3718 3514 state = qr->state; 3719 3515 kmem_cache_free(lio_qr_cache, qr); 3720 3516 3721 - switch (state) { 3722 - case ISTATE_SEND_R2T: 3723 - ret = iscsit_send_r2t(cmd, conn); 3724 - if (ret < 0) 3725 - goto err; 3726 - break; 3727 - case ISTATE_REMOVE: 3728 - if (cmd->data_direction == DMA_TO_DEVICE) 3729 - iscsit_stop_dataout_timer(cmd); 3517 + ret = t->iscsit_immediate_queue(conn, cmd, state); 3518 + if (ret < 0) 3519 + return ret; 3520 + } 3730 3521 3731 - spin_lock_bh(&conn->cmd_lock); 3732 - list_del(&cmd->i_conn_node); 3733 - spin_unlock_bh(&conn->cmd_lock); 3522 + return 0; 3523 + } 3734 3524 3735 - iscsit_free_cmd(cmd); 3736 - continue; 3737 - case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3738 - iscsit_mod_nopin_response_timer(conn); 3739 - ret = iscsit_send_unsolicited_nopin(cmd, 3740 - conn, 1); 3741 - if (ret < 0) 3742 - goto err; 3525 + static int 3526 + iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 3527 + { 3528 + int ret; 3529 + 3530 + check_rsp_state: 3531 + switch (state) { 3532 + 
case ISTATE_SEND_DATAIN: 3533 + ret = iscsit_send_datain(cmd, conn); 3534 + if (ret < 0) 3535 + goto err; 3536 + else if (!ret) 3537 + /* more drs */ 3538 + goto check_rsp_state; 3539 + else if (ret == 1) { 3540 + /* all done */ 3541 + spin_lock_bh(&cmd->istate_lock); 3542 + cmd->i_state = ISTATE_SENT_STATUS; 3543 + spin_unlock_bh(&cmd->istate_lock); 3544 + 3545 + if (atomic_read(&conn->check_immediate_queue)) 3546 + return 1; 3547 + 3548 + return 0; 3549 + } else if (ret == 2) { 3550 + /* Still must send status, 3551 + SCF_TRANSPORT_TASK_SENSE was set */ 3552 + spin_lock_bh(&cmd->istate_lock); 3553 + cmd->i_state = ISTATE_SEND_STATUS; 3554 + spin_unlock_bh(&cmd->istate_lock); 3555 + state = ISTATE_SEND_STATUS; 3556 + goto check_rsp_state; 3557 + } 3558 + 3559 + break; 3560 + case ISTATE_SEND_STATUS: 3561 + case ISTATE_SEND_STATUS_RECOVERY: 3562 + ret = iscsit_send_response(cmd, conn); 3563 + break; 3564 + case ISTATE_SEND_LOGOUTRSP: 3565 + ret = iscsit_send_logout(cmd, conn); 3566 + break; 3567 + case ISTATE_SEND_ASYNCMSG: 3568 + ret = iscsit_send_conn_drop_async_message( 3569 + cmd, conn); 3570 + break; 3571 + case ISTATE_SEND_NOPIN: 3572 + ret = iscsit_send_nopin(cmd, conn); 3573 + break; 3574 + case ISTATE_SEND_REJECT: 3575 + ret = iscsit_send_reject(cmd, conn); 3576 + break; 3577 + case ISTATE_SEND_TASKMGTRSP: 3578 + ret = iscsit_send_task_mgt_rsp(cmd, conn); 3579 + if (ret != 0) 3743 3580 break; 3744 - case ISTATE_SEND_NOPIN_NO_RESPONSE: 3745 - ret = iscsit_send_unsolicited_nopin(cmd, 3746 - conn, 0); 3747 - if (ret < 0) 3748 - goto err; 3749 - break; 3750 - default: 3751 - pr_err("Unknown Opcode: 0x%02x ITT:" 3752 - " 0x%08x, i_state: %d on CID: %hu\n", 3753 - cmd->iscsi_opcode, cmd->init_task_tag, state, 3754 - conn->cid); 3581 + ret = iscsit_tmr_post_handler(cmd, conn); 3582 + if (ret != 0) 3583 + iscsit_fall_back_to_erl0(conn->sess); 3584 + break; 3585 + case ISTATE_SEND_TEXTRSP: 3586 + ret = iscsit_send_text_rsp(cmd, conn); 3587 + break; 3588 + default: 
3589 + pr_err("Unknown Opcode: 0x%02x ITT:" 3590 + " 0x%08x, i_state: %d on CID: %hu\n", 3591 + cmd->iscsi_opcode, cmd->init_task_tag, 3592 + state, conn->cid); 3593 + goto err; 3594 + } 3595 + if (ret < 0) 3596 + goto err; 3597 + 3598 + if (iscsit_send_tx_data(cmd, conn, 1) < 0) { 3599 + iscsit_tx_thread_wait_for_tcp(conn); 3600 + iscsit_unmap_iovec(cmd); 3601 + goto err; 3602 + } 3603 + iscsit_unmap_iovec(cmd); 3604 + 3605 + switch (state) { 3606 + case ISTATE_SEND_LOGOUTRSP: 3607 + if (!iscsit_logout_post_handler(cmd, conn)) 3608 + goto restart; 3609 + /* fall through */ 3610 + case ISTATE_SEND_STATUS: 3611 + case ISTATE_SEND_ASYNCMSG: 3612 + case ISTATE_SEND_NOPIN: 3613 + case ISTATE_SEND_STATUS_RECOVERY: 3614 + case ISTATE_SEND_TEXTRSP: 3615 + case ISTATE_SEND_TASKMGTRSP: 3616 + spin_lock_bh(&cmd->istate_lock); 3617 + cmd->i_state = ISTATE_SENT_STATUS; 3618 + spin_unlock_bh(&cmd->istate_lock); 3619 + break; 3620 + case ISTATE_SEND_REJECT: 3621 + if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) { 3622 + cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN; 3623 + complete(&cmd->reject_comp); 3755 3624 goto err; 3756 3625 } 3626 + complete(&cmd->reject_comp); 3627 + break; 3628 + default: 3629 + pr_err("Unknown Opcode: 0x%02x ITT:" 3630 + " 0x%08x, i_state: %d on CID: %hu\n", 3631 + cmd->iscsi_opcode, cmd->init_task_tag, 3632 + cmd->i_state, conn->cid); 3633 + goto err; 3757 3634 } 3635 + 3636 + if (atomic_read(&conn->check_immediate_queue)) 3637 + return 1; 3758 3638 3759 3639 return 0; 3760 3640 3761 3641 err: 3762 3642 return -1; 3643 + restart: 3644 + return -EAGAIN; 3763 3645 } 3764 3646 3765 - static int handle_response_queue(struct iscsi_conn *conn) 3647 + static int iscsit_handle_response_queue(struct iscsi_conn *conn) 3766 3648 { 3649 + struct iscsit_transport *t = conn->conn_transport; 3767 3650 struct iscsi_queue_req *qr; 3768 3651 struct iscsi_cmd *cmd; 3769 3652 u8 state; ··· 3861 3570 state = qr->state; 3862 3571 kmem_cache_free(lio_qr_cache, qr); 3863 3572 3864 - 
check_rsp_state: 3865 - switch (state) { 3866 - case ISTATE_SEND_DATAIN: 3867 - ret = iscsit_send_data_in(cmd, conn); 3868 - if (ret < 0) 3869 - goto err; 3870 - else if (!ret) 3871 - /* more drs */ 3872 - goto check_rsp_state; 3873 - else if (ret == 1) { 3874 - /* all done */ 3875 - spin_lock_bh(&cmd->istate_lock); 3876 - cmd->i_state = ISTATE_SENT_STATUS; 3877 - spin_unlock_bh(&cmd->istate_lock); 3878 - 3879 - if (atomic_read(&conn->check_immediate_queue)) 3880 - return 1; 3881 - 3882 - continue; 3883 - } else if (ret == 2) { 3884 - /* Still must send status, 3885 - SCF_TRANSPORT_TASK_SENSE was set */ 3886 - spin_lock_bh(&cmd->istate_lock); 3887 - cmd->i_state = ISTATE_SEND_STATUS; 3888 - spin_unlock_bh(&cmd->istate_lock); 3889 - state = ISTATE_SEND_STATUS; 3890 - goto check_rsp_state; 3891 - } 3892 - 3893 - break; 3894 - case ISTATE_SEND_STATUS: 3895 - case ISTATE_SEND_STATUS_RECOVERY: 3896 - ret = iscsit_send_status(cmd, conn); 3897 - break; 3898 - case ISTATE_SEND_LOGOUTRSP: 3899 - ret = iscsit_send_logout_response(cmd, conn); 3900 - break; 3901 - case ISTATE_SEND_ASYNCMSG: 3902 - ret = iscsit_send_conn_drop_async_message( 3903 - cmd, conn); 3904 - break; 3905 - case ISTATE_SEND_NOPIN: 3906 - ret = iscsit_send_nopin_response(cmd, conn); 3907 - break; 3908 - case ISTATE_SEND_REJECT: 3909 - ret = iscsit_send_reject(cmd, conn); 3910 - break; 3911 - case ISTATE_SEND_TASKMGTRSP: 3912 - ret = iscsit_send_task_mgt_rsp(cmd, conn); 3913 - if (ret != 0) 3914 - break; 3915 - ret = iscsit_tmr_post_handler(cmd, conn); 3916 - if (ret != 0) 3917 - iscsit_fall_back_to_erl0(conn->sess); 3918 - break; 3919 - case ISTATE_SEND_TEXTRSP: 3920 - ret = iscsit_send_text_rsp(cmd, conn); 3921 - break; 3922 - default: 3923 - pr_err("Unknown Opcode: 0x%02x ITT:" 3924 - " 0x%08x, i_state: %d on CID: %hu\n", 3925 - cmd->iscsi_opcode, cmd->init_task_tag, 3926 - state, conn->cid); 3927 - goto err; 3928 - } 3929 - if (ret < 0) 3930 - goto err; 3931 - 3932 - if (iscsit_send_tx_data(cmd, conn, 
1) < 0) { 3933 - iscsit_tx_thread_wait_for_tcp(conn); 3934 - iscsit_unmap_iovec(cmd); 3935 - goto err; 3936 - } 3937 - iscsit_unmap_iovec(cmd); 3938 - 3939 - switch (state) { 3940 - case ISTATE_SEND_LOGOUTRSP: 3941 - if (!iscsit_logout_post_handler(cmd, conn)) 3942 - goto restart; 3943 - /* fall through */ 3944 - case ISTATE_SEND_STATUS: 3945 - case ISTATE_SEND_ASYNCMSG: 3946 - case ISTATE_SEND_NOPIN: 3947 - case ISTATE_SEND_STATUS_RECOVERY: 3948 - case ISTATE_SEND_TEXTRSP: 3949 - case ISTATE_SEND_TASKMGTRSP: 3950 - spin_lock_bh(&cmd->istate_lock); 3951 - cmd->i_state = ISTATE_SENT_STATUS; 3952 - spin_unlock_bh(&cmd->istate_lock); 3953 - break; 3954 - case ISTATE_SEND_REJECT: 3955 - if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) { 3956 - cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN; 3957 - complete(&cmd->reject_comp); 3958 - goto err; 3959 - } 3960 - complete(&cmd->reject_comp); 3961 - break; 3962 - default: 3963 - pr_err("Unknown Opcode: 0x%02x ITT:" 3964 - " 0x%08x, i_state: %d on CID: %hu\n", 3965 - cmd->iscsi_opcode, cmd->init_task_tag, 3966 - cmd->i_state, conn->cid); 3967 - goto err; 3968 - } 3969 - 3970 - if (atomic_read(&conn->check_immediate_queue)) 3971 - return 1; 3573 + ret = t->iscsit_response_queue(conn, cmd, state); 3574 + if (ret == 1 || ret < 0) 3575 + return ret; 3972 3576 } 3973 3577 3974 3578 return 0; 3975 - 3976 - err: 3977 - return -1; 3978 - restart: 3979 - return -EAGAIN; 3980 3579 } 3981 3580 3982 3581 int iscsi_target_tx_thread(void *arg) ··· 3903 3722 goto transport_err; 3904 3723 3905 3724 get_immediate: 3906 - ret = handle_immediate_queue(conn); 3725 + ret = iscsit_handle_immediate_queue(conn); 3907 3726 if (ret < 0) 3908 3727 goto transport_err; 3909 3728 3910 - ret = handle_response_queue(conn); 3729 + ret = iscsit_handle_response_queue(conn); 3911 3730 if (ret == 1) 3912 3731 goto get_immediate; 3913 3732 else if (ret == -EAGAIN) ··· 3921 3740 goto restart; 3922 3741 out: 3923 3742 return 0; 3743 + } 3744 + 3745 + static int 
iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf) 3746 + { 3747 + struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf; 3748 + struct iscsi_cmd *cmd; 3749 + int ret = 0; 3750 + 3751 + switch (hdr->opcode & ISCSI_OPCODE_MASK) { 3752 + case ISCSI_OP_SCSI_CMD: 3753 + cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 3754 + if (!cmd) 3755 + return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 3756 + 1, buf, conn); 3757 + 3758 + ret = iscsit_handle_scsi_cmd(conn, cmd, buf); 3759 + break; 3760 + case ISCSI_OP_SCSI_DATA_OUT: 3761 + ret = iscsit_handle_data_out(conn, buf); 3762 + break; 3763 + case ISCSI_OP_NOOP_OUT: 3764 + cmd = NULL; 3765 + if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { 3766 + cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 3767 + if (!cmd) 3768 + return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 3769 + 1, buf, conn); 3770 + } 3771 + ret = iscsit_handle_nop_out(conn, cmd, buf); 3772 + break; 3773 + case ISCSI_OP_SCSI_TMFUNC: 3774 + cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 3775 + if (!cmd) 3776 + return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 3777 + 1, buf, conn); 3778 + 3779 + ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf); 3780 + break; 3781 + case ISCSI_OP_TEXT: 3782 + ret = iscsit_handle_text_cmd(conn, buf); 3783 + break; 3784 + case ISCSI_OP_LOGOUT: 3785 + cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 3786 + if (!cmd) 3787 + return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 3788 + 1, buf, conn); 3789 + 3790 + ret = iscsit_handle_logout_cmd(conn, cmd, buf); 3791 + if (ret > 0) 3792 + wait_for_completion_timeout(&conn->conn_logout_comp, 3793 + SECONDS_FOR_LOGOUT_COMP * HZ); 3794 + break; 3795 + case ISCSI_OP_SNACK: 3796 + ret = iscsit_handle_snack(conn, buf); 3797 + break; 3798 + default: 3799 + pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode); 3800 + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 3801 + pr_err("Cannot recover from unknown" 3802 + " opcode while ERL=0, closing iSCSI 
connection.\n"); 3803 + return -1; 3804 + } 3805 + if (!conn->conn_ops->OFMarker) { 3806 + pr_err("Unable to recover from unknown" 3807 + " opcode while OFMarker=No, closing iSCSI" 3808 + " connection.\n"); 3809 + return -1; 3810 + } 3811 + if (iscsit_recover_from_unknown_opcode(conn) < 0) { 3812 + pr_err("Unable to recover from unknown" 3813 + " opcode, closing iSCSI connection.\n"); 3814 + return -1; 3815 + } 3816 + break; 3817 + } 3818 + 3819 + return ret; 3924 3820 } 3925 3821 3926 3822 int iscsi_target_rx_thread(void *arg) ··· 4018 3760 conn = iscsi_rx_thread_pre_handler(ts); 4019 3761 if (!conn) 4020 3762 goto out; 3763 + 3764 + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { 3765 + struct completion comp; 3766 + int rc; 3767 + 3768 + init_completion(&comp); 3769 + rc = wait_for_completion_interruptible(&comp); 3770 + if (rc < 0) 3771 + goto transport_err; 3772 + 3773 + goto out; 3774 + } 4021 3775 4022 3776 while (!kthread_should_stop()) { 4023 3777 /* ··· 4102 3832 goto transport_err; 4103 3833 } 4104 3834 4105 - switch (opcode) { 4106 - case ISCSI_OP_SCSI_CMD: 4107 - if (iscsit_handle_scsi_cmd(conn, buffer) < 0) 4108 - goto transport_err; 4109 - break; 4110 - case ISCSI_OP_SCSI_DATA_OUT: 4111 - if (iscsit_handle_data_out(conn, buffer) < 0) 4112 - goto transport_err; 4113 - break; 4114 - case ISCSI_OP_NOOP_OUT: 4115 - if (iscsit_handle_nop_out(conn, buffer) < 0) 4116 - goto transport_err; 4117 - break; 4118 - case ISCSI_OP_SCSI_TMFUNC: 4119 - if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0) 4120 - goto transport_err; 4121 - break; 4122 - case ISCSI_OP_TEXT: 4123 - if (iscsit_handle_text_cmd(conn, buffer) < 0) 4124 - goto transport_err; 4125 - break; 4126 - case ISCSI_OP_LOGOUT: 4127 - ret = iscsit_handle_logout_cmd(conn, buffer); 4128 - if (ret > 0) { 4129 - wait_for_completion_timeout(&conn->conn_logout_comp, 4130 - SECONDS_FOR_LOGOUT_COMP * HZ); 4131 - goto transport_err; 4132 - } else if (ret < 0) 4133 - goto transport_err; 4134 - break; 
4135 - case ISCSI_OP_SNACK: 4136 - if (iscsit_handle_snack(conn, buffer) < 0) 4137 - goto transport_err; 4138 - break; 4139 - default: 4140 - pr_err("Got unknown iSCSI OpCode: 0x%02x\n", 4141 - opcode); 4142 - if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 4143 - pr_err("Cannot recover from unknown" 4144 - " opcode while ERL=0, closing iSCSI connection" 4145 - ".\n"); 4146 - goto transport_err; 4147 - } 4148 - if (!conn->conn_ops->OFMarker) { 4149 - pr_err("Unable to recover from unknown" 4150 - " opcode while OFMarker=No, closing iSCSI" 4151 - " connection.\n"); 4152 - goto transport_err; 4153 - } 4154 - if (iscsit_recover_from_unknown_opcode(conn) < 0) { 4155 - pr_err("Unable to recover from unknown" 4156 - " opcode, closing iSCSI connection.\n"); 4157 - goto transport_err; 4158 - } 4159 - break; 4160 - } 3835 + ret = iscsi_target_rx_opcode(conn, buffer); 3836 + if (ret < 0) 3837 + goto transport_err; 4161 3838 } 4162 3839 4163 3840 transport_err: ··· 4270 4053 4271 4054 if (conn->sock) 4272 4055 sock_release(conn->sock); 4056 + 4057 + if (conn->conn_transport->iscsit_free_conn) 4058 + conn->conn_transport->iscsit_free_conn(conn); 4059 + 4060 + iscsit_put_transport(conn->conn_transport); 4061 + 4273 4062 conn->thread_set = NULL; 4274 4063 4275 4064 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); ··· 4507 4284 /* 4508 4285 * Return of 0 causes the TX thread to restart. 4509 4286 */ 4510 - static int iscsit_logout_post_handler( 4287 + int iscsit_logout_post_handler( 4511 4288 struct iscsi_cmd *cmd, 4512 4289 struct iscsi_conn *conn) 4513 4290 { ··· 4565 4342 } 4566 4343 return ret; 4567 4344 } 4345 + EXPORT_SYMBOL(iscsit_logout_post_handler); 4568 4346 4569 4347 void iscsit_fail_session(struct iscsi_session *sess) 4570 4348 {
+2 -1
drivers/target/iscsi/iscsi_target.h
··· 16 16 struct iscsi_portal_group *); 17 17 extern int iscsit_del_np(struct iscsi_np *); 18 18 extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *); 19 + extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *); 19 20 extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *); 20 21 extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *); 21 22 extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *); 22 23 extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8); 23 - extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, bool recovery); 24 + extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *, bool recovery); 24 25 extern void iscsit_thread_get_cpumask(struct iscsi_conn *); 25 26 extern int iscsi_target_tx_thread(void *); 26 27 extern int iscsi_target_rx_thread(void *);
+1 -27
drivers/target/iscsi/iscsi_target_auth.c
··· 49 49 } 50 50 } 51 51 52 - static void chap_set_random(char *data, int length) 53 - { 54 - long r; 55 - unsigned n; 56 - 57 - while (length > 0) { 58 - get_random_bytes(&r, sizeof(long)); 59 - r = r ^ (r >> 8); 60 - r = r ^ (r >> 4); 61 - n = r & 0x7; 62 - 63 - get_random_bytes(&r, sizeof(long)); 64 - r = r ^ (r >> 8); 65 - r = r ^ (r >> 5); 66 - n = (n << 3) | (r & 0x7); 67 - 68 - get_random_bytes(&r, sizeof(long)); 69 - r = r ^ (r >> 8); 70 - r = r ^ (r >> 5); 71 - n = (n << 2) | (r & 0x3); 72 - 73 - *data++ = n; 74 - length--; 75 - } 76 - } 77 - 78 52 static void chap_gen_challenge( 79 53 struct iscsi_conn *conn, 80 54 int caller, ··· 60 86 61 87 memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1); 62 88 63 - chap_set_random(chap->challenge, CHAP_CHALLENGE_LENGTH); 89 + get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH); 64 90 chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge, 65 91 CHAP_CHALLENGE_LENGTH); 66 92 /*
+94 -4
drivers/target/iscsi/iscsi_target_configfs.c
··· 27 27 #include <target/target_core_fabric_configfs.h> 28 28 #include <target/target_core_configfs.h> 29 29 #include <target/configfs_macros.h> 30 + #include <target/iscsi/iscsi_transport.h> 30 31 31 32 #include "iscsi_target_core.h" 32 33 #include "iscsi_target_parameters.h" ··· 125 124 126 125 TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR); 127 126 127 + static ssize_t lio_target_np_show_iser( 128 + struct se_tpg_np *se_tpg_np, 129 + char *page) 130 + { 131 + struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np, 132 + struct iscsi_tpg_np, se_tpg_np); 133 + struct iscsi_tpg_np *tpg_np_iser; 134 + ssize_t rb; 135 + 136 + tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); 137 + if (tpg_np_iser) 138 + rb = sprintf(page, "1\n"); 139 + else 140 + rb = sprintf(page, "0\n"); 141 + 142 + return rb; 143 + } 144 + 145 + static ssize_t lio_target_np_store_iser( 146 + struct se_tpg_np *se_tpg_np, 147 + const char *page, 148 + size_t count) 149 + { 150 + struct iscsi_np *np; 151 + struct iscsi_portal_group *tpg; 152 + struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np, 153 + struct iscsi_tpg_np, se_tpg_np); 154 + struct iscsi_tpg_np *tpg_np_iser = NULL; 155 + char *endptr; 156 + u32 op; 157 + int rc; 158 + 159 + op = simple_strtoul(page, &endptr, 0); 160 + if ((op != 1) && (op != 0)) { 161 + pr_err("Illegal value for tpg_enable: %u\n", op); 162 + return -EINVAL; 163 + } 164 + np = tpg_np->tpg_np; 165 + if (!np) { 166 + pr_err("Unable to locate struct iscsi_np from" 167 + " struct iscsi_tpg_np\n"); 168 + return -EINVAL; 169 + } 170 + 171 + tpg = tpg_np->tpg; 172 + if (iscsit_get_tpg(tpg) < 0) 173 + return -EINVAL; 174 + 175 + if (op) { 176 + int rc = request_module("ib_isert"); 177 + if (rc != 0) 178 + pr_warn("Unable to request_module for ib_isert\n"); 179 + 180 + tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, 181 + np->np_ip, tpg_np, ISCSI_INFINIBAND); 182 + if (!tpg_np_iser || IS_ERR(tpg_np_iser)) 183 + goto out; 184 + } else { 
185 + tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); 186 + if (!tpg_np_iser) 187 + goto out; 188 + 189 + rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); 190 + if (rc < 0) 191 + goto out; 192 + } 193 + 194 + printk("lio_target_np_store_iser() done, op: %d\n", op); 195 + 196 + iscsit_put_tpg(tpg); 197 + return count; 198 + out: 199 + iscsit_put_tpg(tpg); 200 + return -EINVAL; 201 + } 202 + 203 + TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR); 204 + 128 205 static struct configfs_attribute *lio_target_portal_attrs[] = { 129 206 &lio_target_np_sctp.attr, 207 + &lio_target_np_iser.attr, 130 208 NULL, 131 209 }; 132 210 ··· 1616 1536 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1617 1537 1618 1538 cmd->i_state = ISTATE_SEND_DATAIN; 1619 - iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 1539 + cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd); 1540 + 1620 1541 return 0; 1621 1542 } 1622 1543 1623 1544 static int lio_write_pending(struct se_cmd *se_cmd) 1624 1545 { 1625 1546 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1547 + struct iscsi_conn *conn = cmd->conn; 1626 1548 1627 1549 if (!cmd->immediate_data && !cmd->unsolicited_data) 1628 - return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false); 1550 + return conn->conn_transport->iscsit_get_dataout(conn, cmd, false); 1629 1551 1630 1552 return 0; 1631 1553 } ··· 1649 1567 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1650 1568 1651 1569 cmd->i_state = ISTATE_SEND_STATUS; 1652 - iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 1570 + cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd); 1571 + 1653 1572 return 0; 1654 1573 } 1655 1574 ··· 1779 1696 iscsit_set_default_node_attribues(acl); 1780 1697 } 1781 1698 1699 + static int lio_check_stop_free(struct se_cmd *se_cmd) 1700 + { 1701 + return target_put_sess_cmd(se_cmd->se_sess, se_cmd); 1702 + } 1703 + 1782 
1704 static void lio_release_cmd(struct se_cmd *se_cmd) 1783 1705 { 1784 1706 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1785 1707 1786 - iscsit_release_cmd(cmd); 1708 + pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd); 1709 + cmd->release_cmd(cmd); 1787 1710 } 1788 1711 1789 1712 /* End functions for target_core_fabric_ops */ ··· 1829 1740 fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl; 1830 1741 fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl; 1831 1742 fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index; 1743 + fabric->tf_ops.check_stop_free = &lio_check_stop_free, 1832 1744 fabric->tf_ops.release_cmd = &lio_release_cmd; 1833 1745 fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session; 1834 1746 fabric->tf_ops.close_session = &lio_tpg_close_session;
+23 -3
drivers/target/iscsi/iscsi_target_core.h
··· 60 60 61 61 #define ISCSI_IOV_DATA_BUFFER 5 62 62 63 - enum tpg_np_network_transport_table { 63 + enum iscsit_transport_type { 64 64 ISCSI_TCP = 0, 65 65 ISCSI_SCTP_TCP = 1, 66 66 ISCSI_SCTP_UDP = 2, ··· 244 244 u8 IFMarker; /* [0,1] == [No,Yes] */ 245 245 u32 OFMarkInt; /* [1..65535] */ 246 246 u32 IFMarkInt; /* [1..65535] */ 247 + /* 248 + * iSER specific connection parameters 249 + */ 250 + u32 InitiatorRecvDataSegmentLength; /* [512..2**24-1] */ 251 + u32 TargetRecvDataSegmentLength; /* [512..2**24-1] */ 247 252 }; 248 253 249 254 struct iscsi_sess_ops { ··· 270 265 u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */ 271 266 u8 ErrorRecoveryLevel; /* [0..2] */ 272 267 u8 SessionType; /* [0,1] == [Normal,Discovery]*/ 268 + /* 269 + * iSER specific session parameters 270 + */ 271 + u8 RDMAExtensions; /* [0,1] == [No,Yes] */ 273 272 }; 274 273 275 274 struct iscsi_queue_req { ··· 293 284 }; 294 285 295 286 struct iscsi_param_list { 287 + bool iser; 296 288 struct list_head param_list; 297 289 struct list_head extra_response_list; 298 290 }; ··· 485 475 u32 first_data_sg_off; 486 476 u32 kmapped_nents; 487 477 sense_reason_t sense_reason; 478 + void (*release_cmd)(struct iscsi_cmd *); 488 479 } ____cacheline_aligned; 489 480 490 481 struct iscsi_tmr_req { ··· 514 503 u16 login_port; 515 504 u16 local_port; 516 505 int net_size; 506 + int login_family; 517 507 u32 auth_id; 518 508 u32 conn_flags; 519 509 /* Used for iscsi_tx_login_rsp() */ ··· 574 562 struct list_head immed_queue_list; 575 563 struct list_head response_queue_list; 576 564 struct iscsi_conn_ops *conn_ops; 565 + struct iscsi_login *conn_login; 566 + struct iscsit_transport *conn_transport; 577 567 struct iscsi_param_list *param_list; 578 568 /* Used for per connection auth state machine */ 579 569 void *auth_protocol; 570 + void *context; 580 571 struct iscsi_login_thread_s *login_thread; 581 572 struct iscsi_portal_group *tpg; 582 573 /* Pointer to parent session */ ··· 678 663 u8 first_request; 679 
664 u8 version_min; 680 665 u8 version_max; 666 + u8 login_complete; 667 + u8 login_failed; 681 668 char isid[6]; 682 669 u32 cmd_sn; 683 670 itt_t init_task_tag; ··· 687 670 u32 rsp_length; 688 671 u16 cid; 689 672 u16 tsih; 690 - char *req; 691 - char *rsp; 673 + char req[ISCSI_HDR_LEN]; 674 + char rsp[ISCSI_HDR_LEN]; 692 675 char *req_buf; 693 676 char *rsp_buf; 677 + struct iscsi_conn *conn; 694 678 } ____cacheline_aligned; 695 679 696 680 struct iscsi_node_attrib { ··· 772 754 struct task_struct *np_thread; 773 755 struct timer_list np_login_timer; 774 756 struct iscsi_portal_group *np_login_tpg; 757 + void *np_context; 758 + struct iscsit_transport *np_transport; 775 759 struct list_head np_list; 776 760 } ____cacheline_aligned; 777 761
+6 -1
drivers/target/iscsi/iscsi_target_device.c
··· 60 60 61 61 cmd->maxcmdsn_inc = 1; 62 62 63 - mutex_lock(&sess->cmdsn_mutex); 63 + if (!mutex_trylock(&sess->cmdsn_mutex)) { 64 + sess->max_cmd_sn += 1; 65 + pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn); 66 + return; 67 + } 64 68 sess->max_cmd_sn += 1; 65 69 pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn); 66 70 mutex_unlock(&sess->cmdsn_mutex); 67 71 } 72 + EXPORT_SYMBOL(iscsit_increment_maxcmdsn);
+8 -5
drivers/target/iscsi/iscsi_target_erl1.c
··· 22 22 #include <scsi/iscsi_proto.h> 23 23 #include <target/target_core_base.h> 24 24 #include <target/target_core_fabric.h> 25 + #include <target/iscsi/iscsi_transport.h> 25 26 26 27 #include "iscsi_target_core.h" 27 28 #include "iscsi_target_seq_pdu_list.h" ··· 53 52 int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got; 54 53 u32 length, padding, offset = 0, size; 55 54 struct kvec iov; 55 + 56 + if (conn->sess->sess_ops->RDMAExtensions) 57 + return 0; 56 58 57 59 length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len; 58 60 ··· 923 919 int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo) 924 920 { 925 921 struct se_cmd *se_cmd = &cmd->se_cmd; 922 + struct iscsi_conn *conn = cmd->conn; 926 923 int lr = 0; 927 924 928 925 spin_lock_bh(&cmd->istate_lock); ··· 986 981 return 0; 987 982 988 983 iscsit_set_dataout_sequence_values(cmd); 989 - iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false); 984 + conn->conn_transport->iscsit_get_dataout(conn, cmd, false); 990 985 } 991 986 return 0; 992 987 } ··· 1004 999 if (transport_check_aborted_status(se_cmd, 1) != 0) 1005 1000 return 0; 1006 1001 1007 - iscsit_set_dataout_sequence_values(cmd); 1008 - spin_lock_bh(&cmd->dataout_timeout_lock); 1009 - iscsit_start_dataout_timer(cmd, cmd->conn); 1010 - spin_unlock_bh(&cmd->dataout_timeout_lock); 1002 + iscsit_set_unsoliticed_dataout(cmd); 1011 1003 } 1012 1004 return transport_handle_cdb_direct(&cmd->se_cmd); 1013 1005 ··· 1292 1290 cmd->init_task_tag); 1293 1291 spin_unlock_bh(&cmd->dataout_timeout_lock); 1294 1292 } 1293 + EXPORT_SYMBOL(iscsit_stop_dataout_timer);
+344 -128
drivers/target/iscsi/iscsi_target_login.c
··· 39 39 #include "iscsi_target.h" 40 40 #include "iscsi_target_parameters.h" 41 41 42 - static int iscsi_login_init_conn(struct iscsi_conn *conn) 42 + #include <target/iscsi/iscsi_transport.h> 43 + 44 + static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn) 43 45 { 46 + struct iscsi_login *login; 47 + 48 + login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL); 49 + if (!login) { 50 + pr_err("Unable to allocate memory for struct iscsi_login.\n"); 51 + return NULL; 52 + } 53 + login->conn = conn; 54 + login->first_request = 1; 55 + 56 + login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL); 57 + if (!login->req_buf) { 58 + pr_err("Unable to allocate memory for response buffer.\n"); 59 + goto out_login; 60 + } 61 + 62 + login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL); 63 + if (!login->rsp_buf) { 64 + pr_err("Unable to allocate memory for request buffer.\n"); 65 + goto out_req_buf; 66 + } 67 + 68 + conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL); 69 + if (!conn->conn_ops) { 70 + pr_err("Unable to allocate memory for" 71 + " struct iscsi_conn_ops.\n"); 72 + goto out_rsp_buf; 73 + } 74 + 44 75 init_waitqueue_head(&conn->queues_wq); 45 76 INIT_LIST_HEAD(&conn->conn_list); 46 77 INIT_LIST_HEAD(&conn->conn_cmd_list); ··· 93 62 94 63 if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) { 95 64 pr_err("Unable to allocate conn->conn_cpumask\n"); 96 - return -ENOMEM; 65 + goto out_conn_ops; 97 66 } 67 + conn->conn_login = login; 98 68 99 - return 0; 69 + return login; 70 + 71 + out_conn_ops: 72 + kfree(conn->conn_ops); 73 + out_rsp_buf: 74 + kfree(login->rsp_buf); 75 + out_req_buf: 76 + kfree(login->req_buf); 77 + out_login: 78 + kfree(login); 79 + return NULL; 100 80 } 101 81 102 82 /* ··· 340 298 struct iscsi_node_attrib *na; 341 299 struct iscsi_session *sess = conn->sess; 342 300 unsigned char buf[32]; 301 + bool iser = false; 343 302 344 303 sess->tpg = conn->tpg; 345 304 ··· 362 319 return -1; 363 320 } 364 321 
365 - iscsi_set_keys_to_negotiate(0, conn->param_list); 322 + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) 323 + iser = true; 324 + 325 + iscsi_set_keys_to_negotiate(conn->param_list, iser); 366 326 367 327 if (sess->sess_ops->SessionType) 368 328 return iscsi_set_keys_irrelevant_for_discovery( ··· 403 357 404 358 if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0) 405 359 return -1; 360 + /* 361 + * Set RDMAExtensions=Yes by default for iSER enabled network portals 362 + */ 363 + if (iser) { 364 + struct iscsi_param *param; 365 + unsigned long mrdsl, off; 366 + int rc; 367 + 368 + sprintf(buf, "RDMAExtensions=Yes"); 369 + if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { 370 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 371 + ISCSI_LOGIN_STATUS_NO_RESOURCES); 372 + return -1; 373 + } 374 + /* 375 + * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for 376 + * Immediate Data + Unsolicitied Data-OUT if necessary.. 377 + */ 378 + param = iscsi_find_param_from_key("MaxRecvDataSegmentLength", 379 + conn->param_list); 380 + if (!param) { 381 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 382 + ISCSI_LOGIN_STATUS_NO_RESOURCES); 383 + return -1; 384 + } 385 + rc = strict_strtoul(param->value, 0, &mrdsl); 386 + if (rc < 0) { 387 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 388 + ISCSI_LOGIN_STATUS_NO_RESOURCES); 389 + return -1; 390 + } 391 + off = mrdsl % PAGE_SIZE; 392 + if (!off) 393 + return 0; 394 + 395 + if (mrdsl < PAGE_SIZE) 396 + mrdsl = PAGE_SIZE; 397 + else 398 + mrdsl -= off; 399 + 400 + pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down" 401 + " to PAGE_SIZE\n", mrdsl); 402 + 403 + sprintf(buf, "MaxRecvDataSegmentLength=%lu\n", mrdsl); 404 + if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { 405 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 406 + ISCSI_LOGIN_STATUS_NO_RESOURCES); 407 + return -1; 408 + } 409 + } 406 410 407 411 return 0; 408 412 } ··· 532 436 
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 533 437 struct se_session *se_sess, *se_sess_tmp; 534 438 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; 439 + bool iser = false; 535 440 536 441 spin_lock_bh(&se_tpg->session_lock); 537 442 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list, ··· 582 485 return -1; 583 486 } 584 487 585 - iscsi_set_keys_to_negotiate(0, conn->param_list); 488 + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) 489 + iser = true; 490 + 491 + iscsi_set_keys_to_negotiate(conn->param_list, iser); 586 492 /* 587 493 * Need to send TargetPortalGroupTag back in first login response 588 494 * on any iSCSI connection where the Initiator provides TargetName. ··· 674 574 static void iscsi_post_login_start_timers(struct iscsi_conn *conn) 675 575 { 676 576 struct iscsi_session *sess = conn->sess; 577 + /* 578 + * FIXME: Unsolicitied NopIN support for ISER 579 + */ 580 + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) 581 + return; 677 582 678 583 if (!sess->sess_ops->SessionType) 679 584 iscsit_start_nopin_timer(conn); ··· 737 632 spin_unlock_bh(&sess->conn_lock); 738 633 739 634 iscsi_post_login_start_timers(conn); 635 + 740 636 iscsi_activate_thread_set(conn, ts); 741 637 /* 742 638 * Determine CPU mask to ensure connection's RX and TX kthreads ··· 867 761 spin_unlock_bh(&np->np_thread_lock); 868 762 } 869 763 870 - int iscsi_target_setup_login_socket( 764 + int iscsit_setup_np( 871 765 struct iscsi_np *np, 872 766 struct __kernel_sockaddr_storage *sockaddr) 873 767 { 874 - struct socket *sock; 768 + struct socket *sock = NULL; 875 769 int backlog = 5, ret, opt = 0, len; 876 770 877 771 switch (np->np_network_transport) { ··· 887 781 np->np_ip_proto = IPPROTO_SCTP; 888 782 np->np_sock_type = SOCK_SEQPACKET; 889 783 break; 890 - case ISCSI_IWARP_TCP: 891 - case ISCSI_IWARP_SCTP: 892 - case ISCSI_INFINIBAND: 893 784 default: 894 785 pr_err("Unsupported network_transport: %d\n", 895 786 
np->np_network_transport); 896 787 return -EINVAL; 897 788 } 789 + 790 + np->np_ip_proto = IPPROTO_TCP; 791 + np->np_sock_type = SOCK_STREAM; 898 792 899 793 ret = sock_create(sockaddr->ss_family, np->np_sock_type, 900 794 np->np_ip_proto, &sock); ··· 959 853 } 960 854 961 855 return 0; 962 - 963 856 fail: 964 857 np->np_socket = NULL; 965 858 if (sock) ··· 966 861 return ret; 967 862 } 968 863 864 + int iscsi_target_setup_login_socket( 865 + struct iscsi_np *np, 866 + struct __kernel_sockaddr_storage *sockaddr) 867 + { 868 + struct iscsit_transport *t; 869 + int rc; 870 + 871 + t = iscsit_get_transport(np->np_network_transport); 872 + if (!t) 873 + return -EINVAL; 874 + 875 + rc = t->iscsit_setup_np(np, sockaddr); 876 + if (rc < 0) { 877 + iscsit_put_transport(t); 878 + return rc; 879 + } 880 + 881 + np->np_transport = t; 882 + printk("Set np->np_transport to %p -> %s\n", np->np_transport, 883 + np->np_transport->name); 884 + return 0; 885 + } 886 + 887 + int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) 888 + { 889 + struct socket *new_sock, *sock = np->np_socket; 890 + struct sockaddr_in sock_in; 891 + struct sockaddr_in6 sock_in6; 892 + int rc, err; 893 + 894 + rc = kernel_accept(sock, &new_sock, 0); 895 + if (rc < 0) 896 + return rc; 897 + 898 + conn->sock = new_sock; 899 + conn->login_family = np->np_sockaddr.ss_family; 900 + printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock); 901 + 902 + if (np->np_sockaddr.ss_family == AF_INET6) { 903 + memset(&sock_in6, 0, sizeof(struct sockaddr_in6)); 904 + 905 + rc = conn->sock->ops->getname(conn->sock, 906 + (struct sockaddr *)&sock_in6, &err, 1); 907 + if (!rc) { 908 + snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", 909 + &sock_in6.sin6_addr.in6_u); 910 + conn->login_port = ntohs(sock_in6.sin6_port); 911 + } 912 + 913 + rc = conn->sock->ops->getname(conn->sock, 914 + (struct sockaddr *)&sock_in6, &err, 0); 915 + if (!rc) { 916 + snprintf(conn->local_ip, 
sizeof(conn->local_ip), "%pI6c", 917 + &sock_in6.sin6_addr.in6_u); 918 + conn->local_port = ntohs(sock_in6.sin6_port); 919 + } 920 + } else { 921 + memset(&sock_in, 0, sizeof(struct sockaddr_in)); 922 + 923 + rc = conn->sock->ops->getname(conn->sock, 924 + (struct sockaddr *)&sock_in, &err, 1); 925 + if (!rc) { 926 + sprintf(conn->login_ip, "%pI4", 927 + &sock_in.sin_addr.s_addr); 928 + conn->login_port = ntohs(sock_in.sin_port); 929 + } 930 + 931 + rc = conn->sock->ops->getname(conn->sock, 932 + (struct sockaddr *)&sock_in, &err, 0); 933 + if (!rc) { 934 + sprintf(conn->local_ip, "%pI4", 935 + &sock_in.sin_addr.s_addr); 936 + conn->local_port = ntohs(sock_in.sin_port); 937 + } 938 + } 939 + 940 + return 0; 941 + } 942 + 943 + int iscsit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) 944 + { 945 + struct iscsi_login_req *login_req; 946 + u32 padding = 0, payload_length; 947 + 948 + if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0) 949 + return -1; 950 + 951 + login_req = (struct iscsi_login_req *)login->req; 952 + payload_length = ntoh24(login_req->dlength); 953 + padding = ((-payload_length) & 3); 954 + 955 + pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x," 956 + " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n", 957 + login_req->flags, login_req->itt, login_req->cmdsn, 958 + login_req->exp_statsn, login_req->cid, payload_length); 959 + /* 960 + * Setup the initial iscsi_login values from the leading 961 + * login request PDU. 962 + */ 963 + if (login->first_request) { 964 + login_req = (struct iscsi_login_req *)login->req; 965 + login->leading_connection = (!login_req->tsih) ? 
1 : 0; 966 + login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags); 967 + login->version_min = login_req->min_version; 968 + login->version_max = login_req->max_version; 969 + memcpy(login->isid, login_req->isid, 6); 970 + login->cmd_sn = be32_to_cpu(login_req->cmdsn); 971 + login->init_task_tag = login_req->itt; 972 + login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); 973 + login->cid = be16_to_cpu(login_req->cid); 974 + login->tsih = be16_to_cpu(login_req->tsih); 975 + } 976 + 977 + if (iscsi_target_check_login_request(conn, login) < 0) 978 + return -1; 979 + 980 + memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS); 981 + if (iscsi_login_rx_data(conn, login->req_buf, 982 + payload_length + padding) < 0) 983 + return -1; 984 + 985 + return 0; 986 + } 987 + 988 + int iscsit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, 989 + u32 length) 990 + { 991 + if (iscsi_login_tx_data(conn, login->rsp, login->rsp_buf, length) < 0) 992 + return -1; 993 + 994 + return 0; 995 + } 996 + 997 + static int 998 + iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t) 999 + { 1000 + int rc; 1001 + 1002 + if (!t->owner) { 1003 + conn->conn_transport = t; 1004 + return 0; 1005 + } 1006 + 1007 + rc = try_module_get(t->owner); 1008 + if (!rc) { 1009 + pr_err("try_module_get() failed for %s\n", t->name); 1010 + return -EINVAL; 1011 + } 1012 + 1013 + conn->conn_transport = t; 1014 + return 0; 1015 + } 1016 + 969 1017 static int __iscsi_target_login_thread(struct iscsi_np *np) 970 1018 { 971 - u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0; 972 - int err, ret = 0, stop; 1019 + u8 *buffer, zero_tsih = 0; 1020 + int ret = 0, rc, stop; 973 1021 struct iscsi_conn *conn = NULL; 974 1022 struct iscsi_login *login; 975 1023 struct iscsi_portal_group *tpg = NULL; 976 - struct socket *new_sock, *sock; 977 - struct kvec iov; 978 1024 struct iscsi_login_req *pdu; 979 - struct sockaddr_in sock_in; 980 - struct sockaddr_in6 sock_in6; 
981 1025 982 1026 flush_signals(current); 983 - sock = np->np_socket; 984 1027 985 1028 spin_lock_bh(&np->np_thread_lock); 986 1029 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { ··· 1139 886 } 1140 887 spin_unlock_bh(&np->np_thread_lock); 1141 888 1142 - if (kernel_accept(sock, &new_sock, 0) < 0) { 1143 - spin_lock_bh(&np->np_thread_lock); 1144 - if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 1145 - spin_unlock_bh(&np->np_thread_lock); 1146 - complete(&np->np_restart_comp); 1147 - /* Get another socket */ 1148 - return 1; 1149 - } 1150 - spin_unlock_bh(&np->np_thread_lock); 1151 - goto out; 1152 - } 1153 - iscsi_start_login_thread_timer(np); 1154 - 1155 889 conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL); 1156 890 if (!conn) { 1157 891 pr_err("Could not allocate memory for" 1158 892 " new connection\n"); 1159 - sock_release(new_sock); 1160 893 /* Get another socket */ 1161 894 return 1; 1162 895 } 1163 - 1164 896 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 1165 897 conn->conn_state = TARG_CONN_STATE_FREE; 1166 - conn->sock = new_sock; 1167 898 1168 - pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n"); 1169 - conn->conn_state = TARG_CONN_STATE_XPT_UP; 899 + if (iscsit_conn_set_transport(conn, np->np_transport) < 0) { 900 + kfree(conn); 901 + return 1; 902 + } 1170 903 1171 - /* 1172 - * Allocate conn->conn_ops early as a failure calling 1173 - * iscsit_tx_login_rsp() below will call tx_data(). 
1174 - */ 1175 - conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL); 1176 - if (!conn->conn_ops) { 1177 - pr_err("Unable to allocate memory for" 1178 - " struct iscsi_conn_ops.\n"); 1179 - goto new_sess_out; 904 + rc = np->np_transport->iscsit_accept_np(np, conn); 905 + if (rc == -ENOSYS) { 906 + complete(&np->np_restart_comp); 907 + iscsit_put_transport(conn->conn_transport); 908 + kfree(conn); 909 + conn = NULL; 910 + goto exit; 911 + } else if (rc < 0) { 912 + spin_lock_bh(&np->np_thread_lock); 913 + if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 914 + spin_unlock_bh(&np->np_thread_lock); 915 + complete(&np->np_restart_comp); 916 + if (ret == -ENODEV) { 917 + iscsit_put_transport(conn->conn_transport); 918 + kfree(conn); 919 + conn = NULL; 920 + goto out; 921 + } 922 + /* Get another socket */ 923 + return 1; 924 + } 925 + spin_unlock_bh(&np->np_thread_lock); 926 + iscsit_put_transport(conn->conn_transport); 927 + kfree(conn); 928 + conn = NULL; 929 + goto out; 1180 930 } 1181 931 /* 1182 932 * Perform the remaining iSCSI connection initialization items.. 1183 933 */ 1184 - if (iscsi_login_init_conn(conn) < 0) 1185 - goto new_sess_out; 1186 - 1187 - memset(buffer, 0, ISCSI_HDR_LEN); 1188 - memset(&iov, 0, sizeof(struct kvec)); 1189 - iov.iov_base = buffer; 1190 - iov.iov_len = ISCSI_HDR_LEN; 1191 - 1192 - if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) <= 0) { 1193 - pr_err("rx_data() returned an error.\n"); 934 + login = iscsi_login_init_conn(conn); 935 + if (!login) { 1194 936 goto new_sess_out; 1195 937 } 1196 938 1197 - iscsi_opcode = (buffer[0] & ISCSI_OPCODE_MASK); 1198 - if (!(iscsi_opcode & ISCSI_OP_LOGIN)) { 1199 - pr_err("First opcode is not login request," 1200 - " failing login request.\n"); 939 + iscsi_start_login_thread_timer(np); 940 + 941 + pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n"); 942 + conn->conn_state = TARG_CONN_STATE_XPT_UP; 943 + /* 944 + * This will process the first login request + payload.. 
945 + */ 946 + rc = np->np_transport->iscsit_get_login_rx(conn, login); 947 + if (rc == 1) 948 + return 1; 949 + else if (rc < 0) 1201 950 goto new_sess_out; 1202 - } 1203 951 1204 - pdu = (struct iscsi_login_req *) buffer; 1205 - 952 + buffer = &login->req[0]; 953 + pdu = (struct iscsi_login_req *)buffer; 1206 954 /* 1207 955 * Used by iscsit_tx_login_rsp() for Login Resonses PDUs 1208 956 * when Status-Class != 0. 1209 957 */ 1210 - conn->login_itt = pdu->itt; 958 + conn->login_itt = pdu->itt; 1211 959 1212 960 spin_lock_bh(&np->np_thread_lock); 1213 961 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { ··· 1221 967 } 1222 968 spin_unlock_bh(&np->np_thread_lock); 1223 969 1224 - if (np->np_sockaddr.ss_family == AF_INET6) { 1225 - memset(&sock_in6, 0, sizeof(struct sockaddr_in6)); 1226 - 1227 - if (conn->sock->ops->getname(conn->sock, 1228 - (struct sockaddr *)&sock_in6, &err, 1) < 0) { 1229 - pr_err("sock_ops->getname() failed.\n"); 1230 - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1231 - ISCSI_LOGIN_STATUS_TARGET_ERROR); 1232 - goto new_sess_out; 1233 - } 1234 - snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", 1235 - &sock_in6.sin6_addr.in6_u); 1236 - conn->login_port = ntohs(sock_in6.sin6_port); 1237 - 1238 - if (conn->sock->ops->getname(conn->sock, 1239 - (struct sockaddr *)&sock_in6, &err, 0) < 0) { 1240 - pr_err("sock_ops->getname() failed.\n"); 1241 - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1242 - ISCSI_LOGIN_STATUS_TARGET_ERROR); 1243 - goto new_sess_out; 1244 - } 1245 - snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c", 1246 - &sock_in6.sin6_addr.in6_u); 1247 - conn->local_port = ntohs(sock_in6.sin6_port); 1248 - 1249 - } else { 1250 - memset(&sock_in, 0, sizeof(struct sockaddr_in)); 1251 - 1252 - if (conn->sock->ops->getname(conn->sock, 1253 - (struct sockaddr *)&sock_in, &err, 1) < 0) { 1254 - pr_err("sock_ops->getname() failed.\n"); 1255 - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1256 - 
ISCSI_LOGIN_STATUS_TARGET_ERROR); 1257 - goto new_sess_out; 1258 - } 1259 - sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr); 1260 - conn->login_port = ntohs(sock_in.sin_port); 1261 - 1262 - if (conn->sock->ops->getname(conn->sock, 1263 - (struct sockaddr *)&sock_in, &err, 0) < 0) { 1264 - pr_err("sock_ops->getname() failed.\n"); 1265 - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1266 - ISCSI_LOGIN_STATUS_TARGET_ERROR); 1267 - goto new_sess_out; 1268 - } 1269 - sprintf(conn->local_ip, "%pI4", &sock_in.sin_addr.s_addr); 1270 - conn->local_port = ntohs(sock_in.sin_port); 1271 - } 1272 - 1273 970 conn->network_transport = np->np_network_transport; 1274 971 1275 972 pr_debug("Received iSCSI login request from %s on %s Network" 1276 - " Portal %s:%hu\n", conn->login_ip, 1277 - (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP", 1278 - conn->local_ip, conn->local_port); 973 + " Portal %s:%hu\n", conn->login_ip, np->np_transport->name, 974 + conn->local_ip, conn->local_port); 1279 975 1280 976 pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n"); 1281 977 conn->conn_state = TARG_CONN_STATE_IN_LOGIN; ··· 1254 1050 if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0) 1255 1051 goto new_sess_out; 1256 1052 } 1257 - 1258 1053 /* 1259 - * This will process the first login request, and call 1260 - * iscsi_target_locate_portal(), and return a valid struct iscsi_login. 
1054 + * SessionType: Discovery 1055 + * 1056 + * Locates Default Portal 1057 + * 1058 + * SessionType: Normal 1059 + * 1060 + * Locates Target Portal from NP -> Target IQN 1261 1061 */ 1262 - login = iscsi_target_init_negotiation(np, conn, buffer); 1263 - if (!login) { 1062 + rc = iscsi_target_locate_portal(np, conn, login); 1063 + if (rc < 0) { 1264 1064 tpg = conn->tpg; 1265 1065 goto new_sess_out; 1266 1066 } ··· 1276 1068 } 1277 1069 1278 1070 if (zero_tsih) { 1279 - if (iscsi_login_zero_tsih_s2(conn) < 0) { 1280 - iscsi_target_nego_release(login, conn); 1071 + if (iscsi_login_zero_tsih_s2(conn) < 0) 1281 1072 goto new_sess_out; 1282 - } 1283 1073 } else { 1284 - if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) { 1285 - iscsi_target_nego_release(login, conn); 1074 + if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) 1286 1075 goto old_sess_out; 1287 - } 1288 1076 } 1289 1077 1290 1078 if (iscsi_target_start_negotiation(login, conn) < 0) ··· 1357 1153 iscsi_release_param_list(conn->param_list); 1358 1154 conn->param_list = NULL; 1359 1155 } 1360 - if (conn->sock) 1156 + iscsi_target_nego_release(conn); 1157 + 1158 + if (conn->sock) { 1361 1159 sock_release(conn->sock); 1160 + conn->sock = NULL; 1161 + } 1162 + 1163 + if (conn->conn_transport->iscsit_free_conn) 1164 + conn->conn_transport->iscsit_free_conn(conn); 1165 + 1166 + iscsit_put_transport(conn->conn_transport); 1167 + 1362 1168 kfree(conn); 1363 1169 1364 1170 if (tpg) { ··· 1386 1172 /* Wait for another socket.. */ 1387 1173 if (!stop) 1388 1174 return 1; 1389 - 1175 + exit: 1390 1176 iscsi_stop_login_thread_timer(np); 1391 1177 spin_lock_bh(&np->np_thread_lock); 1392 1178 np->np_thread_state = ISCSI_NP_THREAD_EXIT; 1179 + np->np_thread = NULL; 1393 1180 spin_unlock_bh(&np->np_thread_lock); 1181 + 1394 1182 return 0; 1395 1183 } 1396 1184
+6
drivers/target/iscsi/iscsi_target_login.h
··· 4 4 extern int iscsi_login_setup_crypto(struct iscsi_conn *); 5 5 extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *); 6 6 extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32); 7 + extern int iscsit_setup_np(struct iscsi_np *, 8 + struct __kernel_sockaddr_storage *); 7 9 extern int iscsi_target_setup_login_socket(struct iscsi_np *, 8 10 struct __kernel_sockaddr_storage *); 11 + extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *); 12 + extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); 13 + extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); 14 + extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); 9 15 extern int iscsi_target_login_thread(void *); 10 16 extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *); 11 17
+24 -170
drivers/target/iscsi/iscsi_target_nego.c
··· 22 22 #include <scsi/iscsi_proto.h> 23 23 #include <target/target_core_base.h> 24 24 #include <target/target_core_fabric.h> 25 + #include <target/iscsi/iscsi_transport.h> 25 26 26 27 #include "iscsi_target_core.h" 27 28 #include "iscsi_target_parameters.h" ··· 170 169 kfree(conn->auth_protocol); 171 170 } 172 171 173 - static int iscsi_target_check_login_request( 172 + int iscsi_target_check_login_request( 174 173 struct iscsi_conn *conn, 175 174 struct iscsi_login *login) 176 175 { ··· 201 200 return -1; 202 201 } 203 202 204 - req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2; 205 - req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK); 203 + req_csg = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags); 204 + req_nsg = ISCSI_LOGIN_NEXT_STAGE(login_req->flags); 206 205 207 206 if (req_csg != login->current_stage) { 208 207 pr_err("Initiator unexpectedly changed login stage" ··· 353 352 354 353 padding = ((-login->rsp_length) & 3); 355 354 356 - if (iscsi_login_tx_data( 357 - conn, 358 - login->rsp, 359 - login->rsp_buf, 360 - login->rsp_length + padding) < 0) 355 + if (conn->conn_transport->iscsit_put_login_tx(conn, login, 356 + login->rsp_length + padding) < 0) 361 357 return -1; 362 358 363 359 login->rsp_length = 0; ··· 366 368 return 0; 367 369 } 368 370 369 - static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_login *login) 370 - { 371 - u32 padding = 0, payload_length; 372 - struct iscsi_login_req *login_req; 373 - 374 - if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0) 375 - return -1; 376 - 377 - login_req = (struct iscsi_login_req *) login->req; 378 - payload_length = ntoh24(login_req->dlength); 379 - 380 - pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x," 381 - " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n", 382 - login_req->flags, login_req->itt, login_req->cmdsn, 383 - login_req->exp_statsn, login_req->cid, payload_length); 384 - 385 - if 
(iscsi_target_check_login_request(conn, login) < 0) 386 - return -1; 387 - 388 - padding = ((-payload_length) & 3); 389 - memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS); 390 - 391 - if (iscsi_login_rx_data( 392 - conn, 393 - login->req_buf, 394 - payload_length + padding) < 0) 395 - return -1; 396 - 397 - return 0; 398 - } 399 - 400 371 static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login) 401 372 { 402 373 if (iscsi_target_do_tx_login_io(conn, login) < 0) 403 374 return -1; 404 375 405 - if (iscsi_target_do_rx_login_io(conn, login) < 0) 406 - return -1; 407 - 408 - return 0; 409 - } 410 - 411 - static int iscsi_target_get_initial_payload( 412 - struct iscsi_conn *conn, 413 - struct iscsi_login *login) 414 - { 415 - u32 padding = 0, payload_length; 416 - struct iscsi_login_req *login_req; 417 - 418 - login_req = (struct iscsi_login_req *) login->req; 419 - payload_length = ntoh24(login_req->dlength); 420 - 421 - pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x," 422 - " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n", 423 - login_req->flags, login_req->itt, login_req->cmdsn, 424 - login_req->exp_statsn, payload_length); 425 - 426 - if (iscsi_target_check_login_request(conn, login) < 0) 427 - return -1; 428 - 429 - padding = ((-payload_length) & 3); 430 - 431 - if (iscsi_login_rx_data( 432 - conn, 433 - login->req_buf, 434 - payload_length + padding) < 0) 376 + if (conn->conn_transport->iscsit_get_login_rx(conn, login) < 0) 435 377 return -1; 436 378 437 379 return 0; ··· 619 681 return -1; 620 682 } 621 683 622 - switch ((login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) { 684 + switch (ISCSI_LOGIN_CURRENT_STAGE(login_req->flags)) { 623 685 case 0: 624 - login_rsp->flags |= (0 & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK); 686 + login_rsp->flags &= ~ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK; 625 687 if (iscsi_target_handle_csg_zero(conn, login) < 0) 626 688 return -1; 627 689 break; ··· 631 693 return -1; 632 694 if 
(login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { 633 695 login->tsih = conn->sess->tsih; 696 + login->login_complete = 1; 634 697 if (iscsi_target_do_tx_login_io(conn, 635 698 login) < 0) 636 699 return -1; ··· 641 702 default: 642 703 pr_err("Illegal CSG: %d received from" 643 704 " Initiator, protocol error.\n", 644 - (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) 645 - >> 2); 705 + ISCSI_LOGIN_CURRENT_STAGE(login_req->flags)); 646 706 break; 647 707 } 648 708 ··· 675 737 /* 676 738 * Processes the first Login Request.. 677 739 */ 678 - static int iscsi_target_locate_portal( 740 + int iscsi_target_locate_portal( 679 741 struct iscsi_np *np, 680 742 struct iscsi_conn *conn, 681 743 struct iscsi_login *login) ··· 690 752 691 753 login_req = (struct iscsi_login_req *) login->req; 692 754 payload_length = ntoh24(login_req->dlength); 693 - 694 - login->first_request = 1; 695 - login->leading_connection = (!login_req->tsih) ? 1 : 0; 696 - login->current_stage = 697 - (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2; 698 - login->version_min = login_req->min_version; 699 - login->version_max = login_req->max_version; 700 - memcpy(login->isid, login_req->isid, 6); 701 - login->cmd_sn = be32_to_cpu(login_req->cmdsn); 702 - login->init_task_tag = login_req->itt; 703 - login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); 704 - login->cid = be16_to_cpu(login_req->cid); 705 - login->tsih = be16_to_cpu(login_req->tsih); 706 - 707 - if (iscsi_target_get_initial_payload(conn, login) < 0) 708 - return -1; 709 755 710 756 tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL); 711 757 if (!tmpbuf) { ··· 721 799 722 800 start += strlen(key) + strlen(value) + 2; 723 801 } 802 + 803 + printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf); 724 804 725 805 /* 726 806 * See 5.3. Login Phase. 
··· 882 958 return ret; 883 959 } 884 960 885 - struct iscsi_login *iscsi_target_init_negotiation( 886 - struct iscsi_np *np, 887 - struct iscsi_conn *conn, 888 - char *login_pdu) 889 - { 890 - struct iscsi_login *login; 891 - 892 - login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL); 893 - if (!login) { 894 - pr_err("Unable to allocate memory for struct iscsi_login.\n"); 895 - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 896 - ISCSI_LOGIN_STATUS_NO_RESOURCES); 897 - return NULL; 898 - } 899 - 900 - login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL); 901 - if (!login->req) { 902 - pr_err("Unable to allocate memory for Login Request.\n"); 903 - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 904 - ISCSI_LOGIN_STATUS_NO_RESOURCES); 905 - goto out; 906 - } 907 - 908 - login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL); 909 - if (!login->req_buf) { 910 - pr_err("Unable to allocate memory for response buffer.\n"); 911 - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 912 - ISCSI_LOGIN_STATUS_NO_RESOURCES); 913 - goto out; 914 - } 915 - /* 916 - * SessionType: Discovery 917 - * 918 - * Locates Default Portal 919 - * 920 - * SessionType: Normal 921 - * 922 - * Locates Target Portal from NP -> Target IQN 923 - */ 924 - if (iscsi_target_locate_portal(np, conn, login) < 0) { 925 - goto out; 926 - } 927 - 928 - return login; 929 - out: 930 - kfree(login->req); 931 - kfree(login->req_buf); 932 - kfree(login); 933 - 934 - return NULL; 935 - } 936 - 937 961 int iscsi_target_start_negotiation( 938 962 struct iscsi_login *login, 939 963 struct iscsi_conn *conn) 940 964 { 941 - int ret = -1; 942 - 943 - login->rsp = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); 944 - if (!login->rsp) { 945 - pr_err("Unable to allocate memory for" 946 - " Login Response.\n"); 947 - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 948 - ISCSI_LOGIN_STATUS_NO_RESOURCES); 949 - ret = -1; 950 - goto out; 951 - } 952 - 953 - login->rsp_buf = 
kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL); 954 - if (!login->rsp_buf) { 955 - pr_err("Unable to allocate memory for" 956 - " request buffer.\n"); 957 - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 958 - ISCSI_LOGIN_STATUS_NO_RESOURCES); 959 - ret = -1; 960 - goto out; 961 - } 965 + int ret; 962 966 963 967 ret = iscsi_target_do_login(conn, login); 964 - out: 965 968 if (ret != 0) 966 969 iscsi_remove_failed_auth_entry(conn); 967 970 968 - iscsi_target_nego_release(login, conn); 971 + iscsi_target_nego_release(conn); 969 972 return ret; 970 973 } 971 974 972 - void iscsi_target_nego_release( 973 - struct iscsi_login *login, 974 - struct iscsi_conn *conn) 975 + void iscsi_target_nego_release(struct iscsi_conn *conn) 975 976 { 976 - kfree(login->req); 977 - kfree(login->rsp); 977 + struct iscsi_login *login = conn->conn_login; 978 + 979 + if (!login) 980 + return; 981 + 978 982 kfree(login->req_buf); 979 983 kfree(login->rsp_buf); 980 984 kfree(login); 985 + 986 + conn->conn_login = NULL; 981 987 }
+7 -4
drivers/target/iscsi/iscsi_target_nego.h
··· 7 7 extern void convert_null_to_semi(char *, int); 8 8 extern int extract_param(const char *, const char *, unsigned int, char *, 9 9 unsigned char *); 10 - extern struct iscsi_login *iscsi_target_init_negotiation( 11 - struct iscsi_np *, struct iscsi_conn *, char *); 10 + extern int iscsi_target_check_login_request(struct iscsi_conn *, 11 + struct iscsi_login *); 12 + extern int iscsi_target_get_initial_payload(struct iscsi_conn *, 13 + struct iscsi_login *); 14 + extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsi_conn *, 15 + struct iscsi_login *); 12 16 extern int iscsi_target_start_negotiation( 13 17 struct iscsi_login *, struct iscsi_conn *); 14 - extern void iscsi_target_nego_release( 15 - struct iscsi_login *, struct iscsi_conn *); 18 + extern void iscsi_target_nego_release(struct iscsi_conn *); 16 19 17 20 #endif /* ISCSI_TARGET_NEGO_H */
+78 -9
drivers/target/iscsi/iscsi_target_parameters.c
··· 59 59 char *text_buf, 60 60 int text_length) 61 61 { 62 - int length, tx_sent; 62 + int length, tx_sent, iov_cnt = 1; 63 63 struct kvec iov[2]; 64 64 65 65 length = (ISCSI_HDR_LEN + text_length); ··· 67 67 memset(&iov[0], 0, 2 * sizeof(struct kvec)); 68 68 iov[0].iov_len = ISCSI_HDR_LEN; 69 69 iov[0].iov_base = pdu_buf; 70 - iov[1].iov_len = text_length; 71 - iov[1].iov_base = text_buf; 70 + 71 + if (text_buf && text_length) { 72 + iov[1].iov_len = text_length; 73 + iov[1].iov_base = text_buf; 74 + iov_cnt++; 75 + } 72 76 73 77 /* 74 78 * Initial Marker-less Interval. ··· 81 77 */ 82 78 conn->if_marker += length; 83 79 84 - tx_sent = tx_data(conn, &iov[0], 2, length); 80 + tx_sent = tx_data(conn, &iov[0], iov_cnt, length); 85 81 if (tx_sent != length) { 86 82 pr_err("tx_data returned %d, expecting %d.\n", 87 83 tx_sent, length); ··· 433 429 TYPERANGE_MARKINT, USE_INITIAL_ONLY); 434 430 if (!param) 435 431 goto out; 432 + /* 433 + * Extra parameters for ISER from RFC-5046 434 + */ 435 + param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS, 436 + PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, 437 + TYPERANGE_BOOL_AND, USE_LEADING_ONLY); 438 + if (!param) 439 + goto out; 440 + 441 + param = iscsi_set_default_param(pl, INITIATORRECVDATASEGMENTLENGTH, 442 + INITIAL_INITIATORRECVDATASEGMENTLENGTH, 443 + PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, 444 + TYPERANGE_512_TO_16777215, USE_ALL); 445 + if (!param) 446 + goto out; 447 + 448 + param = iscsi_set_default_param(pl, TARGETRECVDATASEGMENTLENGTH, 449 + INITIAL_TARGETRECVDATASEGMENTLENGTH, 450 + PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, 451 + TYPERANGE_512_TO_16777215, USE_ALL); 452 + if (!param) 453 + goto out; 436 454 437 455 *param_list_ptr = pl; 438 456 return 0; ··· 464 438 } 465 439 466 440 int iscsi_set_keys_to_negotiate( 467 - int sessiontype, 468 - struct iscsi_param_list *param_list) 441 + struct iscsi_param_list *param_list, 442 + bool iser) 469 443 { 470 
444 struct iscsi_param *param; 445 + 446 + param_list->iser = iser; 471 447 472 448 list_for_each_entry(param, &param_list->param_list, p_list) { 473 449 param->state = 0; 474 450 if (!strcmp(param->name, AUTHMETHOD)) { 475 451 SET_PSTATE_NEGOTIATE(param); 476 452 } else if (!strcmp(param->name, HEADERDIGEST)) { 477 - SET_PSTATE_NEGOTIATE(param); 453 + if (iser == false) 454 + SET_PSTATE_NEGOTIATE(param); 478 455 } else if (!strcmp(param->name, DATADIGEST)) { 479 - SET_PSTATE_NEGOTIATE(param); 456 + if (iser == false) 457 + SET_PSTATE_NEGOTIATE(param); 480 458 } else if (!strcmp(param->name, MAXCONNECTIONS)) { 481 459 SET_PSTATE_NEGOTIATE(param); 482 460 } else if (!strcmp(param->name, TARGETNAME)) { ··· 499 469 } else if (!strcmp(param->name, IMMEDIATEDATA)) { 500 470 SET_PSTATE_NEGOTIATE(param); 501 471 } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) { 502 - SET_PSTATE_NEGOTIATE(param); 472 + if (iser == false) 473 + SET_PSTATE_NEGOTIATE(param); 503 474 } else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) { 504 475 continue; 505 476 } else if (!strcmp(param->name, MAXBURSTLENGTH)) { ··· 529 498 SET_PSTATE_NEGOTIATE(param); 530 499 } else if (!strcmp(param->name, OFMARKINT)) { 531 500 SET_PSTATE_NEGOTIATE(param); 501 + } else if (!strcmp(param->name, RDMAEXTENTIONS)) { 502 + if (iser == true) 503 + SET_PSTATE_NEGOTIATE(param); 504 + } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { 505 + if (iser == true) 506 + SET_PSTATE_NEGOTIATE(param); 507 + } else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) { 508 + if (iser == true) 509 + SET_PSTATE_NEGOTIATE(param); 532 510 } 533 511 } 534 512 ··· 579 539 else if (!strcmp(param->name, IFMARKINT)) 580 540 param->state &= ~PSTATE_NEGOTIATE; 581 541 else if (!strcmp(param->name, OFMARKINT)) 542 + param->state &= ~PSTATE_NEGOTIATE; 543 + else if (!strcmp(param->name, RDMAEXTENTIONS)) 544 + param->state &= ~PSTATE_NEGOTIATE; 545 + else if (!strcmp(param->name, 
INITIATORRECVDATASEGMENTLENGTH)) 546 + param->state &= ~PSTATE_NEGOTIATE; 547 + else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) 582 548 param->state &= ~PSTATE_NEGOTIATE; 583 549 } 584 550 ··· 1801 1755 * this key is not sent over the wire. 1802 1756 */ 1803 1757 if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) { 1758 + if (param_list->iser == true) 1759 + continue; 1760 + 1804 1761 ops->MaxXmitDataSegmentLength = 1805 1762 simple_strtoul(param->value, &tmpptr, 0); 1806 1763 pr_debug("MaxXmitDataSegmentLength: %s\n", ··· 1849 1800 simple_strtoul(param->value, &tmpptr, 0); 1850 1801 pr_debug("IFMarkInt: %s\n", 1851 1802 param->value); 1803 + } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { 1804 + ops->InitiatorRecvDataSegmentLength = 1805 + simple_strtoul(param->value, &tmpptr, 0); 1806 + pr_debug("InitiatorRecvDataSegmentLength: %s\n", 1807 + param->value); 1808 + ops->MaxRecvDataSegmentLength = 1809 + ops->InitiatorRecvDataSegmentLength; 1810 + pr_debug("Set MRDSL from InitiatorRecvDataSegmentLength\n"); 1811 + } else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) { 1812 + ops->TargetRecvDataSegmentLength = 1813 + simple_strtoul(param->value, &tmpptr, 0); 1814 + pr_debug("TargetRecvDataSegmentLength: %s\n", 1815 + param->value); 1816 + ops->MaxXmitDataSegmentLength = 1817 + ops->TargetRecvDataSegmentLength; 1818 + pr_debug("Set MXDSL from TargetRecvDataSegmentLength\n"); 1852 1819 } 1853 1820 } 1854 1821 pr_debug("----------------------------------------------------" ··· 1976 1911 } else if (!strcmp(param->name, SESSIONTYPE)) { 1977 1912 ops->SessionType = !strcmp(param->value, DISCOVERY); 1978 1913 pr_debug("SessionType: %s\n", 1914 + param->value); 1915 + } else if (!strcmp(param->name, RDMAEXTENTIONS)) { 1916 + ops->RDMAExtensions = !strcmp(param->value, YES); 1917 + pr_debug("RDMAExtensions: %s\n", 1979 1918 param->value); 1980 1919 } 1981 1920 }
+15 -1
drivers/target/iscsi/iscsi_target_parameters.h
··· 27 27 extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *); 28 28 extern void iscsi_print_params(struct iscsi_param_list *); 29 29 extern int iscsi_create_default_params(struct iscsi_param_list **); 30 - extern int iscsi_set_keys_to_negotiate(int, struct iscsi_param_list *); 30 + extern int iscsi_set_keys_to_negotiate(struct iscsi_param_list *, bool); 31 31 extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *); 32 32 extern int iscsi_copy_param_list(struct iscsi_param_list **, 33 33 struct iscsi_param_list *, int); ··· 89 89 #define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft" 90 90 91 91 /* 92 + * Parameter names of iSCSI Extentions for RDMA (iSER). See RFC-5046 93 + */ 94 + #define RDMAEXTENTIONS "RDMAExtensions" 95 + #define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength" 96 + #define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength" 97 + 98 + /* 92 99 * For AuthMethod. 93 100 */ 94 101 #define KRB5 "KRB5" ··· 138 131 #define INITIAL_OFMARKER NO 139 132 #define INITIAL_IFMARKINT "2048~65535" 140 133 #define INITIAL_OFMARKINT "2048~65535" 134 + 135 + /* 136 + * Initial values for iSER parameters following RFC-5046 Section 6 137 + */ 138 + #define INITIAL_RDMAEXTENTIONS NO 139 + #define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144" 140 + #define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192" 141 141 142 142 /* 143 143 * For [Header,Data]Digests.
+3 -1
drivers/target/iscsi/iscsi_target_tmr.c
··· 23 23 #include <scsi/iscsi_proto.h> 24 24 #include <target/target_core_base.h> 25 25 #include <target/target_core_fabric.h> 26 + #include <target/iscsi/iscsi_transport.h> 26 27 27 28 #include "iscsi_target_core.h" 28 29 #include "iscsi_target_seq_pdu_list.h" ··· 302 301 /* 303 302 * iscsit_build_r2ts_for_cmd() can handle the rest from here. 304 303 */ 305 - return iscsit_build_r2ts_for_cmd(cmd, conn, true); 304 + return conn->conn_transport->iscsit_get_dataout(conn, cmd, true); 306 305 } 307 306 308 307 static int iscsit_task_reassign_complete_read( ··· 472 471 473 472 return 0; 474 473 } 474 + EXPORT_SYMBOL(iscsit_tmr_post_handler); 475 475 476 476 /* 477 477 * Nothing to do here, but leave it for good measure. :-)
+4 -2
drivers/target/iscsi/iscsi_target_tpg.c
··· 31 31 #include "iscsi_target.h" 32 32 #include "iscsi_target_parameters.h" 33 33 34 + #include <target/iscsi/iscsi_transport.h> 35 + 34 36 struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt) 35 37 { 36 38 struct iscsi_portal_group *tpg; ··· 510 508 511 509 pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n", 512 510 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt, 513 - (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP"); 511 + np->np_transport->name); 514 512 515 513 return tpg_np; 516 514 } ··· 524 522 525 523 pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n", 526 524 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt, 527 - (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP"); 525 + np->np_transport->name); 528 526 529 527 tpg_np->tpg_np = NULL; 530 528 tpg_np->tpg = NULL;
+55
drivers/target/iscsi/iscsi_target_transport.c
··· 1 + #include <linux/spinlock.h> 2 + #include <linux/list.h> 3 + #include <target/iscsi/iscsi_transport.h> 4 + 5 + static LIST_HEAD(g_transport_list); 6 + static DEFINE_MUTEX(transport_mutex); 7 + 8 + struct iscsit_transport *iscsit_get_transport(int type) 9 + { 10 + struct iscsit_transport *t; 11 + 12 + mutex_lock(&transport_mutex); 13 + list_for_each_entry(t, &g_transport_list, t_node) { 14 + if (t->transport_type == type) { 15 + if (t->owner && !try_module_get(t->owner)) { 16 + t = NULL; 17 + } 18 + mutex_unlock(&transport_mutex); 19 + return t; 20 + } 21 + } 22 + mutex_unlock(&transport_mutex); 23 + 24 + return NULL; 25 + } 26 + 27 + void iscsit_put_transport(struct iscsit_transport *t) 28 + { 29 + if (t->owner) 30 + module_put(t->owner); 31 + } 32 + 33 + int iscsit_register_transport(struct iscsit_transport *t) 34 + { 35 + INIT_LIST_HEAD(&t->t_node); 36 + 37 + mutex_lock(&transport_mutex); 38 + list_add_tail(&t->t_node, &g_transport_list); 39 + mutex_unlock(&transport_mutex); 40 + 41 + pr_debug("Registered iSCSI transport: %s\n", t->name); 42 + 43 + return 0; 44 + } 45 + EXPORT_SYMBOL(iscsit_register_transport); 46 + 47 + void iscsit_unregister_transport(struct iscsit_transport *t) 48 + { 49 + mutex_lock(&transport_mutex); 50 + list_del(&t->t_node); 51 + mutex_unlock(&transport_mutex); 52 + 53 + pr_debug("Unregistered iSCSI transport: %s\n", t->name); 54 + } 55 + EXPORT_SYMBOL(iscsit_unregister_transport);
+29 -24
drivers/target/iscsi/iscsi_target_util.c
··· 24 24 #include <target/target_core_base.h> 25 25 #include <target/target_core_fabric.h> 26 26 #include <target/target_core_configfs.h> 27 + #include <target/iscsi/iscsi_transport.h> 27 28 28 29 #include "iscsi_target_core.h" 29 30 #include "iscsi_target_parameters.h" ··· 149 148 spin_unlock_bh(&cmd->r2t_lock); 150 149 } 151 150 151 + struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) 152 + { 153 + struct iscsi_cmd *cmd; 154 + 155 + cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask); 156 + if (!cmd) 157 + return NULL; 158 + 159 + cmd->release_cmd = &iscsit_release_cmd; 160 + return cmd; 161 + } 162 + 152 163 /* 153 164 * May be called from software interrupt (timer) context for allocating 154 165 * iSCSI NopINs. ··· 169 156 { 170 157 struct iscsi_cmd *cmd; 171 158 172 - cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask); 159 + cmd = conn->conn_transport->iscsit_alloc_cmd(conn, gfp_mask); 173 160 if (!cmd) { 174 161 pr_err("Unable to allocate memory for struct iscsi_cmd.\n"); 175 162 return NULL; 176 163 } 177 - 178 - cmd->conn = conn; 164 + cmd->conn = conn; 179 165 INIT_LIST_HEAD(&cmd->i_conn_node); 180 166 INIT_LIST_HEAD(&cmd->datain_list); 181 167 INIT_LIST_HEAD(&cmd->cmd_r2t_list); ··· 187 175 188 176 return cmd; 189 177 } 178 + EXPORT_SYMBOL(iscsit_allocate_cmd); 190 179 191 180 struct iscsi_seq *iscsit_get_seq_holder_for_datain( 192 181 struct iscsi_cmd *cmd, ··· 317 304 318 305 return ret; 319 306 } 307 + EXPORT_SYMBOL(iscsit_sequence_cmd); 320 308 321 309 int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf) 322 310 { ··· 703 689 */ 704 690 switch (cmd->iscsi_opcode) { 705 691 case ISCSI_OP_SCSI_CMD: 692 + if (cmd->data_direction == DMA_TO_DEVICE) 693 + iscsit_stop_dataout_timer(cmd); 694 + /* 695 + * Fallthrough 696 + */ 706 697 case ISCSI_OP_SCSI_TMFUNC: 707 698 transport_generic_free_cmd(&cmd->se_cmd, 1); 708 699 break; ··· 723 704 } 724 705 /* Fall-through */ 725 706 default: 726 - 
iscsit_release_cmd(cmd); 707 + cmd->release_cmd(cmd); 727 708 break; 728 709 } 729 710 } ··· 1245 1226 */ 1246 1227 int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail) 1247 1228 { 1248 - u8 iscsi_hdr[ISCSI_HDR_LEN]; 1249 - int err; 1250 - struct kvec iov; 1251 1229 struct iscsi_login_rsp *hdr; 1230 + struct iscsi_login *login = conn->conn_login; 1252 1231 1232 + login->login_failed = 1; 1253 1233 iscsit_collect_login_stats(conn, status_class, status_detail); 1254 1234 1255 - memset(&iov, 0, sizeof(struct kvec)); 1256 - memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN); 1257 - 1258 - hdr = (struct iscsi_login_rsp *)&iscsi_hdr; 1235 + hdr = (struct iscsi_login_rsp *)&login->rsp[0]; 1259 1236 hdr->opcode = ISCSI_OP_LOGIN_RSP; 1260 1237 hdr->status_class = status_class; 1261 1238 hdr->status_detail = status_detail; 1262 1239 hdr->itt = conn->login_itt; 1263 1240 1264 - iov.iov_base = &iscsi_hdr; 1265 - iov.iov_len = ISCSI_HDR_LEN; 1266 - 1267 - PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN); 1268 - 1269 - err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN); 1270 - if (err != ISCSI_HDR_LEN) { 1271 - pr_err("tx_data returned less than expected\n"); 1272 - return -1; 1273 - } 1274 - 1275 - return 0; 1241 + return conn->conn_transport->iscsit_put_login_tx(conn, login, 0); 1276 1242 } 1277 1243 1278 1244 void iscsit_print_session_params(struct iscsi_session *sess) ··· 1436 1432 strcpy(ls->last_intr_fail_name, 1437 1433 (intrname ? intrname->value : "Unknown")); 1438 1434 1439 - ls->last_intr_fail_ip_family = conn->sock->sk->sk_family; 1435 + ls->last_intr_fail_ip_family = conn->login_family; 1436 + 1440 1437 snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE, 1441 1438 "%s", conn->login_ip); 1442 1439 ls->last_fail_time = get_jiffies_64();
+1
drivers/target/iscsi/iscsi_target_util.h
··· 8 8 extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *); 9 9 extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *); 10 10 extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *); 11 + extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t); 11 12 extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); 12 13 extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32); 13 14 extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
+117 -5
drivers/target/target_core_file.c
··· 30 30 #include <linux/slab.h> 31 31 #include <linux/spinlock.h> 32 32 #include <linux/module.h> 33 + #include <linux/falloc.h> 33 34 #include <scsi/scsi.h> 34 35 #include <scsi/scsi_host.h> 36 + #include <asm/unaligned.h> 35 37 36 38 #include <target/target_core_base.h> 37 39 #include <target/target_core_backend.h> ··· 168 166 " block_device blocks: %llu logical_block_size: %d\n", 169 167 dev_size, div_u64(dev_size, fd_dev->fd_block_size), 170 168 fd_dev->fd_block_size); 169 + /* 170 + * Check if the underlying struct block_device request_queue supports 171 + * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM 172 + * in ATA and we need to set TPE=1 173 + */ 174 + if (blk_queue_discard(q)) { 175 + dev->dev_attrib.max_unmap_lba_count = 176 + q->limits.max_discard_sectors; 177 + /* 178 + * Currently hardcoded to 1 in Linux/SCSI code.. 179 + */ 180 + dev->dev_attrib.max_unmap_block_desc_count = 1; 181 + dev->dev_attrib.unmap_granularity = 182 + q->limits.discard_granularity >> 9; 183 + dev->dev_attrib.unmap_granularity_alignment = 184 + q->limits.discard_alignment; 185 + pr_debug("IFILE: BLOCK Discard support available," 186 + " disabled by default\n"); 187 + } 188 + /* 189 + * Enable write same emulation for IBLOCK and use 0xFFFF as 190 + * the smaller WRITE_SAME(10) only has a two-byte block count. 191 + */ 192 + dev->dev_attrib.max_write_same_len = 0xFFFF; 193 + 194 + if (blk_queue_nonrot(q)) 195 + dev->dev_attrib.is_nonrot = 1; 171 196 } else { 172 197 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { 173 198 pr_err("FILEIO: Missing fd_dev_size=" ··· 205 176 206 177 dev->dev_attrib.hw_block_size = FD_BLOCKSIZE; 207 178 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; 179 + 180 + /* 181 + * Limit UNMAP emulation to 8k Number of LBAs (NoLB) 182 + */ 183 + dev->dev_attrib.max_unmap_lba_count = 0x2000; 184 + /* 185 + * Currently hardcoded to 1 in Linux/SCSI code.. 
186 + */ 187 + dev->dev_attrib.max_unmap_block_desc_count = 1; 188 + dev->dev_attrib.unmap_granularity = 1; 189 + dev->dev_attrib.unmap_granularity_alignment = 0; 190 + 191 + /* 192 + * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB) 193 + * based upon struct iovec limit for vfs_writev() 194 + */ 195 + dev->dev_attrib.max_write_same_len = 0x1000; 208 196 } 209 197 210 198 fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; ··· 236 190 237 191 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; 238 192 fd_dev->fd_queue_depth = dev->queue_depth; 239 - /* 240 - * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB) 241 - * based upon struct iovec limit for vfs_writev() 242 - */ 243 - dev->dev_attrib.max_write_same_len = 0x1000; 244 193 245 194 pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," 246 195 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, ··· 483 442 } 484 443 485 444 static sense_reason_t 445 + fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) 446 + { 447 + struct file *file = priv; 448 + struct inode *inode = file->f_mapping->host; 449 + int ret; 450 + 451 + if (S_ISBLK(inode->i_mode)) { 452 + /* The backend is block device, use discard */ 453 + struct block_device *bdev = inode->i_bdev; 454 + 455 + ret = blkdev_issue_discard(bdev, lba, 456 + nolb, GFP_KERNEL, 0); 457 + if (ret < 0) { 458 + pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", 459 + ret); 460 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 461 + } 462 + } else { 463 + /* The backend is normal file, use fallocate */ 464 + struct se_device *se_dev = cmd->se_dev; 465 + loff_t pos = lba * se_dev->dev_attrib.block_size; 466 + unsigned int len = nolb * se_dev->dev_attrib.block_size; 467 + int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE; 468 + 469 + if (!file->f_op->fallocate) 470 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 471 + 472 + ret = file->f_op->fallocate(file, mode, pos, len); 473 + if 
(ret < 0) { 474 + pr_warn("FILEIO: fallocate() failed: %d\n", ret); 475 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 476 + } 477 + } 478 + 479 + return 0; 480 + } 481 + 482 + static sense_reason_t 483 + fd_execute_write_same_unmap(struct se_cmd *cmd) 484 + { 485 + struct se_device *se_dev = cmd->se_dev; 486 + struct fd_dev *fd_dev = FD_DEV(se_dev); 487 + struct file *file = fd_dev->fd_file; 488 + sector_t lba = cmd->t_task_lba; 489 + sector_t nolb = sbc_get_write_same_sectors(cmd); 490 + int ret; 491 + 492 + if (!nolb) { 493 + target_complete_cmd(cmd, SAM_STAT_GOOD); 494 + return 0; 495 + } 496 + 497 + ret = fd_do_unmap(cmd, file, lba, nolb); 498 + if (ret) 499 + return ret; 500 + 501 + target_complete_cmd(cmd, GOOD); 502 + return 0; 503 + } 504 + 505 + static sense_reason_t 506 + fd_execute_unmap(struct se_cmd *cmd) 507 + { 508 + struct file *file = FD_DEV(cmd->se_dev)->fd_file; 509 + 510 + return sbc_execute_unmap(cmd, fd_do_unmap, file); 511 + } 512 + 513 + static sense_reason_t 486 514 fd_execute_rw(struct se_cmd *cmd) 487 515 { 488 516 struct scatterlist *sgl = cmd->t_data_sg; ··· 710 600 .execute_rw = fd_execute_rw, 711 601 .execute_sync_cache = fd_execute_sync_cache, 712 602 .execute_write_same = fd_execute_write_same, 603 + .execute_write_same_unmap = fd_execute_write_same_unmap, 604 + .execute_unmap = fd_execute_unmap, 713 605 }; 714 606 715 607 static sense_reason_t
+25 -89
drivers/target/target_core_iblock.c
··· 380 380 } 381 381 382 382 static sense_reason_t 383 + iblock_do_unmap(struct se_cmd *cmd, void *priv, 384 + sector_t lba, sector_t nolb) 385 + { 386 + struct block_device *bdev = priv; 387 + int ret; 388 + 389 + ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); 390 + if (ret < 0) { 391 + pr_err("blkdev_issue_discard() failed: %d\n", ret); 392 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 393 + } 394 + 395 + return 0; 396 + } 397 + 398 + static sense_reason_t 383 399 iblock_execute_unmap(struct se_cmd *cmd) 384 400 { 385 - struct se_device *dev = cmd->se_dev; 386 - struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 387 - unsigned char *buf, *ptr = NULL; 388 - sector_t lba; 389 - int size; 390 - u32 range; 391 - sense_reason_t ret = 0; 392 - int dl, bd_dl, err; 401 + struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; 393 402 394 - /* We never set ANC_SUP */ 395 - if (cmd->t_task_cdb[1]) 396 - return TCM_INVALID_CDB_FIELD; 397 - 398 - if (cmd->data_length == 0) { 399 - target_complete_cmd(cmd, SAM_STAT_GOOD); 400 - return 0; 401 - } 402 - 403 - if (cmd->data_length < 8) { 404 - pr_warn("UNMAP parameter list length %u too small\n", 405 - cmd->data_length); 406 - return TCM_PARAMETER_LIST_LENGTH_ERROR; 407 - } 408 - 409 - buf = transport_kmap_data_sg(cmd); 410 - if (!buf) 411 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 412 - 413 - dl = get_unaligned_be16(&buf[0]); 414 - bd_dl = get_unaligned_be16(&buf[2]); 415 - 416 - size = cmd->data_length - 8; 417 - if (bd_dl > size) 418 - pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n", 419 - cmd->data_length, bd_dl); 420 - else 421 - size = bd_dl; 422 - 423 - if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) { 424 - ret = TCM_INVALID_PARAMETER_LIST; 425 - goto err; 426 - } 427 - 428 - /* First UNMAP block descriptor starts at 8 byte offset */ 429 - ptr = &buf[8]; 430 - pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u" 431 - " ptr: %p\n", dev->transport->name, dl, 
bd_dl, size, ptr); 432 - 433 - while (size >= 16) { 434 - lba = get_unaligned_be64(&ptr[0]); 435 - range = get_unaligned_be32(&ptr[8]); 436 - pr_debug("UNMAP: Using lba: %llu and range: %u\n", 437 - (unsigned long long)lba, range); 438 - 439 - if (range > dev->dev_attrib.max_unmap_lba_count) { 440 - ret = TCM_INVALID_PARAMETER_LIST; 441 - goto err; 442 - } 443 - 444 - if (lba + range > dev->transport->get_blocks(dev) + 1) { 445 - ret = TCM_ADDRESS_OUT_OF_RANGE; 446 - goto err; 447 - } 448 - 449 - err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range, 450 - GFP_KERNEL, 0); 451 - if (err < 0) { 452 - pr_err("blkdev_issue_discard() failed: %d\n", 453 - err); 454 - ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 455 - goto err; 456 - } 457 - 458 - ptr += 16; 459 - size -= 16; 460 - } 461 - 462 - err: 463 - transport_kunmap_data_sg(cmd); 464 - if (!ret) 465 - target_complete_cmd(cmd, GOOD); 466 - return ret; 403 + return sbc_execute_unmap(cmd, iblock_do_unmap, bdev); 467 404 } 468 405 469 406 static sense_reason_t 470 407 iblock_execute_write_same_unmap(struct se_cmd *cmd) 471 408 { 472 - struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); 473 - int rc; 409 + struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; 410 + sector_t lba = cmd->t_task_lba; 411 + sector_t nolb = sbc_get_write_same_sectors(cmd); 412 + int ret; 474 413 475 - rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba, 476 - sbc_get_write_same_sectors(cmd), GFP_KERNEL, 0); 477 - if (rc < 0) { 478 - pr_warn("blkdev_issue_discard() failed: %d\n", rc); 479 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 480 - } 414 + ret = iblock_do_unmap(cmd, bdev, lba, nolb); 415 + if (ret) 416 + return ret; 481 417 482 418 target_complete_cmd(cmd, GOOD); 483 419 return 0;
+85
drivers/target/target_core_sbc.c
··· 596 596 return TYPE_DISK; 597 597 } 598 598 EXPORT_SYMBOL(sbc_get_device_type); 599 + 600 + sense_reason_t 601 + sbc_execute_unmap(struct se_cmd *cmd, 602 + sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *, 603 + sector_t, sector_t), 604 + void *priv) 605 + { 606 + struct se_device *dev = cmd->se_dev; 607 + unsigned char *buf, *ptr = NULL; 608 + sector_t lba; 609 + int size; 610 + u32 range; 611 + sense_reason_t ret = 0; 612 + int dl, bd_dl; 613 + 614 + /* We never set ANC_SUP */ 615 + if (cmd->t_task_cdb[1]) 616 + return TCM_INVALID_CDB_FIELD; 617 + 618 + if (cmd->data_length == 0) { 619 + target_complete_cmd(cmd, SAM_STAT_GOOD); 620 + return 0; 621 + } 622 + 623 + if (cmd->data_length < 8) { 624 + pr_warn("UNMAP parameter list length %u too small\n", 625 + cmd->data_length); 626 + return TCM_PARAMETER_LIST_LENGTH_ERROR; 627 + } 628 + 629 + buf = transport_kmap_data_sg(cmd); 630 + if (!buf) 631 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 632 + 633 + dl = get_unaligned_be16(&buf[0]); 634 + bd_dl = get_unaligned_be16(&buf[2]); 635 + 636 + size = cmd->data_length - 8; 637 + if (bd_dl > size) 638 + pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n", 639 + cmd->data_length, bd_dl); 640 + else 641 + size = bd_dl; 642 + 643 + if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) { 644 + ret = TCM_INVALID_PARAMETER_LIST; 645 + goto err; 646 + } 647 + 648 + /* First UNMAP block descriptor starts at 8 byte offset */ 649 + ptr = &buf[8]; 650 + pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u" 651 + " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); 652 + 653 + while (size >= 16) { 654 + lba = get_unaligned_be64(&ptr[0]); 655 + range = get_unaligned_be32(&ptr[8]); 656 + pr_debug("UNMAP: Using lba: %llu and range: %u\n", 657 + (unsigned long long)lba, range); 658 + 659 + if (range > dev->dev_attrib.max_unmap_lba_count) { 660 + ret = TCM_INVALID_PARAMETER_LIST; 661 + goto err; 662 + } 663 + 664 + if (lba + range > 
dev->transport->get_blocks(dev) + 1) { 665 + ret = TCM_ADDRESS_OUT_OF_RANGE; 666 + goto err; 667 + } 668 + 669 + ret = do_unmap_fn(cmd, priv, lba, range); 670 + if (ret) 671 + goto err; 672 + 673 + ptr += 16; 674 + size -= 16; 675 + } 676 + 677 + err: 678 + transport_kunmap_data_sg(cmd); 679 + if (!ret) 680 + target_complete_cmd(cmd, GOOD); 681 + return ret; 682 + } 683 + EXPORT_SYMBOL(sbc_execute_unmap);
+9 -4
drivers/target/target_core_transport.c
··· 65 65 static void transport_handle_queue_full(struct se_cmd *cmd, 66 66 struct se_device *dev); 67 67 static int transport_generic_get_mem(struct se_cmd *cmd); 68 - static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); 69 68 static void transport_put_cmd(struct se_cmd *cmd); 70 69 static void target_complete_ok_work(struct work_struct *work); 71 70 ··· 2178 2179 * @se_cmd: command descriptor to add 2179 2180 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2180 2181 */ 2181 - static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, 2182 + int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, 2182 2183 bool ack_kref) 2183 2184 { 2184 2185 unsigned long flags; ··· 2207 2208 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2208 2209 return ret; 2209 2210 } 2211 + EXPORT_SYMBOL(target_get_sess_cmd); 2210 2212 2211 2213 static void target_release_cmd_kref(struct kref *kref) 2212 2214 { ··· 2765 2765 /* CURRENT ERROR */ 2766 2766 buffer[0] = 0x70; 2767 2767 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2768 - /* ILLEGAL REQUEST */ 2769 - buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2768 + /* 2769 + * Returning ILLEGAL REQUEST would cause immediate IO errors on 2770 + * Solaris initiators. Returning NOT READY instead means the 2771 + * operations will be retried a finite number of times and we 2772 + * can survive intermittent errors. 2773 + */ 2774 + buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY; 2770 2775 /* LOGICAL UNIT COMMUNICATION FAILURE */ 2771 2776 buffer[SPC_ASC_KEY_OFFSET] = 0x08; 2772 2777 break;
+8 -1
drivers/target/tcm_fc/tfc_io.c
··· 103 103 use_sg = !(remaining % 4); 104 104 105 105 while (remaining) { 106 + struct fc_seq *seq = cmd->seq; 107 + 108 + if (!seq) { 109 + pr_debug("%s: Command aborted, xid 0x%x\n", 110 + __func__, ep->xid); 111 + break; 112 + } 106 113 if (!mem_len) { 107 114 sg = sg_next(sg); 108 115 mem_len = min((size_t)sg->length, remaining); ··· 176 169 f_ctl |= FC_FC_END_SEQ; 177 170 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, 178 171 FC_TYPE_FCP, f_ctl, fh_off); 179 - error = lport->tt.seq_send(lport, cmd->seq, fp); 172 + error = lport->tt.seq_send(lport, seq, fp); 180 173 if (error) { 181 174 /* XXX For now, initiator will retry */ 182 175 pr_err_ratelimited("%s: Failed to send frame %p, "
+1 -8
drivers/target/tcm_fc/tfc_sess.c
··· 428 428 return ret; 429 429 } 430 430 431 - static void ft_sess_rcu_free(struct rcu_head *rcu) 432 - { 433 - struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu); 434 - 435 - kfree(sess); 436 - } 437 - 438 431 static void ft_sess_free(struct kref *kref) 439 432 { 440 433 struct ft_sess *sess = container_of(kref, struct ft_sess, kref); 441 434 442 435 transport_deregister_session(sess->se_sess); 443 - call_rcu(&sess->rcu, ft_sess_rcu_free); 436 + kfree_rcu(sess, rcu); 444 437 } 445 438 446 439 void ft_sess_put(struct ft_sess *sess)
+246 -16
drivers/vhost/tcm_vhost.c
··· 66 66 * TODO: debug and remove the workaround. 67 67 */ 68 68 enum { 69 - VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX) 69 + VHOST_SCSI_FEATURES = (VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)) | 70 + (1ULL << VIRTIO_SCSI_F_HOTPLUG) 70 71 }; 71 72 72 73 #define VHOST_SCSI_MAX_TARGET 256 73 74 #define VHOST_SCSI_MAX_VQ 128 75 + #define VHOST_SCSI_MAX_EVENT 128 74 76 75 77 struct vhost_scsi { 76 78 /* Protected by vhost_scsi->dev.mutex */ ··· 84 82 85 83 struct vhost_work vs_completion_work; /* cmd completion work item */ 86 84 struct llist_head vs_completion_list; /* cmd completion queue */ 85 + 86 + struct vhost_work vs_event_work; /* evt injection work item */ 87 + struct llist_head vs_event_list; /* evt injection queue */ 88 + 89 + bool vs_events_missed; /* any missed events, protected by vq->mutex */ 90 + int vs_events_nr; /* num of pending events, protected by vq->mutex */ 87 91 }; 88 92 89 93 /* Local pointer to allocated TCM configfs fabric module */ ··· 357 349 return 0; 358 350 } 359 351 352 + static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) 353 + { 354 + vs->vs_events_nr--; 355 + kfree(evt); 356 + } 357 + 358 + static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs, 359 + u32 event, u32 reason) 360 + { 361 + struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; 362 + struct tcm_vhost_evt *evt; 363 + 364 + if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { 365 + vs->vs_events_missed = true; 366 + return NULL; 367 + } 368 + 369 + evt = kzalloc(sizeof(*evt), GFP_KERNEL); 370 + if (!evt) { 371 + vq_err(vq, "Failed to allocate tcm_vhost_evt\n"); 372 + vs->vs_events_missed = true; 373 + return NULL; 374 + } 375 + 376 + evt->event.event = event; 377 + evt->event.reason = reason; 378 + vs->vs_events_nr++; 379 + 380 + return evt; 381 + } 382 + 360 383 static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd) 361 384 { 362 385 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; ··· 404 365 } 405 
366 406 367 kfree(tv_cmd); 368 + } 369 + 370 + static void tcm_vhost_do_evt_work(struct vhost_scsi *vs, 371 + struct tcm_vhost_evt *evt) 372 + { 373 + struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; 374 + struct virtio_scsi_event *event = &evt->event; 375 + struct virtio_scsi_event __user *eventp; 376 + unsigned out, in; 377 + int head, ret; 378 + 379 + if (!vq->private_data) { 380 + vs->vs_events_missed = true; 381 + return; 382 + } 383 + 384 + again: 385 + vhost_disable_notify(&vs->dev, vq); 386 + head = vhost_get_vq_desc(&vs->dev, vq, vq->iov, 387 + ARRAY_SIZE(vq->iov), &out, &in, 388 + NULL, NULL); 389 + if (head < 0) { 390 + vs->vs_events_missed = true; 391 + return; 392 + } 393 + if (head == vq->num) { 394 + if (vhost_enable_notify(&vs->dev, vq)) 395 + goto again; 396 + vs->vs_events_missed = true; 397 + return; 398 + } 399 + 400 + if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) { 401 + vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n", 402 + vq->iov[out].iov_len); 403 + vs->vs_events_missed = true; 404 + return; 405 + } 406 + 407 + if (vs->vs_events_missed) { 408 + event->event |= VIRTIO_SCSI_T_EVENTS_MISSED; 409 + vs->vs_events_missed = false; 410 + } 411 + 412 + eventp = vq->iov[out].iov_base; 413 + ret = __copy_to_user(eventp, event, sizeof(*event)); 414 + if (!ret) 415 + vhost_add_used_and_signal(&vs->dev, vq, head, 0); 416 + else 417 + vq_err(vq, "Faulted on tcm_vhost_send_event\n"); 418 + } 419 + 420 + static void tcm_vhost_evt_work(struct vhost_work *work) 421 + { 422 + struct vhost_scsi *vs = container_of(work, struct vhost_scsi, 423 + vs_event_work); 424 + struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; 425 + struct tcm_vhost_evt *evt; 426 + struct llist_node *llnode; 427 + 428 + mutex_lock(&vq->mutex); 429 + llnode = llist_del_all(&vs->vs_event_list); 430 + while (llnode) { 431 + evt = llist_entry(llnode, struct tcm_vhost_evt, list); 432 + llnode = llist_next(llnode); 433 + tcm_vhost_do_evt_work(vs, evt); 
434 + tcm_vhost_free_evt(vs, evt); 435 + } 436 + mutex_unlock(&vq->mutex); 407 437 } 408 438 409 439 /* Fill in status and signal that we are done processing this command ··· 885 777 pr_debug("%s: The handling func for control queue.\n", __func__); 886 778 } 887 779 780 + static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg, 781 + struct se_lun *lun, u32 event, u32 reason) 782 + { 783 + struct tcm_vhost_evt *evt; 784 + 785 + evt = tcm_vhost_allocate_evt(vs, event, reason); 786 + if (!evt) 787 + return; 788 + 789 + if (tpg && lun) { 790 + /* TODO: share lun setup code with virtio-scsi.ko */ 791 + /* 792 + * Note: evt->event is zeroed when we allocate it and 793 + * lun[4-7] need to be zero according to virtio-scsi spec. 794 + */ 795 + evt->event.lun[0] = 0x01; 796 + evt->event.lun[1] = tpg->tport_tpgt & 0xFF; 797 + if (lun->unpacked_lun >= 256) 798 + evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ; 799 + evt->event.lun[3] = lun->unpacked_lun & 0xFF; 800 + } 801 + 802 + llist_add(&evt->list, &vs->vs_event_list); 803 + vhost_work_queue(&vs->dev, &vs->vs_event_work); 804 + } 805 + 888 806 static void vhost_scsi_evt_handle_kick(struct vhost_work *work) 889 807 { 890 - pr_debug("%s: The handling func for event queue.\n", __func__); 808 + struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, 809 + poll.work); 810 + struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); 811 + 812 + mutex_lock(&vq->mutex); 813 + if (!vq->private_data) 814 + goto out; 815 + 816 + if (vs->vs_events_missed) 817 + tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); 818 + out: 819 + mutex_unlock(&vq->mutex); 891 820 } 892 821 893 822 static void vhost_scsi_handle_kick(struct vhost_work *work) ··· 948 803 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) 949 804 vhost_scsi_flush_vq(vs, i); 950 805 vhost_work_flush(&vs->dev, &vs->vs_completion_work); 806 + vhost_work_flush(&vs->dev, &vs->vs_event_work); 951 807 } 952 808 953 809 /* 
954 810 * Called from vhost_scsi_ioctl() context to walk the list of available 955 811 * tcm_vhost_tpg with an active struct tcm_vhost_nexus 812 + * 813 + * The lock nesting rule is: 814 + * tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex 956 815 */ 957 816 static int vhost_scsi_set_endpoint( 958 817 struct vhost_scsi *vs, ··· 969 820 int index, ret, i, len; 970 821 bool match = false; 971 822 823 + mutex_lock(&tcm_vhost_mutex); 972 824 mutex_lock(&vs->dev.mutex); 825 + 973 826 /* Verify that ring has been setup correctly. */ 974 827 for (index = 0; index < vs->dev.nvqs; ++index) { 975 828 /* Verify that ring has been setup correctly. */ 976 829 if (!vhost_vq_access_ok(&vs->vqs[index])) { 977 - mutex_unlock(&vs->dev.mutex); 978 - return -EFAULT; 830 + ret = -EFAULT; 831 + goto out; 979 832 } 980 833 } 981 834 982 835 len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; 983 836 vs_tpg = kzalloc(len, GFP_KERNEL); 984 837 if (!vs_tpg) { 985 - mutex_unlock(&vs->dev.mutex); 986 - return -ENOMEM; 838 + ret = -ENOMEM; 839 + goto out; 987 840 } 988 841 if (vs->vs_tpg) 989 842 memcpy(vs_tpg, vs->vs_tpg, len); 990 843 991 - mutex_lock(&tcm_vhost_mutex); 992 844 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { 993 845 mutex_lock(&tv_tpg->tv_tpg_mutex); 994 846 if (!tv_tpg->tpg_nexus) { ··· 1004 854 1005 855 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { 1006 856 if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) { 1007 - mutex_unlock(&tv_tpg->tv_tpg_mutex); 1008 - mutex_unlock(&tcm_vhost_mutex); 1009 - mutex_unlock(&vs->dev.mutex); 1010 857 kfree(vs_tpg); 1011 - return -EEXIST; 858 + mutex_unlock(&tv_tpg->tv_tpg_mutex); 859 + ret = -EEXIST; 860 + goto out; 1012 861 } 1013 862 tv_tpg->tv_tpg_vhost_count++; 863 + tv_tpg->vhost_scsi = vs; 1014 864 vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; 1015 865 smp_mb__after_atomic_inc(); 1016 866 match = true; 1017 867 } 1018 868 mutex_unlock(&tv_tpg->tv_tpg_mutex); 1019 869 } 1020 - mutex_unlock(&tcm_vhost_mutex); 
1021 870 1022 871 if (match) { 1023 872 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, ··· 1042 893 kfree(vs->vs_tpg); 1043 894 vs->vs_tpg = vs_tpg; 1044 895 896 + out: 1045 897 mutex_unlock(&vs->dev.mutex); 898 + mutex_unlock(&tcm_vhost_mutex); 1046 899 return ret; 1047 900 } 1048 901 ··· 1059 908 int index, ret, i; 1060 909 u8 target; 1061 910 911 + mutex_lock(&tcm_vhost_mutex); 1062 912 mutex_lock(&vs->dev.mutex); 1063 913 /* Verify that ring has been setup correctly. */ 1064 914 for (index = 0; index < vs->dev.nvqs; ++index) { ··· 1070 918 } 1071 919 1072 920 if (!vs->vs_tpg) { 1073 - mutex_unlock(&vs->dev.mutex); 1074 - return 0; 921 + ret = 0; 922 + goto err_dev; 1075 923 } 1076 924 1077 925 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { ··· 1096 944 goto err_tpg; 1097 945 } 1098 946 tv_tpg->tv_tpg_vhost_count--; 947 + tv_tpg->vhost_scsi = NULL; 1099 948 vs->vs_tpg[target] = NULL; 1100 949 match = true; 1101 950 mutex_unlock(&tv_tpg->tv_tpg_mutex); ··· 1117 964 vhost_scsi_flush(vs); 1118 965 kfree(vs->vs_tpg); 1119 966 vs->vs_tpg = NULL; 967 + WARN_ON(vs->vs_events_nr); 1120 968 mutex_unlock(&vs->dev.mutex); 1121 - 969 + mutex_unlock(&tcm_vhost_mutex); 1122 970 return 0; 1123 971 1124 972 err_tpg: 1125 973 mutex_unlock(&tv_tpg->tv_tpg_mutex); 1126 974 err_dev: 1127 975 mutex_unlock(&vs->dev.mutex); 976 + mutex_unlock(&tcm_vhost_mutex); 1128 977 return ret; 1129 978 } 1130 979 ··· 1158 1003 return -ENOMEM; 1159 1004 1160 1005 vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work); 1006 + vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work); 1007 + 1008 + s->vs_events_nr = 0; 1009 + s->vs_events_missed = false; 1161 1010 1162 1011 s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick; 1163 1012 s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick; ··· 1188 1029 vhost_scsi_clear_endpoint(s, &t); 1189 1030 vhost_dev_stop(&s->dev); 1190 1031 vhost_dev_cleanup(&s->dev, false); 1032 + /* Jobs can re-queue themselves in evt kick 
handler. Do extra flush. */ 1033 + vhost_scsi_flush(s); 1191 1034 kfree(s); 1192 1035 return 0; 1193 1036 } ··· 1201 1040 struct vhost_scsi_target backend; 1202 1041 void __user *argp = (void __user *)arg; 1203 1042 u64 __user *featurep = argp; 1043 + u32 __user *eventsp = argp; 1044 + u32 events_missed; 1204 1045 u64 features; 1205 1046 int r, abi_version = VHOST_SCSI_ABI_VERSION; 1047 + struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; 1206 1048 1207 1049 switch (ioctl) { 1208 1050 case VHOST_SCSI_SET_ENDPOINT: ··· 1224 1060 return vhost_scsi_clear_endpoint(vs, &backend); 1225 1061 case VHOST_SCSI_GET_ABI_VERSION: 1226 1062 if (copy_to_user(argp, &abi_version, sizeof abi_version)) 1063 + return -EFAULT; 1064 + return 0; 1065 + case VHOST_SCSI_SET_EVENTS_MISSED: 1066 + if (get_user(events_missed, eventsp)) 1067 + return -EFAULT; 1068 + mutex_lock(&vq->mutex); 1069 + vs->vs_events_missed = events_missed; 1070 + mutex_unlock(&vq->mutex); 1071 + return 0; 1072 + case VHOST_SCSI_GET_EVENTS_MISSED: 1073 + mutex_lock(&vq->mutex); 1074 + events_missed = vs->vs_events_missed; 1075 + mutex_unlock(&vq->mutex); 1076 + if (put_user(events_missed, eventsp)) 1227 1077 return -EFAULT; 1228 1078 return 0; 1229 1079 case VHOST_GET_FEATURES: ··· 1311 1133 return "Unknown"; 1312 1134 } 1313 1135 1136 + static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg, 1137 + struct se_lun *lun, bool plug) 1138 + { 1139 + 1140 + struct vhost_scsi *vs = tpg->vhost_scsi; 1141 + struct vhost_virtqueue *vq; 1142 + u32 reason; 1143 + 1144 + if (!vs) 1145 + return; 1146 + 1147 + mutex_lock(&vs->dev.mutex); 1148 + if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) { 1149 + mutex_unlock(&vs->dev.mutex); 1150 + return; 1151 + } 1152 + 1153 + if (plug) 1154 + reason = VIRTIO_SCSI_EVT_RESET_RESCAN; 1155 + else 1156 + reason = VIRTIO_SCSI_EVT_RESET_REMOVED; 1157 + 1158 + vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; 1159 + mutex_lock(&vq->mutex); 1160 + tcm_vhost_send_evt(vs, tpg, lun, 1161 + 
VIRTIO_SCSI_T_TRANSPORT_RESET, reason); 1162 + mutex_unlock(&vq->mutex); 1163 + mutex_unlock(&vs->dev.mutex); 1164 + } 1165 + 1166 + static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) 1167 + { 1168 + tcm_vhost_do_plug(tpg, lun, true); 1169 + } 1170 + 1171 + static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) 1172 + { 1173 + tcm_vhost_do_plug(tpg, lun, false); 1174 + } 1175 + 1314 1176 static int tcm_vhost_port_link(struct se_portal_group *se_tpg, 1315 1177 struct se_lun *lun) 1316 1178 { 1317 1179 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, 1318 1180 struct tcm_vhost_tpg, se_tpg); 1319 1181 1182 + mutex_lock(&tcm_vhost_mutex); 1183 + 1320 1184 mutex_lock(&tv_tpg->tv_tpg_mutex); 1321 1185 tv_tpg->tv_tpg_port_count++; 1322 1186 mutex_unlock(&tv_tpg->tv_tpg_mutex); 1187 + 1188 + tcm_vhost_hotplug(tv_tpg, lun); 1189 + 1190 + mutex_unlock(&tcm_vhost_mutex); 1323 1191 1324 1192 return 0; 1325 1193 } 1326 1194 1327 1195 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, 1328 - struct se_lun *se_lun) 1196 + struct se_lun *lun) 1329 1197 { 1330 1198 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, 1331 1199 struct tcm_vhost_tpg, se_tpg); 1332 1200 1201 + mutex_lock(&tcm_vhost_mutex); 1202 + 1333 1203 mutex_lock(&tv_tpg->tv_tpg_mutex); 1334 1204 tv_tpg->tv_tpg_port_count--; 1335 1205 mutex_unlock(&tv_tpg->tv_tpg_mutex); 1206 + 1207 + tcm_vhost_hotunplug(tv_tpg, lun); 1208 + 1209 + mutex_unlock(&tcm_vhost_mutex); 1336 1210 } 1337 1211 1338 1212 static struct se_node_acl *tcm_vhost_make_nodeacl(
+13
drivers/vhost/tcm_vhost.h
··· 53 53 struct se_node_acl se_node_acl; 54 54 }; 55 55 56 + struct vhost_scsi; 56 57 struct tcm_vhost_tpg { 57 58 /* Vhost port target portal group tag for TCM */ 58 59 u16 tport_tpgt; ··· 71 70 struct tcm_vhost_tport *tport; 72 71 /* Returned by tcm_vhost_make_tpg() */ 73 72 struct se_portal_group se_tpg; 73 + /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */ 74 + struct vhost_scsi *vhost_scsi; 74 75 }; 75 76 76 77 struct tcm_vhost_tport { ··· 84 81 char tport_name[TCM_VHOST_NAMELEN]; 85 82 /* Returned by tcm_vhost_make_tport() */ 86 83 struct se_wwn tport_wwn; 84 + }; 85 + 86 + struct tcm_vhost_evt { 87 + /* event to be sent to guest */ 88 + struct virtio_scsi_event event; 89 + /* event list, serviced from vhost worker thread */ 90 + struct llist_node list; 87 91 }; 88 92 89 93 /* ··· 123 113 #define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) 124 114 /* Changing this breaks userspace. */ 125 115 #define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int) 116 + /* Set and get the events missed flag */ 117 + #define VHOST_SCSI_SET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x43, __u32) 118 + #define VHOST_SCSI_GET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x44, __u32)
+83
include/target/iscsi/iscsi_transport.h
··· 1 + #include <linux/module.h> 2 + #include <linux/list.h> 3 + #include "../../../drivers/target/iscsi/iscsi_target_core.h" 4 + 5 + struct iscsit_transport { 6 + #define ISCSIT_TRANSPORT_NAME 16 7 + char name[ISCSIT_TRANSPORT_NAME]; 8 + int transport_type; 9 + struct module *owner; 10 + struct list_head t_node; 11 + int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *); 12 + int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *); 13 + void (*iscsit_free_np)(struct iscsi_np *); 14 + void (*iscsit_free_conn)(struct iscsi_conn *); 15 + struct iscsi_cmd *(*iscsit_alloc_cmd)(struct iscsi_conn *, gfp_t); 16 + int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *); 17 + int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32); 18 + int (*iscsit_immediate_queue)(struct iscsi_conn *, struct iscsi_cmd *, int); 19 + int (*iscsit_response_queue)(struct iscsi_conn *, struct iscsi_cmd *, int); 20 + int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool); 21 + int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *); 22 + int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *); 23 + }; 24 + 25 + /* 26 + * From iscsi_target_transport.c 27 + */ 28 + 29 + extern int iscsit_register_transport(struct iscsit_transport *); 30 + extern void iscsit_unregister_transport(struct iscsit_transport *); 31 + extern struct iscsit_transport *iscsit_get_transport(int); 32 + extern void iscsit_put_transport(struct iscsit_transport *); 33 + 34 + /* 35 + * From iscsi_target.c 36 + */ 37 + extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, 38 + struct iscsi_cmd *); 39 + extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *, 40 + unsigned char *); 41 + extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *); 42 + extern int iscsit_process_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *, 43 + struct iscsi_scsi_req *); 44 + extern int 
iscsit_check_dataout_hdr(struct iscsi_conn *, unsigned char *, 45 + struct iscsi_cmd **); 46 + extern int iscsit_check_dataout_payload(struct iscsi_cmd *, struct iscsi_data *, 47 + bool); 48 + extern int iscsit_handle_nop_out(struct iscsi_conn *, struct iscsi_cmd *, 49 + unsigned char *); 50 + extern int iscsit_handle_logout_cmd(struct iscsi_conn *, struct iscsi_cmd *, 51 + unsigned char *); 52 + extern int iscsit_handle_task_mgt_cmd(struct iscsi_conn *, struct iscsi_cmd *, 53 + unsigned char *); 54 + extern void iscsit_build_rsp_pdu(struct iscsi_cmd *, struct iscsi_conn *, 55 + bool, struct iscsi_scsi_rsp *); 56 + extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *, 57 + struct iscsi_nopin *, bool); 58 + extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *, 59 + struct iscsi_tm_rsp *); 60 + extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *, 61 + struct iscsi_reject *); 62 + extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *, 63 + struct iscsi_logout_rsp *); 64 + extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *); 65 + /* 66 + * From iscsi_target_device.c 67 + */ 68 + extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *); 69 + /* 70 + * From iscsi_target_erl1.c 71 + */ 72 + extern void iscsit_stop_dataout_timer(struct iscsi_cmd *); 73 + 74 + /* 75 + * From iscsi_target_tmr.c 76 + */ 77 + extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *); 78 + 79 + /* 80 + * From iscsi_target_util.c 81 + */ 82 + extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); 83 + extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, __be32);
+4
include/target/target_core_backend.h
··· 60 60 u32 sbc_get_device_rev(struct se_device *dev); 61 61 u32 sbc_get_device_type(struct se_device *dev); 62 62 sector_t sbc_get_write_same_sectors(struct se_cmd *cmd); 63 + sense_reason_t sbc_execute_unmap(struct se_cmd *cmd, 64 + sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv, 65 + sector_t lba, sector_t nolb), 66 + void *priv); 63 67 64 68 void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); 65 69 int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
+1 -1
include/target/target_core_fabric.h
··· 120 120 int transport_check_aborted_status(struct se_cmd *, int); 121 121 int transport_send_check_condition_and_sense(struct se_cmd *, 122 122 sense_reason_t, int); 123 - 123 + int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); 124 124 int target_put_sess_cmd(struct se_session *, struct se_cmd *); 125 125 void target_sess_cmd_list_set_waiting(struct se_session *); 126 126 void target_wait_for_sess_cmds(struct se_session *, int);