Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target updates from Nicholas Bellinger:
"The highlights this round include:

- add support for SCSI Referrals (Hannes)
- add support for T10 DIF into target core (nab + mkp)
- add support for T10 DIF emulation in FILEIO + RAMDISK backends (Sagi + nab)
- add support for T10 DIF -> bio_integrity passthrough in IBLOCK backend (nab)
- prep changes to iser-target for >= v3.15 T10 DIF support (Sagi)
- add support for qla2xxx N_Port ID Virtualization - NPIV (Saurav + Quinn)
- allow percpu_ida_alloc() to receive task state bitmask (Kent)
- fix >= v3.12 iscsi-target session reset hung task regression (nab)
- fix >= v3.13 percpu_ref se_lun->lun_ref_active race (nab)
- fix a long-standing network portal creation race (Andy)"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (51 commits)
target: Fix percpu_ref_put race in transport_lun_remove_cmd
target/iscsi: Fix network portal creation race
target: Report bad sector in sense data for DIF errors
iscsi-target: Convert gfp_t parameter to task state bitmask
iscsi-target: Fix connection reset hang with percpu_ida_alloc
percpu_ida: Make percpu_ida_alloc + callers accept task state bitmask
iscsi-target: Pre-allocate more tags to avoid ack starvation
qla2xxx: Configure NPIV fc_vport via tcm_qla2xxx_npiv_make_lport
qla2xxx: Enhancements to enable NPIV support for QLOGIC ISPs with TCM/LIO.
qla2xxx: Fix scsi_host leak on qlt_lport_register callback failure
IB/isert: pass scatterlist instead of cmd to fast_reg_mr routine
IB/isert: Move fastreg descriptor creation to a function
IB/isert: Avoid frwr notation, use fastreg
IB/isert: separate connection protection domains and dma MRs
tcm_loop: Enable DIF/DIX modes in SCSI host LLD
target/rd: Add DIF protection into rd_execute_rw
target/rd: Add support for protection SGL setup + release
target/rd: Refactor rd_build_device_space + rd_release_device_space
target/file: Add DIF protection support to fd_execute_rw
target/file: Add DIF protection init/format support
...
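
Several of the commits above add software T10 DIF (Protection Information) emulation to the FILEIO and RAMDISK backends. The real checking lives in the target core; purely as orientation, the sketch below shows the shape of a Type 1 verify pass, assuming the standard 8-byte PI tuple (2-byte CRC16 guard tag, 2-byte application tag, 4-byte reference tag carrying the low 32 bits of the LBA) and the in-kernel crc_t10dif() helper. The struct and function names here are illustrative, not the ones the target code uses.

#include <linux/crc-t10dif.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative Type 1 PI tuple: one of these accompanies every sector. */
struct example_dif_tuple {
    __be16 guard_tag;   /* CRC16-T10DIF over the sector's data */
    __be16 app_tag;     /* opaque to the verifier */
    __be32 ref_tag;     /* low 32 bits of the sector's LBA */
};

/*
 * Recompute the guard CRC for each sector and compare guard and
 * reference tags against the protection buffer.  Returns 0 on success,
 * -EIO on the first mismatching sector.
 */
static int example_dif_verify(const u8 *data, const struct example_dif_tuple *pi,
                              u32 nr_sectors, u32 block_size, sector_t start_lba)
{
    u32 i;

    for (i = 0; i < nr_sectors; i++) {
        __u16 crc = crc_t10dif(data + i * block_size, block_size);

        if (pi[i].guard_tag != cpu_to_be16(crc))
            return -EIO;    /* data corrupted in flight or at rest */
        if (pi[i].ref_tag != cpu_to_be32((u32)(start_lba + i)))
            return -EIO;    /* sector landed at the wrong LBA */
    }
    return 0;
}

When such a check fails, the failing sector has to be reported back to the initiator in the sense data, which is what the "Report bad sector in sense data for DIF errors" commit above wires up.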

+2302 -507
+4 -2
block/blk-mq-tag.c
···
 {
 	int tag;
 
-	tag = percpu_ida_alloc(&tags->free_tags, gfp);
+	tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ?
+			       TASK_UNINTERRUPTIBLE : TASK_RUNNING);
 	if (tag < 0)
 		return BLK_MQ_TAG_FAIL;
 	return tag + tags->nr_reserved_tags;
···
 		return BLK_MQ_TAG_FAIL;
 	}
 
-	tag = percpu_ida_alloc(&tags->reserved_tags, gfp);
+	tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ?
+			       TASK_UNINTERRUPTIBLE : TASK_RUNNING);
 	if (tag < 0)
 		return BLK_MQ_TAG_FAIL;
 	return tag;
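
The same gfp-to-task-state translation recurs at every call site this series converts (blk-mq here, iscsi-target below), because percpu_ida_alloc() now takes the task state the caller is prepared to sleep in instead of a gfp_t. Shown only to make the mapping explicit, a hypothetical wrapper (the helper name is ours; nothing like it is added by the series):

#include <linux/gfp.h>
#include <linux/percpu_ida.h>
#include <linux/sched.h>

/*
 * Illustrative only: translate a legacy gfp_t request into the task
 * state the reworked allocator expects.  __GFP_WAIT callers are allowed
 * to sleep, so they block in TASK_UNINTERRUPTIBLE until a tag frees up;
 * everyone else takes the non-blocking TASK_RUNNING path.
 */
static inline int percpu_ida_alloc_gfp(struct percpu_ida *pool, gfp_t gfp)
{
    return percpu_ida_alloc(pool, (gfp & __GFP_WAIT) ?
                            TASK_UNINTERRUPTIBLE : TASK_RUNNING);
}

iscsi-target goes one step further and passes TASK_INTERRUPTIBLE from isert_allocate_cmd(), so a connection reset can wake a waiter instead of leaving it stuck, which is the hang addressed by "iscsi-target: Fix connection reset hang with percpu_ida_alloc".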
+117 -105
drivers/infiniband/ulp/isert/ib_isert.c
··· 47 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 48 struct isert_rdma_wr *wr); 49 static void 50 - isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); 51 static int 52 - isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 53 - struct isert_rdma_wr *wr); 54 55 static void 56 isert_qp_event_callback(struct ib_event *e, void *context) ··· 227 228 /* asign function handlers */ 229 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { 230 - device->use_frwr = 1; 231 - device->reg_rdma_mem = isert_reg_rdma_frwr; 232 - device->unreg_rdma_mem = isert_unreg_rdma_frwr; 233 } else { 234 - device->use_frwr = 0; 235 device->reg_rdma_mem = isert_map_rdma; 236 device->unreg_rdma_mem = isert_unmap_cmd; 237 } ··· 239 device->cqs_used = min_t(int, num_online_cpus(), 240 device->ib_device->num_comp_vectors); 241 device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used); 242 - pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n", 243 device->cqs_used, device->ib_device->name, 244 - device->ib_device->num_comp_vectors, device->use_frwr); 245 device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) * 246 device->cqs_used, GFP_KERNEL); 247 if (!device->cq_desc) { ··· 250 return -ENOMEM; 251 } 252 cq_desc = device->cq_desc; 253 - 254 - device->dev_pd = ib_alloc_pd(ib_dev); 255 - if (IS_ERR(device->dev_pd)) { 256 - ret = PTR_ERR(device->dev_pd); 257 - pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret); 258 - goto out_cq_desc; 259 - } 260 261 for (i = 0; i < device->cqs_used; i++) { 262 cq_desc[i].device = device; ··· 288 goto out_cq; 289 } 290 291 - device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE); 292 - if (IS_ERR(device->dev_mr)) { 293 - ret = PTR_ERR(device->dev_mr); 294 - pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret); 295 - goto out_cq; 296 - } 297 - 298 return 0; 299 300 out_cq: ··· 303 ib_destroy_cq(device->dev_tx_cq[j]); 304 } 305 } 306 - ib_dealloc_pd(device->dev_pd); 307 - 308 - out_cq_desc: 309 kfree(device->cq_desc); 310 311 return ret; ··· 325 device->dev_tx_cq[i] = NULL; 326 } 327 328 - ib_dereg_mr(device->dev_mr); 329 - ib_dealloc_pd(device->dev_pd); 330 kfree(device->cq_desc); 331 } 332 ··· 380 } 381 382 static void 383 - isert_conn_free_frwr_pool(struct isert_conn *isert_conn) 384 { 385 struct fast_reg_descriptor *fr_desc, *tmp; 386 int i = 0; 387 388 - if (list_empty(&isert_conn->conn_frwr_pool)) 389 return; 390 391 - pr_debug("Freeing conn %p frwr pool", isert_conn); 392 393 list_for_each_entry_safe(fr_desc, tmp, 394 - &isert_conn->conn_frwr_pool, list) { 395 list_del(&fr_desc->list); 396 ib_free_fast_reg_page_list(fr_desc->data_frpl); 397 ib_dereg_mr(fr_desc->data_mr); ··· 399 ++i; 400 } 401 402 - if (i < isert_conn->conn_frwr_pool_size) 403 pr_warn("Pool still has %d regions registered\n", 404 - isert_conn->conn_frwr_pool_size - i); 405 } 406 407 static int 408 - isert_conn_create_frwr_pool(struct isert_conn *isert_conn) 409 { 410 struct fast_reg_descriptor *fr_desc; 411 struct isert_device *device = isert_conn->conn_device; 412 int i, ret; 413 414 - INIT_LIST_HEAD(&isert_conn->conn_frwr_pool); 415 - isert_conn->conn_frwr_pool_size = 0; 416 for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) { 417 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); 418 if (!fr_desc) { ··· 448 goto err; 449 } 450 451 - fr_desc->data_frpl = 452 - ib_alloc_fast_reg_page_list(device->ib_device, 453 - ISCSI_ISER_SG_TABLESIZE); 454 - if (IS_ERR(fr_desc->data_frpl)) { 455 - pr_err("Failed to allocate 
fr_pg_list err=%ld\n", 456 - PTR_ERR(fr_desc->data_frpl)); 457 - ret = PTR_ERR(fr_desc->data_frpl); 458 goto err; 459 } 460 461 - fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd, 462 - ISCSI_ISER_SG_TABLESIZE); 463 - if (IS_ERR(fr_desc->data_mr)) { 464 - pr_err("Failed to allocate frmr err=%ld\n", 465 - PTR_ERR(fr_desc->data_mr)); 466 - ret = PTR_ERR(fr_desc->data_mr); 467 - ib_free_fast_reg_page_list(fr_desc->data_frpl); 468 - goto err; 469 - } 470 - pr_debug("Create fr_desc %p page_list %p\n", 471 - fr_desc, fr_desc->data_frpl->page_list); 472 - 473 - fr_desc->valid = true; 474 - list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); 475 - isert_conn->conn_frwr_pool_size++; 476 } 477 478 - pr_debug("Creating conn %p frwr pool size=%d", 479 - isert_conn, isert_conn->conn_frwr_pool_size); 480 481 return 0; 482 483 err: 484 - isert_conn_free_frwr_pool(isert_conn); 485 return ret; 486 } 487 ··· 552 } 553 554 isert_conn->conn_device = device; 555 - isert_conn->conn_pd = device->dev_pd; 556 - isert_conn->conn_mr = device->dev_mr; 557 558 - if (device->use_frwr) { 559 - ret = isert_conn_create_frwr_pool(isert_conn); 560 if (ret) { 561 - pr_err("Conn: %p failed to create frwr_pool\n", isert_conn); 562 - goto out_frwr; 563 } 564 } 565 ··· 591 return 0; 592 593 out_conn_dev: 594 - if (device->use_frwr) 595 - isert_conn_free_frwr_pool(isert_conn); 596 - out_frwr: 597 isert_device_try_release(device); 598 out_rsp_dma_map: 599 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, ··· 621 622 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 623 624 - if (device && device->use_frwr) 625 - isert_conn_free_frwr_pool(isert_conn); 626 627 if (isert_conn->conn_qp) { 628 cq_index = ((struct isert_cq_desc *) ··· 635 636 isert_free_rx_descriptors(isert_conn); 637 rdma_destroy_id(isert_conn->conn_cm_id); 638 639 if (isert_conn->login_buf) { 640 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, ··· 1040 } 1041 1042 static struct iscsi_cmd 1043 - *isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp) 1044 { 1045 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1046 struct isert_cmd *isert_cmd; 1047 struct iscsi_cmd *cmd; 1048 1049 - cmd = iscsit_allocate_cmd(conn, gfp); 1050 if (!cmd) { 1051 pr_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1052 return NULL; ··· 1235 1236 switch (opcode) { 1237 case ISCSI_OP_SCSI_CMD: 1238 - cmd = isert_allocate_cmd(conn, GFP_KERNEL); 1239 if (!cmd) 1240 break; 1241 ··· 1249 rx_desc, (unsigned char *)hdr); 1250 break; 1251 case ISCSI_OP_NOOP_OUT: 1252 - cmd = isert_allocate_cmd(conn, GFP_KERNEL); 1253 if (!cmd) 1254 break; 1255 ··· 1262 (unsigned char *)hdr); 1263 break; 1264 case ISCSI_OP_SCSI_TMFUNC: 1265 - cmd = isert_allocate_cmd(conn, GFP_KERNEL); 1266 if (!cmd) 1267 break; 1268 ··· 1270 (unsigned char *)hdr); 1271 break; 1272 case ISCSI_OP_LOGOUT: 1273 - cmd = isert_allocate_cmd(conn, GFP_KERNEL); 1274 if (!cmd) 1275 break; 1276 ··· 1281 HZ); 1282 break; 1283 case ISCSI_OP_TEXT: 1284 - cmd = isert_allocate_cmd(conn, GFP_KERNEL); 1285 if (!cmd) 1286 break; 1287 ··· 1420 } 1421 1422 static void 1423 - isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1424 { 1425 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1426 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1427 LIST_HEAD(unmap_list); 1428 1429 - pr_debug("unreg_frwr_cmd: %p\n", isert_cmd); 1430 1431 if (wr->fr_desc) { 1432 - pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n", 1433 isert_cmd, wr->fr_desc); 
1434 spin_lock_bh(&isert_conn->conn_lock); 1435 - list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool); 1436 spin_unlock_bh(&isert_conn->conn_lock); 1437 wr->fr_desc = NULL; 1438 } 1439 1440 if (wr->sge) { 1441 - pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd); 1442 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, 1443 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 1444 DMA_TO_DEVICE : DMA_FROM_DEVICE); ··· 2179 2180 static int 2181 isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, 2182 - struct isert_cmd *isert_cmd, struct isert_conn *isert_conn, 2183 - struct ib_sge *ib_sge, u32 offset, unsigned int data_len) 2184 { 2185 - struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 2186 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2187 - struct scatterlist *sg_start; 2188 - u32 sg_off, page_off; 2189 struct ib_send_wr fr_wr, inv_wr; 2190 struct ib_send_wr *bad_wr, *wr = NULL; 2191 u8 key; 2192 - int ret, sg_nents, pagelist_len; 2193 2194 - sg_off = offset / PAGE_SIZE; 2195 - sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 2196 - sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off, 2197 - ISCSI_ISER_SG_TABLESIZE); 2198 page_off = offset % PAGE_SIZE; 2199 2200 - pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n", 2201 - isert_cmd, fr_desc, sg_nents, sg_off, offset); 2202 2203 pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents, 2204 &fr_desc->data_frpl->page_list[0]); ··· 2244 } 2245 2246 static int 2247 - isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 2248 - struct isert_rdma_wr *wr) 2249 { 2250 struct se_cmd *se_cmd = &cmd->se_cmd; 2251 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); ··· 2263 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2264 data_left = se_cmd->data_length; 2265 } else { 2266 - sg_off = cmd->write_data_done / PAGE_SIZE; 2267 - data_left = se_cmd->data_length - cmd->write_data_done; 2268 offset = cmd->write_data_done; 2269 isert_cmd->tx_desc.isert_cmd = isert_cmd; 2270 } 2271 ··· 2323 wr->fr_desc = NULL; 2324 } else { 2325 spin_lock_irqsave(&isert_conn->conn_lock, flags); 2326 - fr_desc = list_first_entry(&isert_conn->conn_frwr_pool, 2327 struct fast_reg_descriptor, list); 2328 list_del(&fr_desc->list); 2329 spin_unlock_irqrestore(&isert_conn->conn_lock, flags); 2330 wr->fr_desc = fr_desc; 2331 2332 - ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn, 2333 - ib_sge, offset, data_len); 2334 if (ret) { 2335 - list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); 2336 goto unmap_sg; 2337 } 2338 }
··· 47 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 48 struct isert_rdma_wr *wr); 49 static void 50 + isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); 51 static int 52 + isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 53 + struct isert_rdma_wr *wr); 54 55 static void 56 isert_qp_event_callback(struct ib_event *e, void *context) ··· 227 228 /* asign function handlers */ 229 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { 230 + device->use_fastreg = 1; 231 + device->reg_rdma_mem = isert_reg_rdma; 232 + device->unreg_rdma_mem = isert_unreg_rdma; 233 } else { 234 + device->use_fastreg = 0; 235 device->reg_rdma_mem = isert_map_rdma; 236 device->unreg_rdma_mem = isert_unmap_cmd; 237 } ··· 239 device->cqs_used = min_t(int, num_online_cpus(), 240 device->ib_device->num_comp_vectors); 241 device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used); 242 + pr_debug("Using %d CQs, device %s supports %d vectors support " 243 + "Fast registration %d\n", 244 device->cqs_used, device->ib_device->name, 245 + device->ib_device->num_comp_vectors, device->use_fastreg); 246 device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) * 247 device->cqs_used, GFP_KERNEL); 248 if (!device->cq_desc) { ··· 249 return -ENOMEM; 250 } 251 cq_desc = device->cq_desc; 252 253 for (i = 0; i < device->cqs_used; i++) { 254 cq_desc[i].device = device; ··· 294 goto out_cq; 295 } 296 297 return 0; 298 299 out_cq: ··· 316 ib_destroy_cq(device->dev_tx_cq[j]); 317 } 318 } 319 kfree(device->cq_desc); 320 321 return ret; ··· 341 device->dev_tx_cq[i] = NULL; 342 } 343 344 kfree(device->cq_desc); 345 } 346 ··· 398 } 399 400 static void 401 + isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) 402 { 403 struct fast_reg_descriptor *fr_desc, *tmp; 404 int i = 0; 405 406 + if (list_empty(&isert_conn->conn_fr_pool)) 407 return; 408 409 + pr_debug("Freeing conn %p fastreg pool", isert_conn); 410 411 list_for_each_entry_safe(fr_desc, tmp, 412 + &isert_conn->conn_fr_pool, list) { 413 list_del(&fr_desc->list); 414 ib_free_fast_reg_page_list(fr_desc->data_frpl); 415 ib_dereg_mr(fr_desc->data_mr); ··· 417 ++i; 418 } 419 420 + if (i < isert_conn->conn_fr_pool_size) 421 pr_warn("Pool still has %d regions registered\n", 422 + isert_conn->conn_fr_pool_size - i); 423 } 424 425 static int 426 + isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, 427 + struct fast_reg_descriptor *fr_desc) 428 + { 429 + fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device, 430 + ISCSI_ISER_SG_TABLESIZE); 431 + if (IS_ERR(fr_desc->data_frpl)) { 432 + pr_err("Failed to allocate data frpl err=%ld\n", 433 + PTR_ERR(fr_desc->data_frpl)); 434 + return PTR_ERR(fr_desc->data_frpl); 435 + } 436 + 437 + fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); 438 + if (IS_ERR(fr_desc->data_mr)) { 439 + pr_err("Failed to allocate data frmr err=%ld\n", 440 + PTR_ERR(fr_desc->data_mr)); 441 + ib_free_fast_reg_page_list(fr_desc->data_frpl); 442 + return PTR_ERR(fr_desc->data_mr); 443 + } 444 + pr_debug("Create fr_desc %p page_list %p\n", 445 + fr_desc, fr_desc->data_frpl->page_list); 446 + 447 + fr_desc->valid = true; 448 + 449 + return 0; 450 + } 451 + 452 + static int 453 + isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) 454 { 455 struct fast_reg_descriptor *fr_desc; 456 struct isert_device *device = isert_conn->conn_device; 457 int i, ret; 458 459 + INIT_LIST_HEAD(&isert_conn->conn_fr_pool); 460 + isert_conn->conn_fr_pool_size = 0; 461 for (i = 0; i < 
ISCSI_DEF_XMIT_CMDS_MAX; i++) { 462 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); 463 if (!fr_desc) { ··· 439 goto err; 440 } 441 442 + ret = isert_create_fr_desc(device->ib_device, 443 + isert_conn->conn_pd, fr_desc); 444 + if (ret) { 445 + pr_err("Failed to create fastreg descriptor err=%d\n", 446 + ret); 447 goto err; 448 } 449 450 + list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); 451 + isert_conn->conn_fr_pool_size++; 452 } 453 454 + pr_debug("Creating conn %p fastreg pool size=%d", 455 + isert_conn, isert_conn->conn_fr_pool_size); 456 457 return 0; 458 459 err: 460 + isert_conn_free_fastreg_pool(isert_conn); 461 return ret; 462 } 463 ··· 558 } 559 560 isert_conn->conn_device = device; 561 + isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device); 562 + if (IS_ERR(isert_conn->conn_pd)) { 563 + ret = PTR_ERR(isert_conn->conn_pd); 564 + pr_err("ib_alloc_pd failed for conn %p: ret=%d\n", 565 + isert_conn, ret); 566 + goto out_pd; 567 + } 568 569 + isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd, 570 + IB_ACCESS_LOCAL_WRITE); 571 + if (IS_ERR(isert_conn->conn_mr)) { 572 + ret = PTR_ERR(isert_conn->conn_mr); 573 + pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n", 574 + isert_conn, ret); 575 + goto out_mr; 576 + } 577 + 578 + if (device->use_fastreg) { 579 + ret = isert_conn_create_fastreg_pool(isert_conn); 580 if (ret) { 581 + pr_err("Conn: %p failed to create fastreg pool\n", 582 + isert_conn); 583 + goto out_fastreg; 584 } 585 } 586 ··· 582 return 0; 583 584 out_conn_dev: 585 + if (device->use_fastreg) 586 + isert_conn_free_fastreg_pool(isert_conn); 587 + out_fastreg: 588 + ib_dereg_mr(isert_conn->conn_mr); 589 + out_mr: 590 + ib_dealloc_pd(isert_conn->conn_pd); 591 + out_pd: 592 isert_device_try_release(device); 593 out_rsp_dma_map: 594 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, ··· 608 609 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 610 611 + if (device && device->use_fastreg) 612 + isert_conn_free_fastreg_pool(isert_conn); 613 614 if (isert_conn->conn_qp) { 615 cq_index = ((struct isert_cq_desc *) ··· 622 623 isert_free_rx_descriptors(isert_conn); 624 rdma_destroy_id(isert_conn->conn_cm_id); 625 + 626 + ib_dereg_mr(isert_conn->conn_mr); 627 + ib_dealloc_pd(isert_conn->conn_pd); 628 629 if (isert_conn->login_buf) { 630 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, ··· 1024 } 1025 1026 static struct iscsi_cmd 1027 + *isert_allocate_cmd(struct iscsi_conn *conn) 1028 { 1029 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1030 struct isert_cmd *isert_cmd; 1031 struct iscsi_cmd *cmd; 1032 1033 + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 1034 if (!cmd) { 1035 pr_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1036 return NULL; ··· 1219 1220 switch (opcode) { 1221 case ISCSI_OP_SCSI_CMD: 1222 + cmd = isert_allocate_cmd(conn); 1223 if (!cmd) 1224 break; 1225 ··· 1233 rx_desc, (unsigned char *)hdr); 1234 break; 1235 case ISCSI_OP_NOOP_OUT: 1236 + cmd = isert_allocate_cmd(conn); 1237 if (!cmd) 1238 break; 1239 ··· 1246 (unsigned char *)hdr); 1247 break; 1248 case ISCSI_OP_SCSI_TMFUNC: 1249 + cmd = isert_allocate_cmd(conn); 1250 if (!cmd) 1251 break; 1252 ··· 1254 (unsigned char *)hdr); 1255 break; 1256 case ISCSI_OP_LOGOUT: 1257 + cmd = isert_allocate_cmd(conn); 1258 if (!cmd) 1259 break; 1260 ··· 1265 HZ); 1266 break; 1267 case ISCSI_OP_TEXT: 1268 + cmd = isert_allocate_cmd(conn); 1269 if (!cmd) 1270 break; 1271 ··· 1404 } 1405 1406 static void 1407 + 
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1408 { 1409 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1410 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1411 LIST_HEAD(unmap_list); 1412 1413 + pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd); 1414 1415 if (wr->fr_desc) { 1416 + pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n", 1417 isert_cmd, wr->fr_desc); 1418 spin_lock_bh(&isert_conn->conn_lock); 1419 + list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool); 1420 spin_unlock_bh(&isert_conn->conn_lock); 1421 wr->fr_desc = NULL; 1422 } 1423 1424 if (wr->sge) { 1425 + pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd); 1426 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, 1427 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 1428 DMA_TO_DEVICE : DMA_FROM_DEVICE); ··· 2163 2164 static int 2165 isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, 2166 + struct isert_conn *isert_conn, struct scatterlist *sg_start, 2167 + struct ib_sge *ib_sge, u32 sg_nents, u32 offset, 2168 + unsigned int data_len) 2169 { 2170 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2171 struct ib_send_wr fr_wr, inv_wr; 2172 struct ib_send_wr *bad_wr, *wr = NULL; 2173 + int ret, pagelist_len; 2174 + u32 page_off; 2175 u8 key; 2176 2177 + sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE); 2178 page_off = offset % PAGE_SIZE; 2179 2180 + pr_debug("Use fr_desc %p sg_nents %d offset %u\n", 2181 + fr_desc, sg_nents, offset); 2182 2183 pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents, 2184 &fr_desc->data_frpl->page_list[0]); ··· 2232 } 2233 2234 static int 2235 + isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 2236 + struct isert_rdma_wr *wr) 2237 { 2238 struct se_cmd *se_cmd = &cmd->se_cmd; 2239 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); ··· 2251 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2252 data_left = se_cmd->data_length; 2253 } else { 2254 offset = cmd->write_data_done; 2255 + sg_off = offset / PAGE_SIZE; 2256 + data_left = se_cmd->data_length - cmd->write_data_done; 2257 isert_cmd->tx_desc.isert_cmd = isert_cmd; 2258 } 2259 ··· 2311 wr->fr_desc = NULL; 2312 } else { 2313 spin_lock_irqsave(&isert_conn->conn_lock, flags); 2314 + fr_desc = list_first_entry(&isert_conn->conn_fr_pool, 2315 struct fast_reg_descriptor, list); 2316 list_del(&fr_desc->list); 2317 spin_unlock_irqrestore(&isert_conn->conn_lock, flags); 2318 wr->fr_desc = fr_desc; 2319 2320 + ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start, 2321 + ib_sge, sg_nents, offset, data_len); 2322 if (ret) { 2323 + list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); 2324 goto unmap_sg; 2325 } 2326 }
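
Beyond the frwr-to-fastreg rename, the isert diff above moves the protection domain and DMA MR from the shared isert_device into each isert_conn and splits descriptor allocation out into isert_create_fr_desc(). Condensed from the new isert_connect_request() path, here is a sketch of the per-connection setup order; it assumes the ib_isert.c context it would sit in, abbreviates the unwind, and uses a function name of our own:

/*
 * Each connection now owns its PD, DMA MR and, when the device sets
 * IB_DEVICE_MEM_MGT_EXTENSIONS, a pool of fast-registration descriptors,
 * instead of sharing the device-wide dev_pd/dev_mr this series removes.
 */
static int example_isert_conn_setup_mem(struct isert_conn *isert_conn,
                                        struct isert_device *device)
{
    int ret;

    isert_conn->conn_pd = ib_alloc_pd(device->ib_device);
    if (IS_ERR(isert_conn->conn_pd))
        return PTR_ERR(isert_conn->conn_pd);

    isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
                                        IB_ACCESS_LOCAL_WRITE);
    if (IS_ERR(isert_conn->conn_mr)) {
        ret = PTR_ERR(isert_conn->conn_mr);
        goto out_pd;
    }

    if (device->use_fastreg) {
        ret = isert_conn_create_fastreg_pool(isert_conn);
        if (ret)
            goto out_mr;
    }
    return 0;

out_mr:
    ib_dereg_mr(isert_conn->conn_mr);
out_pd:
    ib_dealloc_pd(isert_conn->conn_pd);
    return ret;
}

The matching teardown in isert_connect_release() now dereferences the MR and deallocates the PD per connection, which is why the device-level dev_mr/dev_pd cleanup disappears from the diff.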
+4 -6
drivers/infiniband/ulp/isert/ib_isert.h
···
 	wait_queue_head_t	conn_wait;
 	wait_queue_head_t	conn_wait_comp_err;
 	struct kref		conn_kref;
-	struct list_head	conn_frwr_pool;
-	int			conn_frwr_pool_size;
-	/* lock to protect frwr_pool */
+	struct list_head	conn_fr_pool;
+	int			conn_fr_pool_size;
+	/* lock to protect fastreg pool */
 	spinlock_t		conn_lock;
 #define ISERT_COMP_BATCH_COUNT	8
 	int			conn_comp_batch;
···
 };
 
 struct isert_device {
-	int			use_frwr;
+	int			use_fastreg;
 	int			cqs_used;
 	int			refcount;
 	int			cq_active_qps[ISERT_MAX_CQ];
 	struct ib_device	*ib_device;
-	struct ib_pd		*dev_pd;
-	struct ib_mr		*dev_mr;
 	struct ib_cq		*dev_rx_cq[ISERT_MAX_CQ];
 	struct ib_cq		*dev_tx_cq[ISERT_MAX_CQ];
 	struct isert_cq_desc	*cq_desc;
+2
drivers/scsi/qla2xxx/qla_attr.c
···
 
 	vha->flags.delete_progress = 1;
 
+	qlt_remove_target(ha, vha);
+
 	fc_remove_host(vha->host);
 
 	scsi_remove_host(vha->host);
+8 -4
drivers/scsi/qla2xxx/qla_def.h
···
 	uint32_t len;
 };
 
+struct scsi_qlt_host {
+	void *target_lport_ptr;
+	struct mutex tgt_mutex;
+	struct mutex tgt_host_action_mutex;
+	struct qla_tgt *qla_tgt;
+};
+
 struct qlt_hw_data {
 	/* Protected by hw lock */
 	uint32_t enable_class_2:1;
···
 	uint32_t __iomem *atio_q_in;
 	uint32_t __iomem *atio_q_out;
 
-	void *target_lport_ptr;
 	struct qla_tgt_func_tmpl *tgt_ops;
-	struct qla_tgt *qla_tgt;
 	struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
 	uint16_t current_handle;
 
 	struct qla_tgt_vp_map *tgt_vp_map;
-	struct mutex tgt_mutex;
-	struct mutex tgt_host_action_mutex;
 
 	int saved_set;
 	uint16_t saved_exchange_count;
···
 #define VP_ERR_FAB_LOGOUT	4
 #define VP_ERR_ADAP_NORESOURCES 5
 	struct qla_hw_data *hw;
+	struct scsi_qlt_host vha_tgt;
 	struct req_que *req;
 	int fw_heartbeat_counter;
 	int seconds_since_last_heartbeat;
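
The new struct scsi_qlt_host moves state that used to exist once per adapter in struct qla_hw_data into each scsi_qla_host, which is what allows one qla_tgt instance per NPIV port. Most of the qla_target.c churn below is the mechanical consequence: ha->tgt.qla_tgt becomes vha->vha_tgt.qla_tgt and the tgt_mutex travels with it. A trivial accessor, ours rather than the driver's, just to show the new indirection:

/*
 * Illustrative only: target-mode state is now looked up per virtual
 * port (physical port or NPIV vport), not per PCI function.
 */
static inline struct qla_tgt *example_tgt_from_vha(struct scsi_qla_host *vha)
{
    return vha->vha_tgt.qla_tgt;    /* was: vha->hw->tgt.qla_tgt */
}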
+87 -84
drivers/scsi/qla2xxx/qla_target.c
··· 590 591 /* Check to avoid double sessions */ 592 spin_lock_irqsave(&ha->hardware_lock, flags); 593 - list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list, 594 sess_list_entry) { 595 if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) { 596 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005, ··· 627 628 return NULL; 629 } 630 - sess->tgt = ha->tgt.qla_tgt; 631 sess->vha = vha; 632 sess->s_id = fcport->d_id; 633 sess->loop_id = fcport->loop_id; ··· 635 636 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, 637 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", 638 - sess, ha->tgt.qla_tgt); 639 640 be_sid[0] = sess->s_id.b.domain; 641 be_sid[1] = sess->s_id.b.area; ··· 662 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); 663 664 spin_lock_irqsave(&ha->hardware_lock, flags); 665 - list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list); 666 - ha->tgt.qla_tgt->sess_count++; 667 spin_unlock_irqrestore(&ha->hardware_lock, flags); 668 669 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, ··· 682 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 683 { 684 struct qla_hw_data *ha = vha->hw; 685 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 686 struct qla_tgt_sess *sess; 687 unsigned long flags; 688 ··· 690 return; 691 692 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 693 return; 694 695 spin_lock_irqsave(&ha->hardware_lock, flags); ··· 704 if (!sess) { 705 spin_unlock_irqrestore(&ha->hardware_lock, flags); 706 707 - mutex_lock(&ha->tgt.tgt_mutex); 708 sess = qlt_create_sess(vha, fcport, false); 709 - mutex_unlock(&ha->tgt.tgt_mutex); 710 711 spin_lock_irqsave(&ha->hardware_lock, flags); 712 } else { ··· 742 void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) 743 { 744 struct qla_hw_data *ha = vha->hw; 745 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 746 struct qla_tgt_sess *sess; 747 unsigned long flags; 748 ··· 809 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted]. 810 * Lock is needed, because we still can get an incoming packet. 
811 */ 812 - mutex_lock(&ha->tgt.tgt_mutex); 813 spin_lock_irqsave(&ha->hardware_lock, flags); 814 tgt->tgt_stop = 1; 815 qlt_clear_tgt_db(tgt, true); 816 spin_unlock_irqrestore(&ha->hardware_lock, flags); 817 - mutex_unlock(&ha->tgt.tgt_mutex); 818 819 flush_delayed_work(&tgt->sess_del_work); 820 ··· 848 void qlt_stop_phase2(struct qla_tgt *tgt) 849 { 850 struct qla_hw_data *ha = tgt->ha; 851 unsigned long flags; 852 853 if (tgt->tgt_stopped) { 854 - ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f, 855 "Already in tgt->tgt_stopped state\n"); 856 dump_stack(); 857 return; 858 } 859 860 - ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b, 861 "Waiting for %d IRQ commands to complete (tgt %p)", 862 tgt->irq_cmd_count, tgt); 863 864 - mutex_lock(&ha->tgt.tgt_mutex); 865 spin_lock_irqsave(&ha->hardware_lock, flags); 866 while (tgt->irq_cmd_count != 0) { 867 spin_unlock_irqrestore(&ha->hardware_lock, flags); ··· 872 tgt->tgt_stop = 0; 873 tgt->tgt_stopped = 1; 874 spin_unlock_irqrestore(&ha->hardware_lock, flags); 875 - mutex_unlock(&ha->tgt.tgt_mutex); 876 877 - ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished", 878 tgt); 879 } 880 EXPORT_SYMBOL(qlt_stop_phase2); ··· 882 /* Called from qlt_remove_target() -> qla2x00_remove_one() */ 883 static void qlt_release(struct qla_tgt *tgt) 884 { 885 - struct qla_hw_data *ha = tgt->ha; 886 887 - if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped) 888 qlt_stop_phase2(tgt); 889 890 - ha->tgt.qla_tgt = NULL; 891 892 - ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d, 893 "Release of tgt %p finished\n", tgt); 894 895 kfree(tgt); ··· 953 return; 954 } 955 956 - if (ha->tgt.qla_tgt != NULL) 957 - ha->tgt.qla_tgt->notify_ack_expected++; 958 959 pkt->entry_type = NOTIFY_ACK_TYPE; 960 pkt->entry_count = 1; ··· 1058 /* Other bytes are zero */ 1059 } 1060 1061 - ha->tgt.qla_tgt->abts_resp_expected++; 1062 1063 qla2x00_start_iocbs(vha, vha->req); 1064 } ··· 1210 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, 1211 "qla_target(%d): task abort for non-existant session\n", 1212 vha->vp_idx); 1213 - rc = qlt_sched_sess_work(ha->tgt.qla_tgt, 1214 QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts)); 1215 if (rc != 0) { 1216 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, ··· 2161 struct qla_tgt_cmd *cmd, void *ctio) 2162 { 2163 struct qla_tgt_srr_ctio *sc; 2164 - struct qla_hw_data *ha = vha->hw; 2165 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 2166 struct qla_tgt_srr_imm *imm; 2167 2168 tgt->ctio_srr_id++; ··· 2477 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 2478 scsi_qla_host_t *vha = cmd->vha; 2479 struct qla_hw_data *ha = vha->hw; 2480 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 2481 struct qla_tgt_sess *sess = NULL; 2482 struct atio_from_isp *atio = &cmd->atio; 2483 unsigned char *cdb; ··· 2510 goto out_term; 2511 } 2512 2513 - mutex_lock(&ha->tgt.tgt_mutex); 2514 sess = qlt_make_local_sess(vha, s_id); 2515 /* sess has an extra creation ref. 
*/ 2516 - mutex_unlock(&ha->tgt.tgt_mutex); 2517 2518 if (!sess) 2519 goto out_term; ··· 2579 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 2580 struct atio_from_isp *atio) 2581 { 2582 - struct qla_hw_data *ha = vha->hw; 2583 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 2584 struct qla_tgt_cmd *cmd; 2585 2586 if (unlikely(tgt->tgt_stop)) { ··· 2599 2600 memcpy(&cmd->atio, atio, sizeof(*atio)); 2601 cmd->state = QLA_TGT_STATE_NEW; 2602 - cmd->tgt = ha->tgt.qla_tgt; 2603 cmd->vha = vha; 2604 2605 INIT_WORK(&cmd->work, qlt_do_work); ··· 2725 uint32_t lun, unpacked_lun; 2726 int lun_size, fn; 2727 2728 - tgt = ha->tgt.qla_tgt; 2729 2730 lun = a->u.isp24.fcp_cmnd.lun; 2731 lun_size = sizeof(a->u.isp24.fcp_cmnd.lun); ··· 2799 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, 2800 "qla_target(%d): task abort for unexisting " 2801 "session\n", vha->vp_idx); 2802 - return qlt_sched_sess_work(ha->tgt.qla_tgt, 2803 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); 2804 } 2805 ··· 2812 static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 2813 struct imm_ntfy_from_isp *iocb) 2814 { 2815 - struct qla_hw_data *ha = vha->hw; 2816 int res = 0; 2817 2818 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, ··· 2829 case ELS_PDISC: 2830 case ELS_ADISC: 2831 { 2832 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 2833 if (tgt->link_reinit_iocb_pending) { 2834 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 2835 0, 0, 0, 0, 0, 0); ··· 3203 struct imm_ntfy_from_isp *iocb) 3204 { 3205 struct qla_tgt_srr_imm *imm; 3206 - struct qla_hw_data *ha = vha->hw; 3207 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 3208 struct qla_tgt_srr_ctio *sctio; 3209 3210 tgt->imm_srr_id++; ··· 3313 3314 case IMM_NTFY_LIP_LINK_REINIT: 3315 { 3316 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 3317 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, 3318 "qla_target(%d): LINK REINIT (loop %#x, " 3319 "subcode %x)\n", vha->vp_idx, ··· 3489 struct atio_from_isp *atio) 3490 { 3491 struct qla_hw_data *ha = vha->hw; 3492 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 3493 int rc; 3494 3495 if (unlikely(tgt == NULL)) { ··· 3591 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) 3592 { 3593 struct qla_hw_data *ha = vha->hw; 3594 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 3595 3596 if (unlikely(tgt == NULL)) { 3597 ql_dbg(ql_dbg_tgt, vha, 0xe05d, ··· 3794 uint16_t *mailbox) 3795 { 3796 struct qla_hw_data *ha = vha->hw; 3797 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 3798 int login_code; 3799 3800 ql_dbg(ql_dbg_tgt, vha, 0xe039, ··· 3924 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, 3925 uint8_t *s_id) 3926 { 3927 - struct qla_hw_data *ha = vha->hw; 3928 struct qla_tgt_sess *sess = NULL; 3929 fc_port_t *fcport = NULL; 3930 int rc, global_resets; 3931 uint16_t loop_id = 0; 3932 3933 retry: 3934 - global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count); 3935 3936 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 3937 if (rc != 0) { ··· 3958 return NULL; 3959 3960 if (global_resets != 3961 - atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) { 3962 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, 3963 "qla_target(%d): global reset during session discovery " 3964 "(counter was %d, new %d), retrying", vha->vp_idx, 3965 global_resets, 3966 - atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)); 3967 goto retry; 3968 } 3969 ··· 3999 if (!sess) { 4000 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4001 4002 - mutex_lock(&ha->tgt.tgt_mutex); 4003 sess = qlt_make_local_sess(vha, s_id); 4004 /* sess has got an extra creation ref */ 4005 - 
mutex_unlock(&ha->tgt.tgt_mutex); 4006 4007 spin_lock_irqsave(&ha->hardware_lock, flags); 4008 if (!sess) ··· 4053 if (!sess) { 4054 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4055 4056 - mutex_lock(&ha->tgt.tgt_mutex); 4057 sess = qlt_make_local_sess(vha, s_id); 4058 /* sess has got an extra creation ref */ 4059 - mutex_unlock(&ha->tgt.tgt_mutex); 4060 4061 spin_lock_irqsave(&ha->hardware_lock, flags); 4062 if (!sess) ··· 4142 } 4143 4144 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, 4145 - "Registering target for host %ld(%p)", base_vha->host_no, ha); 4146 4147 - BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL)); 4148 4149 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); 4150 if (!tgt) { ··· 4172 INIT_WORK(&tgt->srr_work, qlt_handle_srr_work); 4173 atomic_set(&tgt->tgt_global_resets_count, 0); 4174 4175 - ha->tgt.qla_tgt = tgt; 4176 4177 ql_dbg(ql_dbg_tgt, base_vha, 0xe067, 4178 "qla_target(%d): using 64 Bit PCI addressing", ··· 4193 /* Must be called under tgt_host_action_mutex */ 4194 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) 4195 { 4196 - if (!ha->tgt.qla_tgt) 4197 return 0; 4198 4199 mutex_lock(&qla_tgt_mutex); 4200 - list_del(&ha->tgt.qla_tgt->tgt_list_entry); 4201 mutex_unlock(&qla_tgt_mutex); 4202 4203 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", 4204 vha->host_no, ha); 4205 - qlt_release(ha->tgt.qla_tgt); 4206 4207 return 0; 4208 } ··· 4236 * @callback: lport initialization callback for tcm_qla2xxx code 4237 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data 4238 */ 4239 - int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, 4240 - int (*callback)(struct scsi_qla_host *), void *target_lport_ptr) 4241 { 4242 struct qla_tgt *tgt; 4243 struct scsi_qla_host *vha; ··· 4257 if (!host) 4258 continue; 4259 4260 - if (ha->tgt.tgt_ops != NULL) 4261 - continue; 4262 - 4263 if (!(host->hostt->supported_mode & MODE_TARGET)) 4264 continue; 4265 4266 spin_lock_irqsave(&ha->hardware_lock, flags); 4267 - if (host->active_mode & MODE_TARGET) { 4268 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", 4269 host->host_no); 4270 spin_unlock_irqrestore(&ha->hardware_lock, flags); ··· 4275 " qla2xxx scsi_host\n"); 4276 continue; 4277 } 4278 - qlt_lport_dump(vha, wwpn, b); 4279 4280 if (memcmp(vha->port_name, b, WWN_SIZE)) { 4281 scsi_host_put(host); 4282 continue; 4283 } 4284 - /* 4285 - * Setup passed parameters ahead of invoking callback 4286 - */ 4287 - ha->tgt.tgt_ops = qla_tgt_ops; 4288 - ha->tgt.target_lport_ptr = target_lport_ptr; 4289 - rc = (*callback)(vha); 4290 - if (rc != 0) { 4291 - ha->tgt.tgt_ops = NULL; 4292 - ha->tgt.target_lport_ptr = NULL; 4293 - scsi_host_put(host); 4294 - } 4295 mutex_unlock(&qla_tgt_mutex); 4296 return rc; 4297 } 4298 mutex_unlock(&qla_tgt_mutex); ··· 4307 /* 4308 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data 4309 */ 4310 - ha->tgt.target_lport_ptr = NULL; 4311 ha->tgt.tgt_ops = NULL; 4312 /* 4313 * Release the Scsi_Host reference for the underlying qla2xxx host ··· 4369 qlt_enable_vha(struct scsi_qla_host *vha) 4370 { 4371 struct qla_hw_data *ha = vha->hw; 4372 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 4373 unsigned long flags; 4374 4375 if (!tgt) { 4376 ql_dbg(ql_dbg_tgt, vha, 0xe069, ··· 4386 qlt_set_mode(vha); 4387 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4388 4389 - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 4390 - qla2xxx_wake_dpc(vha); 4391 - qla2x00_wait_for_hba_online(vha); 4392 } 4393 
EXPORT_SYMBOL(qlt_enable_vha); 4394 ··· 4406 qlt_disable_vha(struct scsi_qla_host *vha) 4407 { 4408 struct qla_hw_data *ha = vha->hw; 4409 - struct qla_tgt *tgt = ha->tgt.qla_tgt; 4410 unsigned long flags; 4411 4412 if (!tgt) { ··· 4437 if (!qla_tgt_mode_enabled(vha)) 4438 return; 4439 4440 - mutex_init(&ha->tgt.tgt_mutex); 4441 - mutex_init(&ha->tgt.tgt_host_action_mutex); 4442 4443 qlt_clear_mode(vha); 4444 ··· 4451 * assigning the value appropriately. 4452 */ 4453 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 4454 } 4455 4456 void ··· 4771 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; 4772 } 4773 4774 - mutex_init(&ha->tgt.tgt_mutex); 4775 - mutex_init(&ha->tgt.tgt_host_action_mutex); 4776 qlt_clear_mode(base_vha); 4777 } 4778
··· 590 591 /* Check to avoid double sessions */ 592 spin_lock_irqsave(&ha->hardware_lock, flags); 593 + list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list, 594 sess_list_entry) { 595 if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) { 596 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005, ··· 627 628 return NULL; 629 } 630 + sess->tgt = vha->vha_tgt.qla_tgt; 631 sess->vha = vha; 632 sess->s_id = fcport->d_id; 633 sess->loop_id = fcport->loop_id; ··· 635 636 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, 637 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", 638 + sess, vha->vha_tgt.qla_tgt); 639 640 be_sid[0] = sess->s_id.b.domain; 641 be_sid[1] = sess->s_id.b.area; ··· 662 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); 663 664 spin_lock_irqsave(&ha->hardware_lock, flags); 665 + list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); 666 + vha->vha_tgt.qla_tgt->sess_count++; 667 spin_unlock_irqrestore(&ha->hardware_lock, flags); 668 669 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, ··· 682 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 683 { 684 struct qla_hw_data *ha = vha->hw; 685 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 686 struct qla_tgt_sess *sess; 687 unsigned long flags; 688 ··· 690 return; 691 692 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 693 + return; 694 + 695 + if (qla_ini_mode_enabled(vha)) 696 return; 697 698 spin_lock_irqsave(&ha->hardware_lock, flags); ··· 701 if (!sess) { 702 spin_unlock_irqrestore(&ha->hardware_lock, flags); 703 704 + mutex_lock(&vha->vha_tgt.tgt_mutex); 705 sess = qlt_create_sess(vha, fcport, false); 706 + mutex_unlock(&vha->vha_tgt.tgt_mutex); 707 708 spin_lock_irqsave(&ha->hardware_lock, flags); 709 } else { ··· 739 void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) 740 { 741 struct qla_hw_data *ha = vha->hw; 742 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 743 struct qla_tgt_sess *sess; 744 unsigned long flags; 745 ··· 806 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted]. 807 * Lock is needed, because we still can get an incoming packet. 
808 */ 809 + mutex_lock(&vha->vha_tgt.tgt_mutex); 810 spin_lock_irqsave(&ha->hardware_lock, flags); 811 tgt->tgt_stop = 1; 812 qlt_clear_tgt_db(tgt, true); 813 spin_unlock_irqrestore(&ha->hardware_lock, flags); 814 + mutex_unlock(&vha->vha_tgt.tgt_mutex); 815 816 flush_delayed_work(&tgt->sess_del_work); 817 ··· 845 void qlt_stop_phase2(struct qla_tgt *tgt) 846 { 847 struct qla_hw_data *ha = tgt->ha; 848 + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 849 unsigned long flags; 850 851 if (tgt->tgt_stopped) { 852 + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f, 853 "Already in tgt->tgt_stopped state\n"); 854 dump_stack(); 855 return; 856 } 857 858 + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b, 859 "Waiting for %d IRQ commands to complete (tgt %p)", 860 tgt->irq_cmd_count, tgt); 861 862 + mutex_lock(&vha->vha_tgt.tgt_mutex); 863 spin_lock_irqsave(&ha->hardware_lock, flags); 864 while (tgt->irq_cmd_count != 0) { 865 spin_unlock_irqrestore(&ha->hardware_lock, flags); ··· 868 tgt->tgt_stop = 0; 869 tgt->tgt_stopped = 1; 870 spin_unlock_irqrestore(&ha->hardware_lock, flags); 871 + mutex_unlock(&vha->vha_tgt.tgt_mutex); 872 873 + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished", 874 tgt); 875 } 876 EXPORT_SYMBOL(qlt_stop_phase2); ··· 878 /* Called from qlt_remove_target() -> qla2x00_remove_one() */ 879 static void qlt_release(struct qla_tgt *tgt) 880 { 881 + scsi_qla_host_t *vha = tgt->vha; 882 883 + if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped) 884 qlt_stop_phase2(tgt); 885 886 + vha->vha_tgt.qla_tgt = NULL; 887 888 + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d, 889 "Release of tgt %p finished\n", tgt); 890 891 kfree(tgt); ··· 949 return; 950 } 951 952 + if (vha->vha_tgt.qla_tgt != NULL) 953 + vha->vha_tgt.qla_tgt->notify_ack_expected++; 954 955 pkt->entry_type = NOTIFY_ACK_TYPE; 956 pkt->entry_count = 1; ··· 1054 /* Other bytes are zero */ 1055 } 1056 1057 + vha->vha_tgt.qla_tgt->abts_resp_expected++; 1058 1059 qla2x00_start_iocbs(vha, vha->req); 1060 } ··· 1206 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, 1207 "qla_target(%d): task abort for non-existant session\n", 1208 vha->vp_idx); 1209 + rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt, 1210 QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts)); 1211 if (rc != 0) { 1212 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, ··· 2157 struct qla_tgt_cmd *cmd, void *ctio) 2158 { 2159 struct qla_tgt_srr_ctio *sc; 2160 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2161 struct qla_tgt_srr_imm *imm; 2162 2163 tgt->ctio_srr_id++; ··· 2474 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 2475 scsi_qla_host_t *vha = cmd->vha; 2476 struct qla_hw_data *ha = vha->hw; 2477 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2478 struct qla_tgt_sess *sess = NULL; 2479 struct atio_from_isp *atio = &cmd->atio; 2480 unsigned char *cdb; ··· 2507 goto out_term; 2508 } 2509 2510 + mutex_lock(&vha->vha_tgt.tgt_mutex); 2511 sess = qlt_make_local_sess(vha, s_id); 2512 /* sess has an extra creation ref. 
*/ 2513 + mutex_unlock(&vha->vha_tgt.tgt_mutex); 2514 2515 if (!sess) 2516 goto out_term; ··· 2576 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 2577 struct atio_from_isp *atio) 2578 { 2579 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2580 struct qla_tgt_cmd *cmd; 2581 2582 if (unlikely(tgt->tgt_stop)) { ··· 2597 2598 memcpy(&cmd->atio, atio, sizeof(*atio)); 2599 cmd->state = QLA_TGT_STATE_NEW; 2600 + cmd->tgt = vha->vha_tgt.qla_tgt; 2601 cmd->vha = vha; 2602 2603 INIT_WORK(&cmd->work, qlt_do_work); ··· 2723 uint32_t lun, unpacked_lun; 2724 int lun_size, fn; 2725 2726 + tgt = vha->vha_tgt.qla_tgt; 2727 2728 lun = a->u.isp24.fcp_cmnd.lun; 2729 lun_size = sizeof(a->u.isp24.fcp_cmnd.lun); ··· 2797 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, 2798 "qla_target(%d): task abort for unexisting " 2799 "session\n", vha->vp_idx); 2800 + return qlt_sched_sess_work(vha->vha_tgt.qla_tgt, 2801 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); 2802 } 2803 ··· 2810 static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 2811 struct imm_ntfy_from_isp *iocb) 2812 { 2813 int res = 0; 2814 2815 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, ··· 2828 case ELS_PDISC: 2829 case ELS_ADISC: 2830 { 2831 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2832 if (tgt->link_reinit_iocb_pending) { 2833 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 2834 0, 0, 0, 0, 0, 0); ··· 3202 struct imm_ntfy_from_isp *iocb) 3203 { 3204 struct qla_tgt_srr_imm *imm; 3205 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3206 struct qla_tgt_srr_ctio *sctio; 3207 3208 tgt->imm_srr_id++; ··· 3313 3314 case IMM_NTFY_LIP_LINK_REINIT: 3315 { 3316 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3317 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, 3318 "qla_target(%d): LINK REINIT (loop %#x, " 3319 "subcode %x)\n", vha->vp_idx, ··· 3489 struct atio_from_isp *atio) 3490 { 3491 struct qla_hw_data *ha = vha->hw; 3492 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3493 int rc; 3494 3495 if (unlikely(tgt == NULL)) { ··· 3591 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) 3592 { 3593 struct qla_hw_data *ha = vha->hw; 3594 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3595 3596 if (unlikely(tgt == NULL)) { 3597 ql_dbg(ql_dbg_tgt, vha, 0xe05d, ··· 3794 uint16_t *mailbox) 3795 { 3796 struct qla_hw_data *ha = vha->hw; 3797 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3798 int login_code; 3799 3800 ql_dbg(ql_dbg_tgt, vha, 0xe039, ··· 3924 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, 3925 uint8_t *s_id) 3926 { 3927 struct qla_tgt_sess *sess = NULL; 3928 fc_port_t *fcport = NULL; 3929 int rc, global_resets; 3930 uint16_t loop_id = 0; 3931 3932 retry: 3933 + global_resets = 3934 + atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 3935 3936 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 3937 if (rc != 0) { ··· 3958 return NULL; 3959 3960 if (global_resets != 3961 + atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { 3962 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, 3963 "qla_target(%d): global reset during session discovery " 3964 "(counter was %d, new %d), retrying", vha->vp_idx, 3965 global_resets, 3966 + atomic_read(&vha->vha_tgt. 
3967 + qla_tgt->tgt_global_resets_count)); 3968 goto retry; 3969 } 3970 ··· 3998 if (!sess) { 3999 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4000 4001 + mutex_lock(&vha->vha_tgt.tgt_mutex); 4002 sess = qlt_make_local_sess(vha, s_id); 4003 /* sess has got an extra creation ref */ 4004 + mutex_unlock(&vha->vha_tgt.tgt_mutex); 4005 4006 spin_lock_irqsave(&ha->hardware_lock, flags); 4007 if (!sess) ··· 4052 if (!sess) { 4053 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4054 4055 + mutex_lock(&vha->vha_tgt.tgt_mutex); 4056 sess = qlt_make_local_sess(vha, s_id); 4057 /* sess has got an extra creation ref */ 4058 + mutex_unlock(&vha->vha_tgt.tgt_mutex); 4059 4060 spin_lock_irqsave(&ha->hardware_lock, flags); 4061 if (!sess) ··· 4141 } 4142 4143 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, 4144 + "Registering target for host %ld(%p).\n", base_vha->host_no, ha); 4145 4146 + BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); 4147 4148 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); 4149 if (!tgt) { ··· 4171 INIT_WORK(&tgt->srr_work, qlt_handle_srr_work); 4172 atomic_set(&tgt->tgt_global_resets_count, 0); 4173 4174 + base_vha->vha_tgt.qla_tgt = tgt; 4175 4176 ql_dbg(ql_dbg_tgt, base_vha, 0xe067, 4177 "qla_target(%d): using 64 Bit PCI addressing", ··· 4192 /* Must be called under tgt_host_action_mutex */ 4193 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) 4194 { 4195 + if (!vha->vha_tgt.qla_tgt) 4196 return 0; 4197 4198 mutex_lock(&qla_tgt_mutex); 4199 + list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); 4200 mutex_unlock(&qla_tgt_mutex); 4201 4202 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", 4203 vha->host_no, ha); 4204 + qlt_release(vha->vha_tgt.qla_tgt); 4205 4206 return 0; 4207 } ··· 4235 * @callback: lport initialization callback for tcm_qla2xxx code 4236 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data 4237 */ 4238 + int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, 4239 + u64 npiv_wwpn, u64 npiv_wwnn, 4240 + int (*callback)(struct scsi_qla_host *, void *, u64, u64)) 4241 { 4242 struct qla_tgt *tgt; 4243 struct scsi_qla_host *vha; ··· 4255 if (!host) 4256 continue; 4257 4258 if (!(host->hostt->supported_mode & MODE_TARGET)) 4259 continue; 4260 4261 spin_lock_irqsave(&ha->hardware_lock, flags); 4262 + if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) { 4263 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", 4264 host->host_no); 4265 spin_unlock_irqrestore(&ha->hardware_lock, flags); ··· 4276 " qla2xxx scsi_host\n"); 4277 continue; 4278 } 4279 + qlt_lport_dump(vha, phys_wwpn, b); 4280 4281 if (memcmp(vha->port_name, b, WWN_SIZE)) { 4282 scsi_host_put(host); 4283 continue; 4284 } 4285 mutex_unlock(&qla_tgt_mutex); 4286 + 4287 + rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); 4288 + if (rc != 0) 4289 + scsi_host_put(host); 4290 + 4291 return rc; 4292 } 4293 mutex_unlock(&qla_tgt_mutex); ··· 4314 /* 4315 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data 4316 */ 4317 + vha->vha_tgt.target_lport_ptr = NULL; 4318 ha->tgt.tgt_ops = NULL; 4319 /* 4320 * Release the Scsi_Host reference for the underlying qla2xxx host ··· 4376 qlt_enable_vha(struct scsi_qla_host *vha) 4377 { 4378 struct qla_hw_data *ha = vha->hw; 4379 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4380 unsigned long flags; 4381 + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4382 4383 if (!tgt) { 4384 ql_dbg(ql_dbg_tgt, vha, 0xe069, ··· 4392 qlt_set_mode(vha); 4393 
spin_unlock_irqrestore(&ha->hardware_lock, flags); 4394 4395 + if (vha->vp_idx) { 4396 + qla24xx_disable_vp(vha); 4397 + qla24xx_enable_vp(vha); 4398 + } else { 4399 + set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 4400 + qla2xxx_wake_dpc(base_vha); 4401 + qla2x00_wait_for_hba_online(base_vha); 4402 + } 4403 } 4404 EXPORT_SYMBOL(qlt_enable_vha); 4405 ··· 4407 qlt_disable_vha(struct scsi_qla_host *vha) 4408 { 4409 struct qla_hw_data *ha = vha->hw; 4410 + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4411 unsigned long flags; 4412 4413 if (!tgt) { ··· 4438 if (!qla_tgt_mode_enabled(vha)) 4439 return; 4440 4441 + vha->vha_tgt.qla_tgt = NULL; 4442 + 4443 + mutex_init(&vha->vha_tgt.tgt_mutex); 4444 + mutex_init(&vha->vha_tgt.tgt_host_action_mutex); 4445 4446 qlt_clear_mode(vha); 4447 ··· 4450 * assigning the value appropriately. 4451 */ 4452 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 4453 + 4454 + qlt_add_target(ha, vha); 4455 } 4456 4457 void ··· 4768 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; 4769 } 4770 4771 + mutex_init(&base_vha->vha_tgt.tgt_mutex); 4772 + mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); 4773 qlt_clear_mode(base_vha); 4774 } 4775
+2 -2
drivers/scsi/qla2xxx/qla_target.h
···
  */
 extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
 extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
-extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
-		int (*callback)(struct scsi_qla_host *), void *);
+extern int qlt_lport_register(void *, u64, u64, u64,
+		int (*callback)(struct scsi_qla_host *, void *, u64, u64));
 extern void qlt_lport_deregister(struct scsi_qla_host *);
 extern void qlt_unreg_sess(struct qla_tgt_sess *);
 extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
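
The widened qlt_lport_register() prototype is what lets tcm_qla2xxx drive both physical and NPIV lports through a single entry point: the caller hands in its lport as an opaque pointer, the physical WWPN, and an optional NPIV WWPN/WWNN pair (both zero when claiming the physical port, matching the (!npiv_wwpn || !npiv_wwnn) test in qla_target.c above), and the callback receives all of it instead of fishing the lport back out of qla_hw_data. A hedged sketch of a physical-port caller against the new prototype, assuming the qla2xxx target headers; the example_* names are ours:

struct example_lport {
    struct scsi_qla_host *qla_vha;
};

/* Callback per the new prototype above: the opaque lport pointer and
 * the NPIV WWNs come back alongside the bound vha. */
static int example_lport_register_cb(struct scsi_qla_host *vha,
                                     void *target_lport_ptr,
                                     u64 npiv_wwpn, u64 npiv_wwnn)
{
    struct example_lport *lport = target_lport_ptr;

    lport->qla_vha = vha;   /* remember which host we were bound to */
    return 0;
}

static int example_register_physical_port(struct example_lport *lport, u64 wwpn)
{
    /* 0/0 NPIV WWNs: bind target mode to the physical FC port itself. */
    return qlt_lport_register(lport, wwpn, 0, 0,
                              example_lport_register_cb);
}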
+123 -59
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 53 struct workqueue_struct *tcm_qla2xxx_free_wq; 54 struct workqueue_struct *tcm_qla2xxx_cmd_wq; 55 56 - static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg) 57 - { 58 - return 1; 59 - } 60 - 61 - static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg) 62 - { 63 - return 0; 64 - } 65 - 66 /* 67 * Parse WWN. 68 * If strict, we require lower-case hex and colon separators to be sure ··· 164 *wwnn = 0; 165 166 /* count may include a LF at end of string */ 167 - if (name[cnt-1] == '\n') 168 cnt--; 169 170 /* validate we have enough characters for WWPN */ ··· 767 768 static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) 769 { 770 assert_spin_locked(&sess->vha->hw->hardware_lock); 771 kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session); 772 } ··· 950 struct tcm_qla2xxx_lport *lport = container_of(se_wwn, 951 struct tcm_qla2xxx_lport, lport_wwn); 952 struct scsi_qla_host *vha = lport->qla_vha; 953 - struct qla_hw_data *ha = vha->hw; 954 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 955 struct tcm_qla2xxx_tpg, se_tpg); 956 unsigned long op; ··· 969 atomic_set(&tpg->lport_tpg_enabled, 1); 970 qlt_enable_vha(vha); 971 } else { 972 - if (!ha->tgt.qla_tgt) { 973 - pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n"); 974 return -ENODEV; 975 } 976 atomic_set(&tpg->lport_tpg_enabled, 0); 977 - qlt_stop_phase1(ha->tgt.qla_tgt); 978 } 979 980 return count; ··· 1003 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) 1004 return ERR_PTR(-EINVAL); 1005 1006 - if (!lport->qla_npiv_vp && (tpgt != 1)) { 1007 pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n"); 1008 return ERR_PTR(-ENOSYS); 1009 } ··· 1030 kfree(tpg); 1031 return NULL; 1032 } 1033 - /* 1034 - * Setup local TPG=1 pointer for non NPIV mode. 1035 - */ 1036 - if (lport->qla_npiv_vp == NULL) 1037 - lport->tpg_1 = tpg; 1038 1039 return &tpg->se_tpg; 1040 } ··· 1042 struct tcm_qla2xxx_tpg, se_tpg); 1043 struct tcm_qla2xxx_lport *lport = tpg->lport; 1044 struct scsi_qla_host *vha = lport->qla_vha; 1045 - struct qla_hw_data *ha = vha->hw; 1046 /* 1047 * Call into qla2x_target.c LLD logic to shutdown the active 1048 * FC Nexuses and disable target mode operation for this qla_hw_data 1049 */ 1050 - if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop) 1051 - qlt_stop_phase1(ha->tgt.qla_tgt); 1052 1053 core_tpg_deregister(se_tpg); 1054 /* 1055 * Clear local TPG=1 pointer for non NPIV mode. 
1056 */ 1057 - if (lport->qla_npiv_vp == NULL) 1058 lport->tpg_1 = NULL; 1059 1060 kfree(tpg); ··· 1082 tpg->lport = lport; 1083 tpg->lport_tpgt = tpgt; 1084 1085 ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn, 1086 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); 1087 if (ret < 0) { 1088 kfree(tpg); 1089 return NULL; 1090 } 1091 return &tpg->se_tpg; 1092 } 1093 ··· 1108 scsi_qla_host_t *vha, 1109 const uint8_t *s_id) 1110 { 1111 - struct qla_hw_data *ha = vha->hw; 1112 struct tcm_qla2xxx_lport *lport; 1113 struct se_node_acl *se_nacl; 1114 struct tcm_qla2xxx_nacl *nacl; 1115 u32 key; 1116 1117 - lport = ha->tgt.target_lport_ptr; 1118 if (!lport) { 1119 pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); 1120 dump_stack(); ··· 1217 scsi_qla_host_t *vha, 1218 const uint16_t loop_id) 1219 { 1220 - struct qla_hw_data *ha = vha->hw; 1221 struct tcm_qla2xxx_lport *lport; 1222 struct se_node_acl *se_nacl; 1223 struct tcm_qla2xxx_nacl *nacl; 1224 struct tcm_qla2xxx_fc_loopid *fc_loopid; 1225 1226 - lport = ha->tgt.target_lport_ptr; 1227 if (!lport) { 1228 pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); 1229 dump_stack(); ··· 1336 { 1337 struct qla_tgt *tgt = sess->tgt; 1338 struct qla_hw_data *ha = tgt->ha; 1339 struct se_session *se_sess; 1340 struct se_node_acl *se_nacl; 1341 struct tcm_qla2xxx_lport *lport; ··· 1353 se_nacl = se_sess->se_node_acl; 1354 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); 1355 1356 - lport = ha->tgt.target_lport_ptr; 1357 if (!lport) { 1358 pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); 1359 dump_stack(); ··· 1387 unsigned char port_name[36]; 1388 unsigned long flags; 1389 1390 - lport = ha->tgt.target_lport_ptr; 1391 if (!lport) { 1392 pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); 1393 dump_stack(); ··· 1451 { 1452 struct qla_tgt *tgt = sess->tgt; 1453 struct qla_hw_data *ha = tgt->ha; 1454 - struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr; 1455 struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; 1456 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, 1457 struct tcm_qla2xxx_nacl, se_node_acl); ··· 1559 return 0; 1560 } 1561 1562 - static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha) 1563 { 1564 struct qla_hw_data *ha = vha->hw; 1565 - struct tcm_qla2xxx_lport *lport; 1566 /* 1567 - * Setup local pointer to vha, NPIV VP pointer (if present) and 1568 - * vha->tcm_lport pointer 1569 */ 1570 - lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr; 1571 lport->qla_vha = vha; 1572 1573 return 0; ··· 1602 if (ret != 0) 1603 goto out; 1604 1605 - ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn, 1606 - tcm_qla2xxx_lport_register_cb, lport); 1607 if (ret != 0) 1608 goto out_lport; 1609 ··· 1621 struct tcm_qla2xxx_lport *lport = container_of(wwn, 1622 struct tcm_qla2xxx_lport, lport_wwn); 1623 struct scsi_qla_host *vha = lport->qla_vha; 1624 - struct qla_hw_data *ha = vha->hw; 1625 struct se_node_acl *node; 1626 u32 key = 0; 1627 ··· 1629 * shutdown of struct qla_tgt after the call to 1630 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.. 
1631 */ 1632 - if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped) 1633 - qlt_stop_phase2(ha->tgt.qla_tgt); 1634 1635 qlt_lport_deregister(vha); 1636 ··· 1641 kfree(lport); 1642 } 1643 1644 static struct se_wwn *tcm_qla2xxx_npiv_make_lport( 1645 struct target_fabric_configfs *tf, 1646 struct config_group *group, 1647 const char *name) 1648 { 1649 struct tcm_qla2xxx_lport *lport; 1650 - u64 npiv_wwpn, npiv_wwnn; 1651 int ret; 1652 1653 - if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1, 1654 - &npiv_wwpn, &npiv_wwnn) < 0) 1655 return ERR_PTR(-EINVAL); 1656 1657 lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); ··· 1718 TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn); 1719 sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn); 1720 1721 - /* FIXME: tcm_qla2xxx_npiv_make_lport */ 1722 - ret = -ENOSYS; 1723 if (ret != 0) 1724 goto out; 1725 1726 return &lport->lport_wwn; 1727 out: 1728 kfree(lport); 1729 return ERR_PTR(ret); ··· 1740 { 1741 struct tcm_qla2xxx_lport *lport = container_of(wwn, 1742 struct tcm_qla2xxx_lport, lport_wwn); 1743 - struct scsi_qla_host *vha = lport->qla_vha; 1744 - struct Scsi_Host *sh = vha->host; 1745 - /* 1746 - * Notify libfc that we want to release the lport->npiv_vport 1747 - */ 1748 - fc_vport_terminate(lport->npiv_vport); 1749 1750 - scsi_host_put(sh); 1751 kfree(lport); 1752 } 1753 ··· 1830 .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, 1831 .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len, 1832 .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, 1833 - .tpg_check_demo_mode = tcm_qla2xxx_check_false, 1834 - .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true, 1835 - .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true, 1836 - .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false, 1837 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, 1838 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, 1839 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, 1840 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, 1841 .release_cmd = tcm_qla2xxx_release_cmd, 1842 .put_session = tcm_qla2xxx_put_session, 1843 .shutdown_session = tcm_qla2xxx_shutdown_session, ··· 1934 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl 1935 */ 1936 npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; 1937 - npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL; 1938 npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; 1939 npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 1940 npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
··· 53 struct workqueue_struct *tcm_qla2xxx_free_wq; 54 struct workqueue_struct *tcm_qla2xxx_cmd_wq; 55 56 /* 57 * Parse WWN. 58 * If strict, we require lower-case hex and colon separators to be sure ··· 174 *wwnn = 0; 175 176 /* count may include a LF at end of string */ 177 + if (name[cnt-1] == '\n' || name[cnt-1] == 0) 178 cnt--; 179 180 /* validate we have enough characters for WWPN */ ··· 777 778 static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) 779 { 780 + if (!sess) 781 + return; 782 + 783 assert_spin_locked(&sess->vha->hw->hardware_lock); 784 kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session); 785 } ··· 957 struct tcm_qla2xxx_lport *lport = container_of(se_wwn, 958 struct tcm_qla2xxx_lport, lport_wwn); 959 struct scsi_qla_host *vha = lport->qla_vha; 960 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 961 struct tcm_qla2xxx_tpg, se_tpg); 962 unsigned long op; ··· 977 atomic_set(&tpg->lport_tpg_enabled, 1); 978 qlt_enable_vha(vha); 979 } else { 980 + if (!vha->vha_tgt.qla_tgt) { 981 + pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n"); 982 return -ENODEV; 983 } 984 atomic_set(&tpg->lport_tpg_enabled, 0); 985 + qlt_stop_phase1(vha->vha_tgt.qla_tgt); 986 } 987 988 return count; ··· 1011 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) 1012 return ERR_PTR(-EINVAL); 1013 1014 + if ((tpgt != 1)) { 1015 pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n"); 1016 return ERR_PTR(-ENOSYS); 1017 } ··· 1038 kfree(tpg); 1039 return NULL; 1040 } 1041 + 1042 + lport->tpg_1 = tpg; 1043 1044 return &tpg->se_tpg; 1045 } ··· 1053 struct tcm_qla2xxx_tpg, se_tpg); 1054 struct tcm_qla2xxx_lport *lport = tpg->lport; 1055 struct scsi_qla_host *vha = lport->qla_vha; 1056 /* 1057 * Call into qla2x_target.c LLD logic to shutdown the active 1058 * FC Nexuses and disable target mode operation for this qla_hw_data 1059 */ 1060 + if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop) 1061 + qlt_stop_phase1(vha->vha_tgt.qla_tgt); 1062 1063 core_tpg_deregister(se_tpg); 1064 /* 1065 * Clear local TPG=1 pointer for non NPIV mode. 
1066 */ 1067 lport->tpg_1 = NULL; 1068 1069 kfree(tpg); ··· 1095 tpg->lport = lport; 1096 tpg->lport_tpgt = tpgt; 1097 1098 + /* 1099 + * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic 1100 + * NodeACLs 1101 + */ 1102 + tpg->tpg_attrib.generate_node_acls = 1; 1103 + tpg->tpg_attrib.demo_mode_write_protect = 1; 1104 + tpg->tpg_attrib.cache_dynamic_acls = 1; 1105 + tpg->tpg_attrib.demo_mode_login_only = 1; 1106 + 1107 ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn, 1108 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); 1109 if (ret < 0) { 1110 kfree(tpg); 1111 return NULL; 1112 } 1113 + lport->tpg_1 = tpg; 1114 return &tpg->se_tpg; 1115 } 1116 ··· 1111 scsi_qla_host_t *vha, 1112 const uint8_t *s_id) 1113 { 1114 struct tcm_qla2xxx_lport *lport; 1115 struct se_node_acl *se_nacl; 1116 struct tcm_qla2xxx_nacl *nacl; 1117 u32 key; 1118 1119 + lport = vha->vha_tgt.target_lport_ptr; 1120 if (!lport) { 1121 pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); 1122 dump_stack(); ··· 1221 scsi_qla_host_t *vha, 1222 const uint16_t loop_id) 1223 { 1224 struct tcm_qla2xxx_lport *lport; 1225 struct se_node_acl *se_nacl; 1226 struct tcm_qla2xxx_nacl *nacl; 1227 struct tcm_qla2xxx_fc_loopid *fc_loopid; 1228 1229 + lport = vha->vha_tgt.target_lport_ptr; 1230 if (!lport) { 1231 pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); 1232 dump_stack(); ··· 1341 { 1342 struct qla_tgt *tgt = sess->tgt; 1343 struct qla_hw_data *ha = tgt->ha; 1344 + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 1345 struct se_session *se_sess; 1346 struct se_node_acl *se_nacl; 1347 struct tcm_qla2xxx_lport *lport; ··· 1357 se_nacl = se_sess->se_node_acl; 1358 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); 1359 1360 + lport = vha->vha_tgt.target_lport_ptr; 1361 if (!lport) { 1362 pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); 1363 dump_stack(); ··· 1391 unsigned char port_name[36]; 1392 unsigned long flags; 1393 1394 + lport = vha->vha_tgt.target_lport_ptr; 1395 if (!lport) { 1396 pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); 1397 dump_stack(); ··· 1455 { 1456 struct qla_tgt *tgt = sess->tgt; 1457 struct qla_hw_data *ha = tgt->ha; 1458 + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 1459 + struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr; 1460 struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; 1461 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, 1462 struct tcm_qla2xxx_nacl, se_node_acl); ··· 1562 return 0; 1563 } 1564 1565 + static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha, 1566 + void *target_lport_ptr, 1567 + u64 npiv_wwpn, u64 npiv_wwnn) 1568 { 1569 struct qla_hw_data *ha = vha->hw; 1570 + struct tcm_qla2xxx_lport *lport = 1571 + (struct tcm_qla2xxx_lport *)target_lport_ptr; 1572 /* 1573 + * Setup tgt_ops, local pointer to vha and target_lport_ptr 1574 */ 1575 + ha->tgt.tgt_ops = &tcm_qla2xxx_template; 1576 + vha->vha_tgt.target_lport_ptr = target_lport_ptr; 1577 lport->qla_vha = vha; 1578 1579 return 0; ··· 1602 if (ret != 0) 1603 goto out; 1604 1605 + ret = qlt_lport_register(lport, wwpn, 0, 0, 1606 + tcm_qla2xxx_lport_register_cb); 1607 if (ret != 0) 1608 goto out_lport; 1609 ··· 1621 struct tcm_qla2xxx_lport *lport = container_of(wwn, 1622 struct tcm_qla2xxx_lport, lport_wwn); 1623 struct scsi_qla_host *vha = lport->qla_vha; 1624 struct se_node_acl *node; 1625 u32 key = 0; 1626 ··· 1630 * shutdown of struct qla_tgt after the call to 1631 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.. 
1632 */ 1633 + if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped) 1634 + qlt_stop_phase2(vha->vha_tgt.qla_tgt); 1635 1636 qlt_lport_deregister(vha); 1637 ··· 1642 kfree(lport); 1643 } 1644 1645 + static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, 1646 + void *target_lport_ptr, 1647 + u64 npiv_wwpn, u64 npiv_wwnn) 1648 + { 1649 + struct fc_vport *vport; 1650 + struct Scsi_Host *sh = base_vha->host; 1651 + struct scsi_qla_host *npiv_vha; 1652 + struct tcm_qla2xxx_lport *lport = 1653 + (struct tcm_qla2xxx_lport *)target_lport_ptr; 1654 + struct fc_vport_identifiers vport_id; 1655 + 1656 + if (!qla_tgt_mode_enabled(base_vha)) { 1657 + pr_err("qla2xxx base_vha not enabled for target mode\n"); 1658 + return -EPERM; 1659 + } 1660 + 1661 + memset(&vport_id, 0, sizeof(vport_id)); 1662 + vport_id.port_name = npiv_wwpn; 1663 + vport_id.node_name = npiv_wwnn; 1664 + vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; 1665 + vport_id.vport_type = FC_PORTTYPE_NPIV; 1666 + vport_id.disable = false; 1667 + 1668 + vport = fc_vport_create(sh, 0, &vport_id); 1669 + if (!vport) { 1670 + pr_err("fc_vport_create failed for qla2xxx_npiv\n"); 1671 + return -ENODEV; 1672 + } 1673 + /* 1674 + * Setup local pointer to NPIV vhba + target_lport_ptr 1675 + */ 1676 + npiv_vha = (struct scsi_qla_host *)vport->dd_data; 1677 + npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr; 1678 + lport->qla_vha = npiv_vha; 1679 + 1680 + scsi_host_get(npiv_vha->host); 1681 + return 0; 1682 + } 1683 + 1684 + 1685 static struct se_wwn *tcm_qla2xxx_npiv_make_lport( 1686 struct target_fabric_configfs *tf, 1687 struct config_group *group, 1688 const char *name) 1689 { 1690 struct tcm_qla2xxx_lport *lport; 1691 + u64 phys_wwpn, npiv_wwpn, npiv_wwnn; 1692 + char *p, tmp[128]; 1693 int ret; 1694 1695 + snprintf(tmp, 128, "%s", name); 1696 + 1697 + p = strchr(tmp, '@'); 1698 + if (!p) { 1699 + pr_err("Unable to locate NPIV '@' seperator\n"); 1700 + return ERR_PTR(-EINVAL); 1701 + } 1702 + *p++ = '\0'; 1703 + 1704 + if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0) 1705 + return ERR_PTR(-EINVAL); 1706 + 1707 + if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1, 1708 + &npiv_wwpn, &npiv_wwnn) < 0) 1709 return ERR_PTR(-EINVAL); 1710 1711 lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); ··· 1666 TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn); 1667 sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn); 1668 1669 + ret = tcm_qla2xxx_init_lport(lport); 1670 if (ret != 0) 1671 goto out; 1672 1673 + ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn, 1674 + tcm_qla2xxx_lport_register_npiv_cb); 1675 + if (ret != 0) 1676 + goto out_lport; 1677 + 1678 return &lport->lport_wwn; 1679 + out_lport: 1680 + vfree(lport->lport_loopid_map); 1681 + btree_destroy32(&lport->lport_fcport_map); 1682 out: 1683 kfree(lport); 1684 return ERR_PTR(ret); ··· 1681 { 1682 struct tcm_qla2xxx_lport *lport = container_of(wwn, 1683 struct tcm_qla2xxx_lport, lport_wwn); 1684 + struct scsi_qla_host *npiv_vha = lport->qla_vha; 1685 + struct qla_hw_data *ha = npiv_vha->hw; 1686 + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1687 1688 + scsi_host_put(npiv_vha->host); 1689 + /* 1690 + * Notify libfc that we want to release the vha->fc_vport 1691 + */ 1692 + fc_vport_terminate(npiv_vha->fc_vport); 1693 + scsi_host_put(base_vha->host); 1694 kfree(lport); 1695 } 1696 ··· 1769 .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, 1770 .tpg_get_pr_transport_id_len = 
tcm_qla2xxx_get_pr_transport_id_len, 1771 .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, 1772 + .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, 1773 + .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, 1774 + .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode, 1775 + .tpg_check_prod_mode_write_protect = 1776 + tcm_qla2xxx_check_prod_write_protect, 1777 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, 1778 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, 1779 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, 1780 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, 1781 + .check_stop_free = tcm_qla2xxx_check_stop_free, 1782 .release_cmd = tcm_qla2xxx_release_cmd, 1783 .put_session = tcm_qla2xxx_put_session, 1784 .shutdown_session = tcm_qla2xxx_shutdown_session, ··· 1871 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl 1872 */ 1873 npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; 1874 + npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = 1875 + tcm_qla2xxx_tpg_attrs; 1876 npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; 1877 npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 1878 npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
-4
drivers/scsi/qla2xxx/tcm_qla2xxx.h
··· 70 struct tcm_qla2xxx_fc_loopid *lport_loopid_map; 71 /* Pointer to struct scsi_qla_host from qla2xxx LLD */ 72 struct scsi_qla_host *qla_vha; 73 - /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */ 74 - struct scsi_qla_host *qla_npiv_vp; 75 /* Pointer to struct qla_tgt pointer */ 76 struct qla_tgt lport_qla_tgt; 77 - /* Pointer to struct fc_vport for NPIV vport from libfc */ 78 - struct fc_vport *npiv_vport; 79 /* Pointer to TPG=1 for non NPIV mode */ 80 struct tcm_qla2xxx_tpg *tpg_1; 81 /* Returned by tcm_qla2xxx_make_lport() */
··· 70 struct tcm_qla2xxx_fc_loopid *lport_loopid_map; 71 /* Pointer to struct scsi_qla_host from qla2xxx LLD */ 72 struct scsi_qla_host *qla_vha; 73 /* Pointer to struct qla_tgt pointer */ 74 struct qla_tgt lport_qla_tgt; 75 /* Pointer to TPG=1 for non NPIV mode */ 76 struct tcm_qla2xxx_tpg *tpg_1; 77 /* Returned by tcm_qla2xxx_make_lport() */
+2
drivers/target/Kconfig
··· 3 tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure" 4 depends on SCSI && BLOCK 5 select CONFIGFS_FS 6 default n 7 help 8 Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled ··· 14 15 config TCM_IBLOCK 16 tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK" 17 help 18 Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered 19 access to Linux/Block devices using BIO
··· 3 tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure" 4 depends on SCSI && BLOCK 5 select CONFIGFS_FS 6 + select CRC_T10DIF 7 default n 8 help 9 Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled ··· 13 14 config TCM_IBLOCK 15 tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK" 16 + select BLK_DEV_INTEGRITY 17 help 18 Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered 19 access to Linux/Block devices using BIO
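Note on the two new select lines: CRC_T10DIF supplies the in-kernel T10 CRC routine that the DIF emulation uses to compute per-sector GUARD tags, and BLK_DEV_INTEGRITY supplies the bio integrity plumbing that TCM_IBLOCK now depends on. A rough illustration of what CRC_T10DIF provides (a sketch only: the tuple struct and helper below are hypothetical; crc_t10dif() is the real library routine this option enables):

	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <linux/crc-t10dif.h>

	/* Hypothetical 8-byte protection tuple, for illustration only */
	struct example_dif_tuple {
		__be16 guard_tag;	/* CRC16-T10DIF over the data block     */
		__be16 app_tag;		/* application tag, left at zero here   */
		__be32 ref_tag;		/* low 32 bits of the LBA, Type 1 style */
	};

	static void example_fill_dif(struct example_dif_tuple *sdt,
				     const unsigned char *data,
				     unsigned int block_size, sector_t lba)
	{
		/* GUARD tag is the T10 CRC of the logical block contents */
		sdt->guard_tag = cpu_to_be16(crc_t10dif(data, block_size));
		sdt->app_tag   = 0;
		sdt->ref_tag   = cpu_to_be32(lba & 0xffffffff);
	}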
+28 -20
drivers/target/iscsi/iscsi_target.c
··· 52 static LIST_HEAD(g_tiqn_list); 53 static LIST_HEAD(g_np_list); 54 static DEFINE_SPINLOCK(tiqn_lock); 55 - static DEFINE_SPINLOCK(np_lock); 56 57 static struct idr tiqn_idr; 58 struct idr sess_idr; ··· 307 return false; 308 } 309 310 static struct iscsi_np *iscsit_get_np( 311 struct __kernel_sockaddr_storage *sockaddr, 312 int network_transport) ··· 317 struct iscsi_np *np; 318 bool match; 319 320 - spin_lock_bh(&np_lock); 321 list_for_each_entry(np, &g_np_list, np_list) { 322 - spin_lock(&np->np_thread_lock); 323 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { 324 - spin_unlock(&np->np_thread_lock); 325 continue; 326 } 327 ··· 332 * while iscsi_tpg_add_network_portal() is called. 333 */ 334 np->np_exports++; 335 - spin_unlock(&np->np_thread_lock); 336 - spin_unlock_bh(&np_lock); 337 return np; 338 } 339 - spin_unlock(&np->np_thread_lock); 340 } 341 - spin_unlock_bh(&np_lock); 342 343 return NULL; 344 } ··· 350 struct sockaddr_in6 *sock_in6; 351 struct iscsi_np *np; 352 int ret; 353 /* 354 * Locate the existing struct iscsi_np if already active.. 355 */ 356 np = iscsit_get_np(sockaddr, network_transport); 357 - if (np) 358 return np; 359 360 np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL); 361 if (!np) { 362 pr_err("Unable to allocate memory for struct iscsi_np\n"); 363 return ERR_PTR(-ENOMEM); 364 } 365 ··· 388 ret = iscsi_target_setup_login_socket(np, sockaddr); 389 if (ret != 0) { 390 kfree(np); 391 return ERR_PTR(ret); 392 } 393 ··· 397 pr_err("Unable to create kthread: iscsi_np\n"); 398 ret = PTR_ERR(np->np_thread); 399 kfree(np); 400 return ERR_PTR(ret); 401 } 402 /* ··· 408 * point because iscsi_np has not been added to g_np_list yet. 409 */ 410 np->np_exports = 1; 411 412 - spin_lock_bh(&np_lock); 413 list_add_tail(&np->np_list, &g_np_list); 414 - spin_unlock_bh(&np_lock); 415 416 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n", 417 np->np_ip, np->np_port, np->np_transport->name); ··· 478 479 np->np_transport->iscsit_free_np(np); 480 481 - spin_lock_bh(&np_lock); 482 list_del(&np->np_list); 483 - spin_unlock_bh(&np_lock); 484 485 pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n", 486 np->np_ip, np->np_port, np->np_transport->name); ··· 630 { 631 struct iscsi_cmd *cmd; 632 633 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 634 if (!cmd) 635 return -1; 636 ··· 2483 if (!conn_p) 2484 return; 2485 2486 - cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC); 2487 if (!cmd) { 2488 iscsit_dec_conn_usage_count(conn_p); 2489 return; ··· 3959 3960 switch (hdr->opcode & ISCSI_OPCODE_MASK) { 3961 case ISCSI_OP_SCSI_CMD: 3962 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 3963 if (!cmd) 3964 goto reject; 3965 ··· 3971 case ISCSI_OP_NOOP_OUT: 3972 cmd = NULL; 3973 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { 3974 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 3975 if (!cmd) 3976 goto reject; 3977 } 3978 ret = iscsit_handle_nop_out(conn, cmd, buf); 3979 break; 3980 case ISCSI_OP_SCSI_TMFUNC: 3981 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 3982 if (!cmd) 3983 goto reject; 3984 3985 ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf); 3986 break; 3987 case ISCSI_OP_TEXT: 3988 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 3989 if (!cmd) 3990 goto reject; 3991 3992 ret = iscsit_handle_text_cmd(conn, cmd, buf); 3993 break; 3994 case ISCSI_OP_LOGOUT: 3995 - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 3996 if (!cmd) 3997 goto reject; 3998
··· 52 static LIST_HEAD(g_tiqn_list); 53 static LIST_HEAD(g_np_list); 54 static DEFINE_SPINLOCK(tiqn_lock); 55 + static DEFINE_MUTEX(np_lock); 56 57 static struct idr tiqn_idr; 58 struct idr sess_idr; ··· 307 return false; 308 } 309 310 + /* 311 + * Called with mutex np_lock held 312 + */ 313 static struct iscsi_np *iscsit_get_np( 314 struct __kernel_sockaddr_storage *sockaddr, 315 int network_transport) ··· 314 struct iscsi_np *np; 315 bool match; 316 317 list_for_each_entry(np, &g_np_list, np_list) { 318 + spin_lock_bh(&np->np_thread_lock); 319 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { 320 + spin_unlock_bh(&np->np_thread_lock); 321 continue; 322 } 323 ··· 330 * while iscsi_tpg_add_network_portal() is called. 331 */ 332 np->np_exports++; 333 + spin_unlock_bh(&np->np_thread_lock); 334 return np; 335 } 336 + spin_unlock_bh(&np->np_thread_lock); 337 } 338 339 return NULL; 340 } ··· 350 struct sockaddr_in6 *sock_in6; 351 struct iscsi_np *np; 352 int ret; 353 + 354 + mutex_lock(&np_lock); 355 + 356 /* 357 * Locate the existing struct iscsi_np if already active.. 358 */ 359 np = iscsit_get_np(sockaddr, network_transport); 360 + if (np) { 361 + mutex_unlock(&np_lock); 362 return np; 363 + } 364 365 np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL); 366 if (!np) { 367 pr_err("Unable to allocate memory for struct iscsi_np\n"); 368 + mutex_unlock(&np_lock); 369 return ERR_PTR(-ENOMEM); 370 } 371 ··· 382 ret = iscsi_target_setup_login_socket(np, sockaddr); 383 if (ret != 0) { 384 kfree(np); 385 + mutex_unlock(&np_lock); 386 return ERR_PTR(ret); 387 } 388 ··· 390 pr_err("Unable to create kthread: iscsi_np\n"); 391 ret = PTR_ERR(np->np_thread); 392 kfree(np); 393 + mutex_unlock(&np_lock); 394 return ERR_PTR(ret); 395 } 396 /* ··· 400 * point because iscsi_np has not been added to g_np_list yet. 
401 */ 402 np->np_exports = 1; 403 + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 404 405 list_add_tail(&np->np_list, &g_np_list); 406 + mutex_unlock(&np_lock); 407 408 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n", 409 np->np_ip, np->np_port, np->np_transport->name); ··· 470 471 np->np_transport->iscsit_free_np(np); 472 473 + mutex_lock(&np_lock); 474 list_del(&np->np_list); 475 + mutex_unlock(&np_lock); 476 477 pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n", 478 np->np_ip, np->np_port, np->np_transport->name); ··· 622 { 623 struct iscsi_cmd *cmd; 624 625 + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 626 if (!cmd) 627 return -1; 628 ··· 2475 if (!conn_p) 2476 return; 2477 2478 + cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING); 2479 if (!cmd) { 2480 iscsit_dec_conn_usage_count(conn_p); 2481 return; ··· 3951 3952 switch (hdr->opcode & ISCSI_OPCODE_MASK) { 3953 case ISCSI_OP_SCSI_CMD: 3954 + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 3955 if (!cmd) 3956 goto reject; 3957 ··· 3963 case ISCSI_OP_NOOP_OUT: 3964 cmd = NULL; 3965 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { 3966 + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 3967 if (!cmd) 3968 goto reject; 3969 } 3970 ret = iscsit_handle_nop_out(conn, cmd, buf); 3971 break; 3972 case ISCSI_OP_SCSI_TMFUNC: 3973 + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 3974 if (!cmd) 3975 goto reject; 3976 3977 ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf); 3978 break; 3979 case ISCSI_OP_TEXT: 3980 + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 3981 if (!cmd) 3982 goto reject; 3983 3984 ret = iscsit_handle_text_cmd(conn, cmd, buf); 3985 break; 3986 case ISCSI_OP_LOGOUT: 3987 + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 3988 if (!cmd) 3989 goto reject; 3990
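For context on the np_lock conversion above: the create path has to allocate the portal, set up the login socket and spawn the np kthread, all of which can sleep, so the lookup-or-create sequence cannot sit under a spinlock. With a mutex the whole sequence stays serialized, closing the window in which two threads could both miss the lookup and register duplicate portals. A simplified sketch of the resulting pattern (the helper names here are hypothetical, not the actual iscsi_target.c symbols):

	static DEFINE_MUTEX(example_np_lock);

	static struct iscsi_np *example_add_np(struct __kernel_sockaddr_storage *ss,
					       int transport)
	{
		struct iscsi_np *np;

		mutex_lock(&example_np_lock);
		np = example_lookup_active_np(ss, transport);	/* reuse an active portal */
		if (!np)
			np = example_create_np(ss, transport);	/* kzalloc/socket/kthread: may sleep */
		mutex_unlock(&example_np_lock);

		return np;
	}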
+1 -1
drivers/target/iscsi/iscsi_target_nego.c
··· 1192 */ 1193 alloc_tags: 1194 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); 1195 - tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS; 1196 tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; 1197 1198 ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
··· 1192 */ 1193 alloc_tags: 1194 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); 1195 + tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS; 1196 tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; 1197 1198 ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
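A quick worked example of the new tag-pool sizing, for a negotiated queue_depth of 64 (large enough that the ISCSIT_MIN_TAGS floor on the line above does not apply):

	old: tag_num = 64 + 64/2 + ISCSIT_EXTRA_TAGS =  96 + ISCSIT_EXTRA_TAGS
	new: tag_num = 64 * 2   + ISCSIT_EXTRA_TAGS = 128 + ISCSIT_EXTRA_TAGS

i.e. the per-session pool grows from 1.5x to 2x the queue depth, plus the same fixed reserve.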
+6 -3
drivers/target/iscsi/iscsi_target_util.c
··· 152 * May be called from software interrupt (timer) context for allocating 153 * iSCSI NopINs. 154 */ 155 - struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) 156 { 157 struct iscsi_cmd *cmd; 158 struct se_session *se_sess = conn->sess->se_sess; 159 int size, tag; 160 161 - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask); 162 size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; 163 cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size)); 164 memset(cmd, 0, size); ··· 929 u8 state; 930 struct iscsi_cmd *cmd; 931 932 - cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC); 933 if (!cmd) 934 return -1; 935
··· 152 * May be called from software interrupt (timer) context for allocating 153 * iSCSI NopINs. 154 */ 155 + struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state) 156 { 157 struct iscsi_cmd *cmd; 158 struct se_session *se_sess = conn->sess->se_sess; 159 int size, tag; 160 161 + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state); 162 + if (tag < 0) 163 + return NULL; 164 + 165 size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; 166 cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size)); 167 memset(cmd, 0, size); ··· 926 u8 state; 927 struct iscsi_cmd *cmd; 928 929 + cmd = iscsit_allocate_cmd(conn, TASK_RUNNING); 930 if (!cmd) 931 return -1; 932
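The added tag < 0 check matters because the second argument is now a task state rather than a gfp mask. A short sketch of the caller-side contract (illustrative; percpu_ida_alloc() is the API used above, the wrapper below is hypothetical):

	/*
	 * With TASK_RUNNING the allocation never sleeps and simply fails when
	 * the pool is empty; with TASK_INTERRUPTIBLE it may sleep waiting for
	 * a free tag but can still come back negative if interrupted by a
	 * signal.  Either way the caller must handle a negative return.
	 */
	static int example_get_tag_atomic(struct se_session *se_sess)
	{
		int tag;

		tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
		if (tag < 0)
			return -1;	/* pool exhausted, caller must cope */

		return tag;
	}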
+1 -1
drivers/target/iscsi/iscsi_target_util.h
··· 9 extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *); 10 extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *); 11 extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t); 12 - extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); 13 extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32); 14 extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *); 15 extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
··· 9 extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *); 10 extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *); 11 extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t); 12 + extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int); 13 extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32); 14 extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *); 15 extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
+14 -6
drivers/target/loopback/tcm_loop.c
··· 217 scsi_bufflen(sc), tcm_loop_sam_attr(sc), 218 sc->sc_data_direction, 0, 219 scsi_sglist(sc), scsi_sg_count(sc), 220 - sgl_bidi, sgl_bidi_count); 221 if (rc < 0) { 222 set_host_byte(sc, DID_NO_CONNECT); 223 goto out_done; ··· 463 { 464 struct tcm_loop_hba *tl_hba; 465 struct Scsi_Host *sh; 466 - int error; 467 468 tl_hba = to_tcm_loop_hba(dev); 469 ··· 486 sh->max_lun = 0; 487 sh->max_channel = 0; 488 sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; 489 490 error = scsi_add_host(sh, &tl_hba->dev); 491 if (error) { ··· 1236 1237 /* Start items for tcm_loop_naa_cit */ 1238 1239 - struct se_portal_group *tcm_loop_make_naa_tpg( 1240 struct se_wwn *wwn, 1241 struct config_group *group, 1242 const char *name) ··· 1281 return &tl_tpg->tl_se_tpg; 1282 } 1283 1284 - void tcm_loop_drop_naa_tpg( 1285 struct se_portal_group *se_tpg) 1286 { 1287 struct se_wwn *wwn = se_tpg->se_tpg_wwn; ··· 1313 1314 /* Start items for tcm_loop_cit */ 1315 1316 - struct se_wwn *tcm_loop_make_scsi_hba( 1317 struct target_fabric_configfs *tf, 1318 struct config_group *group, 1319 const char *name) ··· 1383 return ERR_PTR(ret); 1384 } 1385 1386 - void tcm_loop_drop_scsi_hba( 1387 struct se_wwn *wwn) 1388 { 1389 struct tcm_loop_hba *tl_hba = container_of(wwn,
··· 217 scsi_bufflen(sc), tcm_loop_sam_attr(sc), 218 sc->sc_data_direction, 0, 219 scsi_sglist(sc), scsi_sg_count(sc), 220 + sgl_bidi, sgl_bidi_count, 221 + scsi_prot_sglist(sc), scsi_prot_sg_count(sc)); 222 if (rc < 0) { 223 set_host_byte(sc, DID_NO_CONNECT); 224 goto out_done; ··· 462 { 463 struct tcm_loop_hba *tl_hba; 464 struct Scsi_Host *sh; 465 + int error, host_prot; 466 467 tl_hba = to_tcm_loop_hba(dev); 468 ··· 485 sh->max_lun = 0; 486 sh->max_channel = 0; 487 sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; 488 + 489 + host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | 490 + SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | 491 + SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; 492 + 493 + scsi_host_set_prot(sh, host_prot); 494 + scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC); 495 496 error = scsi_add_host(sh, &tl_hba->dev); 497 if (error) { ··· 1228 1229 /* Start items for tcm_loop_naa_cit */ 1230 1231 + static struct se_portal_group *tcm_loop_make_naa_tpg( 1232 struct se_wwn *wwn, 1233 struct config_group *group, 1234 const char *name) ··· 1273 return &tl_tpg->tl_se_tpg; 1274 } 1275 1276 + static void tcm_loop_drop_naa_tpg( 1277 struct se_portal_group *se_tpg) 1278 { 1279 struct se_wwn *wwn = se_tpg->se_tpg_wwn; ··· 1305 1306 /* Start items for tcm_loop_cit */ 1307 1308 + static struct se_wwn *tcm_loop_make_scsi_hba( 1309 struct target_fabric_configfs *tf, 1310 struct config_group *group, 1311 const char *name) ··· 1375 return ERR_PTR(ret); 1376 } 1377 1378 + static void tcm_loop_drop_scsi_hba( 1379 struct se_wwn *wwn) 1380 { 1381 struct tcm_loop_hba *tl_hba = container_of(wwn,
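On the protection setup above: the SHOST_DIF_* bits advertise support for T10 protection information types 1-3 on the target side of the emulated host, the SHOST_DIX_* bits advertise integrity-metadata exchange with the block layer, and SHOST_DIX_GUARD_CRC selects the T10 CRC as the guard-tag algorithm. A small illustrative check using the standard Scsi_Host helpers (not part of the patch):

	if (scsi_host_get_prot(sh) & SHOST_DIX_TYPE1_PROTECTION)
		pr_debug("tcm_loop: Type 1 integrity exchange with the block layer enabled\n");

	if (scsi_host_get_guard(sh) & SHOST_DIX_GUARD_CRC)
		pr_debug("tcm_loop: guard tags use the T10 CRC\n");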
+466 -94
drivers/target/target_core_alua.c
··· 41 #include "target_core_alua.h" 42 #include "target_core_ua.h" 43 44 - static sense_reason_t core_alua_check_transition(int state, int *primary); 45 static int core_alua_set_tg_pt_secondary_state( 46 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 47 struct se_port *port, int explicit, int offline); 48 49 static u16 alua_lu_gps_counter; 50 static u32 alua_lu_gps_count; ··· 56 static LIST_HEAD(lu_gps_list); 57 58 struct t10_alua_lu_gp *default_lu_gp; 59 60 /* 61 * REPORT_TARGET_PORT_GROUPS ··· 293 unsigned char *ptr; 294 sense_reason_t rc = TCM_NO_SENSE; 295 u32 len = 4; /* Skip over RESERVED area in header */ 296 - int alua_access_state, primary = 0; 297 u16 tg_pt_id, rtpi; 298 299 if (!l_port) ··· 335 rc = TCM_UNSUPPORTED_SCSI_OPCODE; 336 goto out; 337 } 338 339 ptr = &buf[4]; /* Skip over RESERVED area in header */ 340 ··· 347 * the state is a primary or secondary target port asymmetric 348 * access state. 349 */ 350 - rc = core_alua_check_transition(alua_access_state, &primary); 351 if (rc) { 352 /* 353 * If the SET TARGET PORT GROUPS attempts to establish ··· 468 */ 469 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED; 470 cmd->alua_nonop_delay = nonop_delay_msecs; 471 return 0; 472 } 473 ··· 743 case ALUA_ACCESS_STATE_TRANSITION: 744 ret = core_alua_state_transition(cmd, cdb, &alua_ascq); 745 break; 746 /* 747 * OFFLINE is a secondary ALUA target port group access state, that is 748 * handled above with struct se_port->sep_tg_pt_secondary_offline=1 ··· 781 * Check implicit and explicit ALUA state change request. 782 */ 783 static sense_reason_t 784 - core_alua_check_transition(int state, int *primary) 785 { 786 switch (state) { 787 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: 788 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 789 case ALUA_ACCESS_STATE_STANDBY: 790 case ALUA_ACCESS_STATE_UNAVAILABLE: 791 - /* 792 - * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are 793 - * defined as primary target port asymmetric access states. 794 - */ 795 *primary = 1; 796 break; 797 case ALUA_ACCESS_STATE_OFFLINE: ··· 818 * OFFLINE state is defined as a secondary target port 819 * asymmetric access state. 
820 */ 821 *primary = 0; 822 break; 823 default: 824 pr_err("Unknown ALUA access state: 0x%02x\n", state); 825 return TCM_INVALID_PARAMETER_LIST; 826 } 827 828 return 0; 829 } 830 831 static char *core_alua_dump_state(int state) ··· 848 return "Active/Optimized"; 849 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 850 return "Active/NonOptimized"; 851 case ALUA_ACCESS_STATE_STANDBY: 852 return "Standby"; 853 case ALUA_ACCESS_STATE_UNAVAILABLE: 854 return "Unavailable"; 855 case ALUA_ACCESS_STATE_OFFLINE: 856 return "Offline"; 857 default: 858 return "Unknown"; 859 } ··· 934 * Called with tg_pt_gp->tg_pt_gp_md_mutex held 935 */ 936 static int core_alua_update_tpg_primary_metadata( 937 - struct t10_alua_tg_pt_gp *tg_pt_gp, 938 - int primary_state, 939 - unsigned char *md_buf) 940 { 941 struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn; 942 char path[ALUA_METADATA_PATH_LEN]; 943 - int len; 944 945 memset(path, 0, ALUA_METADATA_PATH_LEN); 946 947 - len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len, 948 "tg_pt_gp_id=%hu\n" 949 "alua_access_state=0x%02x\n" 950 "alua_access_status=0x%02x\n", 951 - tg_pt_gp->tg_pt_gp_id, primary_state, 952 tg_pt_gp->tg_pt_gp_alua_access_status); 953 954 snprintf(path, ALUA_METADATA_PATH_LEN, 955 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], 956 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); 957 958 - return core_alua_write_tpg_metadata(path, md_buf, len); 959 } 960 961 - static int core_alua_do_transition_tg_pt( 962 - struct t10_alua_tg_pt_gp *tg_pt_gp, 963 - struct se_port *l_port, 964 - struct se_node_acl *nacl, 965 - unsigned char *md_buf, 966 - int new_state, 967 - int explicit) 968 { 969 struct se_dev_entry *se_deve; 970 struct se_lun_acl *lacl; 971 struct se_port *port; 972 struct t10_alua_tg_pt_gp_member *mem; 973 - int old_state = 0; 974 - /* 975 - * Save the old primary ALUA access state, and set the current state 976 - * to ALUA_ACCESS_STATE_TRANSITION. 977 - */ 978 - old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 979 - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 980 - ALUA_ACCESS_STATE_TRANSITION); 981 - tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 982 - ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 983 - ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; 984 - /* 985 - * Check for the optional ALUA primary state transition delay 986 - */ 987 - if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) 988 - msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); 989 990 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 991 list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, ··· 1011 if (!lacl) 1012 continue; 1013 1014 - if (explicit && 1015 - (nacl != NULL) && (nacl == lacl->se_lun_nacl) && 1016 - (l_port != NULL) && (l_port == port)) 1017 continue; 1018 1019 core_scsi3_ua_allocate(lacl->se_lun_nacl, ··· 1044 */ 1045 if (tg_pt_gp->tg_pt_gp_write_metadata) { 1046 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); 1047 - core_alua_update_tpg_primary_metadata(tg_pt_gp, 1048 - new_state, md_buf); 1049 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); 1050 } 1051 /* 1052 * Set the current primary ALUA access state to the requested new state 1053 */ 1054 - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); 1055 1056 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 1057 " from primary access state %s to %s\n", (explicit) ? 
"explicit" : 1058 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 1059 - tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), 1060 - core_alua_dump_state(new_state)); 1061 1062 return 0; 1063 } ··· 1153 int explicit) 1154 { 1155 struct se_device *dev; 1156 - struct se_port *port; 1157 - struct se_node_acl *nacl; 1158 struct t10_alua_lu_gp *lu_gp; 1159 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; 1160 struct t10_alua_tg_pt_gp *tg_pt_gp; 1161 - unsigned char *md_buf; 1162 - int primary; 1163 1164 - if (core_alua_check_transition(new_state, &primary) != 0) 1165 return -EINVAL; 1166 - 1167 - md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); 1168 - if (!md_buf) { 1169 - pr_err("Unable to allocate buf for ALUA metadata\n"); 1170 - return -ENOMEM; 1171 - } 1172 1173 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; 1174 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); ··· 1178 * core_alua_do_transition_tg_pt() will always return 1179 * success. 1180 */ 1181 - core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, 1182 - md_buf, new_state, explicit); 1183 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1184 smp_mb__after_atomic_dec(); 1185 - kfree(md_buf); 1186 - return 0; 1187 } 1188 /* 1189 * For all other LU groups aside from 'default_lu_gp', walk all of ··· 1219 continue; 1220 1221 if (l_tg_pt_gp == tg_pt_gp) { 1222 - port = l_port; 1223 - nacl = l_nacl; 1224 } else { 1225 - port = NULL; 1226 - nacl = NULL; 1227 } 1228 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1229 smp_mb__after_atomic_inc(); ··· 1232 * core_alua_do_transition_tg_pt() will always return 1233 * success. 1234 */ 1235 - core_alua_do_transition_tg_pt(tg_pt_gp, port, 1236 - nacl, md_buf, new_state, explicit); 1237 1238 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1239 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1240 smp_mb__after_atomic_dec(); 1241 } 1242 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1243 ··· 1249 } 1250 spin_unlock(&lu_gp->lu_gp_lock); 1251 1252 - pr_debug("Successfully processed LU Group: %s all ALUA TG PT" 1253 - " Group IDs: %hu %s transition to primary state: %s\n", 1254 - config_item_name(&lu_gp->lu_gp_group.cg_item), 1255 - l_tg_pt_gp->tg_pt_gp_id, (explicit) ? 
"explicit" : "implicit", 1256 - core_alua_dump_state(new_state)); 1257 1258 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1259 smp_mb__after_atomic_dec(); 1260 - kfree(md_buf); 1261 - return 0; 1262 } 1263 1264 /* ··· 1268 */ 1269 static int core_alua_update_tpg_secondary_metadata( 1270 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1271 - struct se_port *port, 1272 - unsigned char *md_buf, 1273 - u32 md_buf_len) 1274 { 1275 struct se_portal_group *se_tpg = port->sep_tpg; 1276 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; 1277 - int len; 1278 1279 memset(path, 0, ALUA_METADATA_PATH_LEN); 1280 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); ··· 1291 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", 1292 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); 1293 1294 - len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" 1295 "alua_tg_pt_status=0x%02x\n", 1296 atomic_read(&port->sep_tg_pt_secondary_offline), 1297 port->sep_tg_pt_secondary_stat); ··· 1300 se_tpg->se_tpg_tfo->get_fabric_name(), wwn, 1301 port->sep_lun->unpacked_lun); 1302 1303 - return core_alua_write_tpg_metadata(path, md_buf, len); 1304 } 1305 1306 static int core_alua_set_tg_pt_secondary_state( ··· 1313 int offline) 1314 { 1315 struct t10_alua_tg_pt_gp *tg_pt_gp; 1316 - unsigned char *md_buf; 1317 - u32 md_buf_len; 1318 int trans_delay_msecs; 1319 1320 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); ··· 1333 else 1334 atomic_set(&port->sep_tg_pt_secondary_offline, 0); 1335 1336 - md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; 1337 port->sep_tg_pt_secondary_stat = (explicit) ? 1338 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 1339 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; ··· 1354 * secondary state and status 1355 */ 1356 if (port->sep_tg_pt_secondary_write_md) { 1357 - md_buf = kzalloc(md_buf_len, GFP_KERNEL); 1358 - if (!md_buf) { 1359 - pr_err("Unable to allocate md_buf for" 1360 - " secondary ALUA access metadata\n"); 1361 - return -ENOMEM; 1362 - } 1363 mutex_lock(&port->sep_tg_pt_md_mutex); 1364 - core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, 1365 - md_buf, md_buf_len); 1366 mutex_unlock(&port->sep_tg_pt_md_mutex); 1367 - 1368 - kfree(md_buf); 1369 } 1370 1371 return 0; 1372 } 1373 1374 struct t10_alua_lu_gp * ··· 1715 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); 1716 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1717 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); 1718 tg_pt_gp->tg_pt_gp_dev = dev; 1719 - tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; 1720 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1721 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); 1722 /* ··· 1844 list_del(&tg_pt_gp->tg_pt_gp_list); 1845 dev->t10_alua.alua_tg_pt_gps_counter--; 1846 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1847 1848 /* 1849 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
··· 41 #include "target_core_alua.h" 42 #include "target_core_ua.h" 43 44 + static sense_reason_t core_alua_check_transition(int state, int valid, 45 + int *primary); 46 static int core_alua_set_tg_pt_secondary_state( 47 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 48 struct se_port *port, int explicit, int offline); 49 + 50 + static char *core_alua_dump_state(int state); 51 52 static u16 alua_lu_gps_counter; 53 static u32 alua_lu_gps_count; ··· 53 static LIST_HEAD(lu_gps_list); 54 55 struct t10_alua_lu_gp *default_lu_gp; 56 + 57 + /* 58 + * REPORT REFERRALS 59 + * 60 + * See sbc3r35 section 5.23 61 + */ 62 + sense_reason_t 63 + target_emulate_report_referrals(struct se_cmd *cmd) 64 + { 65 + struct se_device *dev = cmd->se_dev; 66 + struct t10_alua_lba_map *map; 67 + struct t10_alua_lba_map_member *map_mem; 68 + unsigned char *buf; 69 + u32 rd_len = 0, off; 70 + 71 + if (cmd->data_length < 4) { 72 + pr_warn("REPORT REFERRALS allocation length %u too" 73 + " small\n", cmd->data_length); 74 + return TCM_INVALID_CDB_FIELD; 75 + } 76 + 77 + buf = transport_kmap_data_sg(cmd); 78 + if (!buf) 79 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 80 + 81 + off = 4; 82 + spin_lock(&dev->t10_alua.lba_map_lock); 83 + if (list_empty(&dev->t10_alua.lba_map_list)) { 84 + spin_unlock(&dev->t10_alua.lba_map_lock); 85 + transport_kunmap_data_sg(cmd); 86 + 87 + return TCM_UNSUPPORTED_SCSI_OPCODE; 88 + } 89 + 90 + list_for_each_entry(map, &dev->t10_alua.lba_map_list, 91 + lba_map_list) { 92 + int desc_num = off + 3; 93 + int pg_num; 94 + 95 + off += 4; 96 + if (cmd->data_length > off) 97 + put_unaligned_be64(map->lba_map_first_lba, &buf[off]); 98 + off += 8; 99 + if (cmd->data_length > off) 100 + put_unaligned_be64(map->lba_map_last_lba, &buf[off]); 101 + off += 8; 102 + rd_len += 20; 103 + pg_num = 0; 104 + list_for_each_entry(map_mem, &map->lba_map_mem_list, 105 + lba_map_mem_list) { 106 + int alua_state = map_mem->lba_map_mem_alua_state; 107 + int alua_pg_id = map_mem->lba_map_mem_alua_pg_id; 108 + 109 + if (cmd->data_length > off) 110 + buf[off] = alua_state & 0x0f; 111 + off += 2; 112 + if (cmd->data_length > off) 113 + buf[off] = (alua_pg_id >> 8) & 0xff; 114 + off++; 115 + if (cmd->data_length > off) 116 + buf[off] = (alua_pg_id & 0xff); 117 + off++; 118 + rd_len += 4; 119 + pg_num++; 120 + } 121 + if (cmd->data_length > desc_num) 122 + buf[desc_num] = pg_num; 123 + } 124 + spin_unlock(&dev->t10_alua.lba_map_lock); 125 + 126 + /* 127 + * Set the RETURN DATA LENGTH set in the header of the DataIN Payload 128 + */ 129 + put_unaligned_be16(rd_len, &buf[2]); 130 + 131 + transport_kunmap_data_sg(cmd); 132 + 133 + target_complete_cmd(cmd, GOOD); 134 + return 0; 135 + } 136 137 /* 138 * REPORT_TARGET_PORT_GROUPS ··· 210 unsigned char *ptr; 211 sense_reason_t rc = TCM_NO_SENSE; 212 u32 len = 4; /* Skip over RESERVED area in header */ 213 + int alua_access_state, primary = 0, valid_states; 214 u16 tg_pt_id, rtpi; 215 216 if (!l_port) ··· 252 rc = TCM_UNSUPPORTED_SCSI_OPCODE; 253 goto out; 254 } 255 + valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; 256 257 ptr = &buf[4]; /* Skip over RESERVED area in header */ 258 ··· 263 * the state is a primary or secondary target port asymmetric 264 * access state. 
265 */ 266 + rc = core_alua_check_transition(alua_access_state, 267 + valid_states, &primary); 268 if (rc) { 269 /* 270 * If the SET TARGET PORT GROUPS attempts to establish ··· 383 */ 384 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED; 385 cmd->alua_nonop_delay = nonop_delay_msecs; 386 + return 0; 387 + } 388 + 389 + static inline int core_alua_state_lba_dependent( 390 + struct se_cmd *cmd, 391 + struct t10_alua_tg_pt_gp *tg_pt_gp, 392 + u8 *alua_ascq) 393 + { 394 + struct se_device *dev = cmd->se_dev; 395 + u64 segment_size, segment_mult, sectors, lba; 396 + 397 + /* Only need to check for cdb actually containing LBAs */ 398 + if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB)) 399 + return 0; 400 + 401 + spin_lock(&dev->t10_alua.lba_map_lock); 402 + segment_size = dev->t10_alua.lba_map_segment_size; 403 + segment_mult = dev->t10_alua.lba_map_segment_multiplier; 404 + sectors = cmd->data_length / dev->dev_attrib.block_size; 405 + 406 + lba = cmd->t_task_lba; 407 + while (lba < cmd->t_task_lba + sectors) { 408 + struct t10_alua_lba_map *cur_map = NULL, *map; 409 + struct t10_alua_lba_map_member *map_mem; 410 + 411 + list_for_each_entry(map, &dev->t10_alua.lba_map_list, 412 + lba_map_list) { 413 + u64 start_lba, last_lba; 414 + u64 first_lba = map->lba_map_first_lba; 415 + 416 + if (segment_mult) { 417 + u64 tmp = lba; 418 + start_lba = sector_div(tmp, segment_size * segment_mult); 419 + 420 + last_lba = first_lba + segment_size - 1; 421 + if (start_lba >= first_lba && 422 + start_lba <= last_lba) { 423 + lba += segment_size; 424 + cur_map = map; 425 + break; 426 + } 427 + } else { 428 + last_lba = map->lba_map_last_lba; 429 + if (lba >= first_lba && lba <= last_lba) { 430 + lba = last_lba + 1; 431 + cur_map = map; 432 + break; 433 + } 434 + } 435 + } 436 + if (!cur_map) { 437 + spin_unlock(&dev->t10_alua.lba_map_lock); 438 + *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 439 + return 1; 440 + } 441 + list_for_each_entry(map_mem, &cur_map->lba_map_mem_list, 442 + lba_map_mem_list) { 443 + if (map_mem->lba_map_mem_alua_pg_id != 444 + tg_pt_gp->tg_pt_gp_id) 445 + continue; 446 + switch(map_mem->lba_map_mem_alua_state) { 447 + case ALUA_ACCESS_STATE_STANDBY: 448 + spin_unlock(&dev->t10_alua.lba_map_lock); 449 + *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 450 + return 1; 451 + case ALUA_ACCESS_STATE_UNAVAILABLE: 452 + spin_unlock(&dev->t10_alua.lba_map_lock); 453 + *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 454 + return 1; 455 + default: 456 + break; 457 + } 458 + } 459 + } 460 + spin_unlock(&dev->t10_alua.lba_map_lock); 461 return 0; 462 } 463 ··· 583 case ALUA_ACCESS_STATE_TRANSITION: 584 ret = core_alua_state_transition(cmd, cdb, &alua_ascq); 585 break; 586 + case ALUA_ACCESS_STATE_LBA_DEPENDENT: 587 + ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq); 588 + break; 589 /* 590 * OFFLINE is a secondary ALUA target port group access state, that is 591 * handled above with struct se_port->sep_tg_pt_secondary_offline=1 ··· 618 * Check implicit and explicit ALUA state change request. 619 */ 620 static sense_reason_t 621 + core_alua_check_transition(int state, int valid, int *primary) 622 { 623 + /* 624 + * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are 625 + * defined as primary target port asymmetric access states. 
626 + */ 627 switch (state) { 628 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: 629 + if (!(valid & ALUA_AO_SUP)) 630 + goto not_supported; 631 + *primary = 1; 632 + break; 633 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 634 + if (!(valid & ALUA_AN_SUP)) 635 + goto not_supported; 636 + *primary = 1; 637 + break; 638 case ALUA_ACCESS_STATE_STANDBY: 639 + if (!(valid & ALUA_S_SUP)) 640 + goto not_supported; 641 + *primary = 1; 642 + break; 643 case ALUA_ACCESS_STATE_UNAVAILABLE: 644 + if (!(valid & ALUA_U_SUP)) 645 + goto not_supported; 646 + *primary = 1; 647 + break; 648 + case ALUA_ACCESS_STATE_LBA_DEPENDENT: 649 + if (!(valid & ALUA_LBD_SUP)) 650 + goto not_supported; 651 *primary = 1; 652 break; 653 case ALUA_ACCESS_STATE_OFFLINE: ··· 636 * OFFLINE state is defined as a secondary target port 637 * asymmetric access state. 638 */ 639 + if (!(valid & ALUA_O_SUP)) 640 + goto not_supported; 641 *primary = 0; 642 break; 643 + case ALUA_ACCESS_STATE_TRANSITION: 644 + /* 645 + * Transitioning is set internally, and 646 + * cannot be selected manually. 647 + */ 648 + goto not_supported; 649 default: 650 pr_err("Unknown ALUA access state: 0x%02x\n", state); 651 return TCM_INVALID_PARAMETER_LIST; 652 } 653 654 return 0; 655 + 656 + not_supported: 657 + pr_err("ALUA access state %s not supported", 658 + core_alua_dump_state(state)); 659 + return TCM_INVALID_PARAMETER_LIST; 660 } 661 662 static char *core_alua_dump_state(int state) ··· 653 return "Active/Optimized"; 654 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 655 return "Active/NonOptimized"; 656 + case ALUA_ACCESS_STATE_LBA_DEPENDENT: 657 + return "LBA Dependent"; 658 case ALUA_ACCESS_STATE_STANDBY: 659 return "Standby"; 660 case ALUA_ACCESS_STATE_UNAVAILABLE: 661 return "Unavailable"; 662 case ALUA_ACCESS_STATE_OFFLINE: 663 return "Offline"; 664 + case ALUA_ACCESS_STATE_TRANSITION: 665 + return "Transitioning"; 666 default: 667 return "Unknown"; 668 } ··· 735 * Called with tg_pt_gp->tg_pt_gp_md_mutex held 736 */ 737 static int core_alua_update_tpg_primary_metadata( 738 + struct t10_alua_tg_pt_gp *tg_pt_gp) 739 { 740 + unsigned char *md_buf; 741 struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn; 742 char path[ALUA_METADATA_PATH_LEN]; 743 + int len, rc; 744 + 745 + md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); 746 + if (!md_buf) { 747 + pr_err("Unable to allocate buf for ALUA metadata\n"); 748 + return -ENOMEM; 749 + } 750 751 memset(path, 0, ALUA_METADATA_PATH_LEN); 752 753 + len = snprintf(md_buf, ALUA_MD_BUF_LEN, 754 "tg_pt_gp_id=%hu\n" 755 "alua_access_state=0x%02x\n" 756 "alua_access_status=0x%02x\n", 757 + tg_pt_gp->tg_pt_gp_id, 758 + tg_pt_gp->tg_pt_gp_alua_pending_state, 759 tg_pt_gp->tg_pt_gp_alua_access_status); 760 761 snprintf(path, ALUA_METADATA_PATH_LEN, 762 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], 763 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); 764 765 + rc = core_alua_write_tpg_metadata(path, md_buf, len); 766 + kfree(md_buf); 767 + return rc; 768 } 769 770 + static void core_alua_do_transition_tg_pt_work(struct work_struct *work) 771 { 772 + struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, 773 + struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); 774 + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 775 struct se_dev_entry *se_deve; 776 struct se_lun_acl *lacl; 777 struct se_port *port; 778 struct t10_alua_tg_pt_gp_member *mem; 779 + bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == 780 + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); 781 782 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 783 
list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, ··· 821 if (!lacl) 822 continue; 823 824 + if ((tg_pt_gp->tg_pt_gp_alua_access_status == 825 + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && 826 + (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) && 827 + (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) && 828 + (tg_pt_gp->tg_pt_gp_alua_port != NULL) && 829 + (tg_pt_gp->tg_pt_gp_alua_port == port)) 830 continue; 831 832 core_scsi3_ua_allocate(lacl->se_lun_nacl, ··· 851 */ 852 if (tg_pt_gp->tg_pt_gp_write_metadata) { 853 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); 854 + core_alua_update_tpg_primary_metadata(tg_pt_gp); 855 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); 856 } 857 /* 858 * Set the current primary ALUA access state to the requested new state 859 */ 860 + atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 861 + tg_pt_gp->tg_pt_gp_alua_pending_state); 862 863 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 864 " from primary access state %s to %s\n", (explicit) ? "explicit" : 865 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 866 + tg_pt_gp->tg_pt_gp_id, 867 + core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state), 868 + core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); 869 + spin_lock(&dev->t10_alua.tg_pt_gps_lock); 870 + atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 871 + smp_mb__after_atomic_dec(); 872 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 873 + 874 + if (tg_pt_gp->tg_pt_gp_transition_complete) 875 + complete(tg_pt_gp->tg_pt_gp_transition_complete); 876 + } 877 + 878 + static int core_alua_do_transition_tg_pt( 879 + struct t10_alua_tg_pt_gp *tg_pt_gp, 880 + int new_state, 881 + int explicit) 882 + { 883 + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 884 + DECLARE_COMPLETION_ONSTACK(wait); 885 + 886 + /* Nothing to be done here */ 887 + if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) 888 + return 0; 889 + 890 + if (new_state == ALUA_ACCESS_STATE_TRANSITION) 891 + return -EAGAIN; 892 + 893 + /* 894 + * Flush any pending transitions 895 + */ 896 + if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs && 897 + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == 898 + ALUA_ACCESS_STATE_TRANSITION) { 899 + /* Just in case */ 900 + tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; 901 + tg_pt_gp->tg_pt_gp_transition_complete = &wait; 902 + flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); 903 + wait_for_completion(&wait); 904 + tg_pt_gp->tg_pt_gp_transition_complete = NULL; 905 + return 0; 906 + } 907 + 908 + /* 909 + * Save the old primary ALUA access state, and set the current state 910 + * to ALUA_ACCESS_STATE_TRANSITION. 911 + */ 912 + tg_pt_gp->tg_pt_gp_alua_previous_state = 913 + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 914 + tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; 915 + 916 + atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 917 + ALUA_ACCESS_STATE_TRANSITION); 918 + tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 
919 + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 920 + ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; 921 + 922 + /* 923 + * Check for the optional ALUA primary state transition delay 924 + */ 925 + if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) 926 + msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); 927 + 928 + /* 929 + * Take a reference for workqueue item 930 + */ 931 + spin_lock(&dev->t10_alua.tg_pt_gps_lock); 932 + atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 933 + smp_mb__after_atomic_inc(); 934 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 935 + 936 + if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { 937 + unsigned long transition_tmo; 938 + 939 + transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ; 940 + queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, 941 + &tg_pt_gp->tg_pt_gp_transition_work, 942 + transition_tmo); 943 + } else { 944 + tg_pt_gp->tg_pt_gp_transition_complete = &wait; 945 + queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, 946 + &tg_pt_gp->tg_pt_gp_transition_work, 0); 947 + wait_for_completion(&wait); 948 + tg_pt_gp->tg_pt_gp_transition_complete = NULL; 949 + } 950 951 return 0; 952 } ··· 878 int explicit) 879 { 880 struct se_device *dev; 881 struct t10_alua_lu_gp *lu_gp; 882 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; 883 struct t10_alua_tg_pt_gp *tg_pt_gp; 884 + int primary, valid_states, rc = 0; 885 886 + valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; 887 + if (core_alua_check_transition(new_state, valid_states, &primary) != 0) 888 return -EINVAL; 889 890 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; 891 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); ··· 911 * core_alua_do_transition_tg_pt() will always return 912 * success. 913 */ 914 + l_tg_pt_gp->tg_pt_gp_alua_port = l_port; 915 + l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; 916 + rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, 917 + new_state, explicit); 918 atomic_dec(&lu_gp->lu_gp_ref_cnt); 919 smp_mb__after_atomic_dec(); 920 + return rc; 921 } 922 /* 923 * For all other LU groups aside from 'default_lu_gp', walk all of ··· 951 continue; 952 953 if (l_tg_pt_gp == tg_pt_gp) { 954 + tg_pt_gp->tg_pt_gp_alua_port = l_port; 955 + tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; 956 } else { 957 + tg_pt_gp->tg_pt_gp_alua_port = NULL; 958 + tg_pt_gp->tg_pt_gp_alua_nacl = NULL; 959 } 960 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 961 smp_mb__after_atomic_inc(); ··· 964 * core_alua_do_transition_tg_pt() will always return 965 * success. 966 */ 967 + rc = core_alua_do_transition_tg_pt(tg_pt_gp, 968 + new_state, explicit); 969 970 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 971 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 972 smp_mb__after_atomic_dec(); 973 + if (rc) 974 + break; 975 } 976 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 977 ··· 979 } 980 spin_unlock(&lu_gp->lu_gp_lock); 981 982 + if (!rc) { 983 + pr_debug("Successfully processed LU Group: %s all ALUA TG PT" 984 + " Group IDs: %hu %s transition to primary state: %s\n", 985 + config_item_name(&lu_gp->lu_gp_group.cg_item), 986 + l_tg_pt_gp->tg_pt_gp_id, 987 + (explicit) ? 
"explicit" : "implicit", 988 + core_alua_dump_state(new_state)); 989 + } 990 991 atomic_dec(&lu_gp->lu_gp_ref_cnt); 992 smp_mb__after_atomic_dec(); 993 + return rc; 994 } 995 996 /* ··· 996 */ 997 static int core_alua_update_tpg_secondary_metadata( 998 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 999 + struct se_port *port) 1000 { 1001 + unsigned char *md_buf; 1002 struct se_portal_group *se_tpg = port->sep_tpg; 1003 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; 1004 + int len, rc; 1005 + 1006 + md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); 1007 + if (!md_buf) { 1008 + pr_err("Unable to allocate buf for ALUA metadata\n"); 1009 + return -ENOMEM; 1010 + } 1011 1012 memset(path, 0, ALUA_METADATA_PATH_LEN); 1013 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); ··· 1014 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", 1015 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); 1016 1017 + len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n" 1018 "alua_tg_pt_status=0x%02x\n", 1019 atomic_read(&port->sep_tg_pt_secondary_offline), 1020 port->sep_tg_pt_secondary_stat); ··· 1023 se_tpg->se_tpg_tfo->get_fabric_name(), wwn, 1024 port->sep_lun->unpacked_lun); 1025 1026 + rc = core_alua_write_tpg_metadata(path, md_buf, len); 1027 + kfree(md_buf); 1028 + 1029 + return rc; 1030 } 1031 1032 static int core_alua_set_tg_pt_secondary_state( ··· 1033 int offline) 1034 { 1035 struct t10_alua_tg_pt_gp *tg_pt_gp; 1036 int trans_delay_msecs; 1037 1038 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); ··· 1055 else 1056 atomic_set(&port->sep_tg_pt_secondary_offline, 0); 1057 1058 port->sep_tg_pt_secondary_stat = (explicit) ? 1059 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 1060 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; ··· 1077 * secondary state and status 1078 */ 1079 if (port->sep_tg_pt_secondary_write_md) { 1080 mutex_lock(&port->sep_tg_pt_md_mutex); 1081 + core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port); 1082 mutex_unlock(&port->sep_tg_pt_md_mutex); 1083 } 1084 1085 return 0; 1086 + } 1087 + 1088 + struct t10_alua_lba_map * 1089 + core_alua_allocate_lba_map(struct list_head *list, 1090 + u64 first_lba, u64 last_lba) 1091 + { 1092 + struct t10_alua_lba_map *lba_map; 1093 + 1094 + lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL); 1095 + if (!lba_map) { 1096 + pr_err("Unable to allocate struct t10_alua_lba_map\n"); 1097 + return ERR_PTR(-ENOMEM); 1098 + } 1099 + INIT_LIST_HEAD(&lba_map->lba_map_mem_list); 1100 + lba_map->lba_map_first_lba = first_lba; 1101 + lba_map->lba_map_last_lba = last_lba; 1102 + 1103 + list_add_tail(&lba_map->lba_map_list, list); 1104 + return lba_map; 1105 + } 1106 + 1107 + int 1108 + core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map, 1109 + int pg_id, int state) 1110 + { 1111 + struct t10_alua_lba_map_member *lba_map_mem; 1112 + 1113 + list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list, 1114 + lba_map_mem_list) { 1115 + if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) { 1116 + pr_err("Duplicate pg_id %d in lba_map\n", pg_id); 1117 + return -EINVAL; 1118 + } 1119 + } 1120 + 1121 + lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL); 1122 + if (!lba_map_mem) { 1123 + pr_err("Unable to allocate struct t10_alua_lba_map_mem\n"); 1124 + return -ENOMEM; 1125 + } 1126 + lba_map_mem->lba_map_mem_alua_state = state; 1127 + lba_map_mem->lba_map_mem_alua_pg_id = pg_id; 1128 + 1129 + list_add_tail(&lba_map_mem->lba_map_mem_list, 1130 + &lba_map->lba_map_mem_list); 1131 + return 0; 1132 + } 1133 + 1134 
+ void 1135 + core_alua_free_lba_map(struct list_head *lba_list) 1136 + { 1137 + struct t10_alua_lba_map *lba_map, *lba_map_tmp; 1138 + struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp; 1139 + 1140 + list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list, 1141 + lba_map_list) { 1142 + list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp, 1143 + &lba_map->lba_map_mem_list, 1144 + lba_map_mem_list) { 1145 + list_del(&lba_map_mem->lba_map_mem_list); 1146 + kmem_cache_free(t10_alua_lba_map_mem_cache, 1147 + lba_map_mem); 1148 + } 1149 + list_del(&lba_map->lba_map_list); 1150 + kmem_cache_free(t10_alua_lba_map_cache, lba_map); 1151 + } 1152 + } 1153 + 1154 + void 1155 + core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list, 1156 + int segment_size, int segment_mult) 1157 + { 1158 + struct list_head old_lba_map_list; 1159 + struct t10_alua_tg_pt_gp *tg_pt_gp; 1160 + int activate = 0, supported; 1161 + 1162 + INIT_LIST_HEAD(&old_lba_map_list); 1163 + spin_lock(&dev->t10_alua.lba_map_lock); 1164 + dev->t10_alua.lba_map_segment_size = segment_size; 1165 + dev->t10_alua.lba_map_segment_multiplier = segment_mult; 1166 + list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list); 1167 + if (lba_map_list) { 1168 + list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list); 1169 + activate = 1; 1170 + } 1171 + spin_unlock(&dev->t10_alua.lba_map_lock); 1172 + spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1173 + list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, 1174 + tg_pt_gp_list) { 1175 + 1176 + if (!tg_pt_gp->tg_pt_gp_valid_id) 1177 + continue; 1178 + supported = tg_pt_gp->tg_pt_gp_alua_supported_states; 1179 + if (activate) 1180 + supported |= ALUA_LBD_SUP; 1181 + else 1182 + supported &= ~ALUA_LBD_SUP; 1183 + tg_pt_gp->tg_pt_gp_alua_supported_states = supported; 1184 + } 1185 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1186 + core_alua_free_lba_map(&old_lba_map_list); 1187 } 1188 1189 struct t10_alua_lu_gp * ··· 1346 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); 1347 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1348 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); 1349 + INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work, 1350 + core_alua_do_transition_tg_pt_work); 1351 tg_pt_gp->tg_pt_gp_dev = dev; 1352 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1353 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); 1354 /* ··· 1474 list_del(&tg_pt_gp->tg_pt_gp_list); 1475 dev->t10_alua.alua_tg_pt_gps_counter--; 1476 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1477 + 1478 + flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); 1479 1480 /* 1481 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
+14 -1
drivers/target/target_core_alua.h
··· 13 /* 14 * ASYMMETRIC ACCESS STATE field 15 * 16 - * from spc4r17 section 6.27 Table 245 17 */ 18 #define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0 19 #define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1 20 #define ALUA_ACCESS_STATE_STANDBY 0x2 21 #define ALUA_ACCESS_STATE_UNAVAILABLE 0x3 22 #define ALUA_ACCESS_STATE_OFFLINE 0xe 23 #define ALUA_ACCESS_STATE_TRANSITION 0xf 24 ··· 79 */ 80 #define ALUA_SECONDARY_METADATA_WWN_LEN 256 81 82 extern struct kmem_cache *t10_alua_lu_gp_cache; 83 extern struct kmem_cache *t10_alua_lu_gp_mem_cache; 84 extern struct kmem_cache *t10_alua_tg_pt_gp_cache; 85 extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 86 87 extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *); 88 extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *); 89 extern int core_alua_check_nonop_delay(struct se_cmd *); 90 extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, 91 struct se_device *, struct se_port *, 92 struct se_node_acl *, int, int); 93 extern char *core_alua_dump_status(int); 94 extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int); 95 extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16); 96 extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
··· 13 /* 14 * ASYMMETRIC ACCESS STATE field 15 * 16 + * from spc4r36j section 6.37 Table 307 17 */ 18 #define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0 19 #define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1 20 #define ALUA_ACCESS_STATE_STANDBY 0x2 21 #define ALUA_ACCESS_STATE_UNAVAILABLE 0x3 22 + #define ALUA_ACCESS_STATE_LBA_DEPENDENT 0x4 23 #define ALUA_ACCESS_STATE_OFFLINE 0xe 24 #define ALUA_ACCESS_STATE_TRANSITION 0xf 25 ··· 78 */ 79 #define ALUA_SECONDARY_METADATA_WWN_LEN 256 80 81 + /* Used by core_alua_update_tpg_(primary,secondary)_metadata */ 82 + #define ALUA_MD_BUF_LEN 1024 83 + 84 extern struct kmem_cache *t10_alua_lu_gp_cache; 85 extern struct kmem_cache *t10_alua_lu_gp_mem_cache; 86 extern struct kmem_cache *t10_alua_tg_pt_gp_cache; 87 extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 88 + extern struct kmem_cache *t10_alua_lba_map_cache; 89 + extern struct kmem_cache *t10_alua_lba_map_mem_cache; 90 91 extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *); 92 extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *); 93 + extern sense_reason_t target_emulate_report_referrals(struct se_cmd *); 94 extern int core_alua_check_nonop_delay(struct se_cmd *); 95 extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, 96 struct se_device *, struct se_port *, 97 struct se_node_acl *, int, int); 98 extern char *core_alua_dump_status(int); 99 + extern struct t10_alua_lba_map *core_alua_allocate_lba_map( 100 + struct list_head *, u64, u64); 101 + extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int); 102 + extern void core_alua_free_lba_map(struct list_head *); 103 + extern void core_alua_set_lba_map(struct se_device *, struct list_head *, 104 + int, int); 105 extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int); 106 extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16); 107 extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
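
For reference, a minimal sketch of how the three LBA-map exports declared above fit together; it is not part of the diff, and the function name, LBA ranges, port group IDs and segment parameters are all hypothetical.

    /*
     * Illustrative sketch only (not from this series): build a two-segment
     * referral map on an already-configured struct se_device.  All numeric
     * values below are made-up examples.
     */
    static int example_install_lba_map(struct se_device *dev)
    {
    	struct t10_alua_lba_map *map;
    	LIST_HEAD(lba_list);
    	int ret;

    	/* LBAs 0..1023 -> active/optimized on target port group 1 */
    	map = core_alua_allocate_lba_map(&lba_list, 0, 1023);
    	if (IS_ERR(map))
    		return PTR_ERR(map);
    	ret = core_alua_allocate_lba_map_mem(map, 1,
    				ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
    	if (ret)
    		goto out_free;

    	/* LBAs 1024..2047 -> active/optimized on target port group 2 */
    	map = core_alua_allocate_lba_map(&lba_list, 1024, 2047);
    	if (IS_ERR(map)) {
    		ret = PTR_ERR(map);
    		goto out_free;
    	}
    	ret = core_alua_allocate_lba_map_mem(map, 2,
    				ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
    	if (ret)
    		goto out_free;

    	/* Splice the map into the device and advertise ALUA_LBD_SUP */
    	core_alua_set_lba_map(dev, &lba_list, 1024, 1);
    	return 0;

    out_free:
    	core_alua_free_lba_map(&lba_list);
    	return ret;
    }
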
+192 -2
drivers/target/target_core_configfs.c
··· 643 DEF_DEV_ATTRIB(emulate_3pc); 644 SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR); 645 646 DEF_DEV_ATTRIB(enforce_pr_isids); 647 SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); 648 ··· 711 &target_core_dev_attrib_emulate_tpws.attr, 712 &target_core_dev_attrib_emulate_caw.attr, 713 &target_core_dev_attrib_emulate_3pc.attr, 714 &target_core_dev_attrib_enforce_pr_isids.attr, 715 &target_core_dev_attrib_is_nonrot.attr, 716 &target_core_dev_attrib_emulate_rest_reord.attr, ··· 1753 .store = target_core_store_alua_lu_gp, 1754 }; 1755 1756 static struct configfs_attribute *lio_core_dev_attrs[] = { 1757 &target_core_attr_dev_info.attr, 1758 &target_core_attr_dev_control.attr, ··· 1930 &target_core_attr_dev_udev_path.attr, 1931 &target_core_attr_dev_enable.attr, 1932 &target_core_attr_dev_alua_lu_gp.attr, 1933 NULL, 1934 }; 1935 ··· 2237 " transition while TPGS_IMPLICIT_ALUA is disabled\n"); 2238 return -EINVAL; 2239 } 2240 2241 ret = core_alua_do_port_transition(tg_pt_gp, dev, 2242 NULL, NULL, new_state, 0); ··· 2378 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); 2379 SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent, 2380 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); 2381 - SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR); 2382 2383 SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable, 2384 tg_pt_gp_alua_supported_states, ALUA_U_SUP); ··· 3127 * and ALUA Logical Unit Group and Target Port Group infrastructure. 3128 */ 3129 target_cg = &subsys->su_group; 3130 - target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2, 3131 GFP_KERNEL); 3132 if (!target_cg->default_groups) { 3133 pr_err("Unable to allocate target_cg->default_groups\n");
··· 643 DEF_DEV_ATTRIB(emulate_3pc); 644 SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR); 645 646 + DEF_DEV_ATTRIB(pi_prot_type); 647 + SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR); 648 + 649 + DEF_DEV_ATTRIB_RO(hw_pi_prot_type); 650 + SE_DEV_ATTR_RO(hw_pi_prot_type); 651 + 652 + DEF_DEV_ATTRIB(pi_prot_format); 653 + SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR); 654 + 655 DEF_DEV_ATTRIB(enforce_pr_isids); 656 SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); 657 ··· 702 &target_core_dev_attrib_emulate_tpws.attr, 703 &target_core_dev_attrib_emulate_caw.attr, 704 &target_core_dev_attrib_emulate_3pc.attr, 705 + &target_core_dev_attrib_pi_prot_type.attr, 706 + &target_core_dev_attrib_hw_pi_prot_type.attr, 707 + &target_core_dev_attrib_pi_prot_format.attr, 708 &target_core_dev_attrib_enforce_pr_isids.attr, 709 &target_core_dev_attrib_is_nonrot.attr, 710 &target_core_dev_attrib_emulate_rest_reord.attr, ··· 1741 .store = target_core_store_alua_lu_gp, 1742 }; 1743 1744 + static ssize_t target_core_show_dev_lba_map(void *p, char *page) 1745 + { 1746 + struct se_device *dev = p; 1747 + struct t10_alua_lba_map *map; 1748 + struct t10_alua_lba_map_member *mem; 1749 + char *b = page; 1750 + int bl = 0; 1751 + char state; 1752 + 1753 + spin_lock(&dev->t10_alua.lba_map_lock); 1754 + if (!list_empty(&dev->t10_alua.lba_map_list)) 1755 + bl += sprintf(b + bl, "%u %u\n", 1756 + dev->t10_alua.lba_map_segment_size, 1757 + dev->t10_alua.lba_map_segment_multiplier); 1758 + list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) { 1759 + bl += sprintf(b + bl, "%llu %llu", 1760 + map->lba_map_first_lba, map->lba_map_last_lba); 1761 + list_for_each_entry(mem, &map->lba_map_mem_list, 1762 + lba_map_mem_list) { 1763 + switch (mem->lba_map_mem_alua_state) { 1764 + case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: 1765 + state = 'O'; 1766 + break; 1767 + case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 1768 + state = 'A'; 1769 + break; 1770 + case ALUA_ACCESS_STATE_STANDBY: 1771 + state = 'S'; 1772 + break; 1773 + case ALUA_ACCESS_STATE_UNAVAILABLE: 1774 + state = 'U'; 1775 + break; 1776 + default: 1777 + state = '.'; 1778 + break; 1779 + } 1780 + bl += sprintf(b + bl, " %d:%c", 1781 + mem->lba_map_mem_alua_pg_id, state); 1782 + } 1783 + bl += sprintf(b + bl, "\n"); 1784 + } 1785 + spin_unlock(&dev->t10_alua.lba_map_lock); 1786 + return bl; 1787 + } 1788 + 1789 + static ssize_t target_core_store_dev_lba_map( 1790 + void *p, 1791 + const char *page, 1792 + size_t count) 1793 + { 1794 + struct se_device *dev = p; 1795 + struct t10_alua_lba_map *lba_map = NULL; 1796 + struct list_head lba_list; 1797 + char *map_entries, *ptr; 1798 + char state; 1799 + int pg_num = -1, pg; 1800 + int ret = 0, num = 0, pg_id, alua_state; 1801 + unsigned long start_lba = -1, end_lba = -1; 1802 + unsigned long segment_size = -1, segment_mult = -1; 1803 + 1804 + map_entries = kstrdup(page, GFP_KERNEL); 1805 + if (!map_entries) 1806 + return -ENOMEM; 1807 + 1808 + INIT_LIST_HEAD(&lba_list); 1809 + while ((ptr = strsep(&map_entries, "\n")) != NULL) { 1810 + if (!*ptr) 1811 + continue; 1812 + 1813 + if (num == 0) { 1814 + if (sscanf(ptr, "%lu %lu\n", 1815 + &segment_size, &segment_mult) != 2) { 1816 + pr_err("Invalid line %d\n", num); 1817 + ret = -EINVAL; 1818 + break; 1819 + } 1820 + num++; 1821 + continue; 1822 + } 1823 + if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) { 1824 + pr_err("Invalid line %d\n", num); 1825 + ret = -EINVAL; 1826 + break; 1827 + } 1828 + ptr = strchr(ptr, ' '); 1829 + if (!ptr) { 1830 + pr_err("Invalid line %d, 
missing end lba\n", num); 1831 + ret = -EINVAL; 1832 + break; 1833 + } 1834 + ptr++; 1835 + ptr = strchr(ptr, ' '); 1836 + if (!ptr) { 1837 + pr_err("Invalid line %d, missing state definitions\n", 1838 + num); 1839 + ret = -EINVAL; 1840 + break; 1841 + } 1842 + ptr++; 1843 + lba_map = core_alua_allocate_lba_map(&lba_list, 1844 + start_lba, end_lba); 1845 + if (IS_ERR(lba_map)) { 1846 + ret = PTR_ERR(lba_map); 1847 + break; 1848 + } 1849 + pg = 0; 1850 + while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) { 1851 + switch (state) { 1852 + case 'O': 1853 + alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; 1854 + break; 1855 + case 'A': 1856 + alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED; 1857 + break; 1858 + case 'S': 1859 + alua_state = ALUA_ACCESS_STATE_STANDBY; 1860 + break; 1861 + case 'U': 1862 + alua_state = ALUA_ACCESS_STATE_UNAVAILABLE; 1863 + break; 1864 + default: 1865 + pr_err("Invalid ALUA state '%c'\n", state); 1866 + ret = -EINVAL; 1867 + goto out; 1868 + } 1869 + 1870 + ret = core_alua_allocate_lba_map_mem(lba_map, 1871 + pg_id, alua_state); 1872 + if (ret) { 1873 + pr_err("Invalid target descriptor %d:%c " 1874 + "at line %d\n", 1875 + pg_id, state, num); 1876 + break; 1877 + } 1878 + pg++; 1879 + ptr = strchr(ptr, ' '); 1880 + if (ptr) 1881 + ptr++; 1882 + else 1883 + break; 1884 + } 1885 + if (pg_num == -1) 1886 + pg_num = pg; 1887 + else if (pg != pg_num) { 1888 + pr_err("Only %d from %d port groups definitions " 1889 + "at line %d\n", pg, pg_num, num); 1890 + ret = -EINVAL; 1891 + break; 1892 + } 1893 + num++; 1894 + } 1895 + out: 1896 + if (ret) { 1897 + core_alua_free_lba_map(&lba_list); 1898 + count = ret; 1899 + } else 1900 + core_alua_set_lba_map(dev, &lba_list, 1901 + segment_size, segment_mult); 1902 + kfree(map_entries); 1903 + return count; 1904 + } 1905 + 1906 + static struct target_core_configfs_attribute target_core_attr_dev_lba_map = { 1907 + .attr = { .ca_owner = THIS_MODULE, 1908 + .ca_name = "lba_map", 1909 + .ca_mode = S_IRUGO | S_IWUSR }, 1910 + .show = target_core_show_dev_lba_map, 1911 + .store = target_core_store_dev_lba_map, 1912 + }; 1913 + 1914 static struct configfs_attribute *lio_core_dev_attrs[] = { 1915 &target_core_attr_dev_info.attr, 1916 &target_core_attr_dev_control.attr, ··· 1748 &target_core_attr_dev_udev_path.attr, 1749 &target_core_attr_dev_enable.attr, 1750 &target_core_attr_dev_alua_lu_gp.attr, 1751 + &target_core_attr_dev_lba_map.attr, 1752 NULL, 1753 }; 1754 ··· 2054 " transition while TPGS_IMPLICIT_ALUA is disabled\n"); 2055 return -EINVAL; 2056 } 2057 + if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA && 2058 + new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) { 2059 + /* LBA DEPENDENT is only allowed with implicit ALUA */ 2060 + pr_err("Unable to process implicit configfs ALUA transition" 2061 + " while explicit ALUA management is enabled\n"); 2062 + return -EINVAL; 2063 + } 2064 2065 ret = core_alua_do_port_transition(tg_pt_gp, dev, 2066 NULL, NULL, new_state, 0); ··· 2188 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); 2189 SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent, 2190 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); 2191 + SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO); 2192 2193 SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable, 2194 tg_pt_gp_alua_supported_states, ALUA_U_SUP); ··· 2937 * and ALUA Logical Unit Group and Target Port Group infrastructure. 
2938 */ 2939 target_cg = &subsys->su_group; 2940 + target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, 2941 GFP_KERNEL); 2942 if (!target_cg->default_groups) { 2943 pr_err("Unable to allocate target_cg->default_groups\n");
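
The lba_map attribute added above uses a small line-oriented text format that falls straight out of the store handler: the first non-empty line carries segment_size and segment_mult, and each following line describes one LBA segment as first_lba, last_lba and a list of pg_id:state pairs, where the state letter is O (active/optimized), A (active/non-optimized), S (standby) or U (unavailable). A purely illustrative two-segment map across two target port groups would look like:

    1024 1
    0 1023 1:O 2:A
    1024 2047 1:A 2:O

Every segment line must name the same number of port groups, otherwise the store handler rejects the input.
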
+100 -8
drivers/target/target_core_device.c
··· 918 return 0; 919 } 920 921 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) 922 { 923 if ((flag != 0) && (flag != 1)) { ··· 1201 struct se_lun *core_dev_add_lun( 1202 struct se_portal_group *tpg, 1203 struct se_device *dev, 1204 - u32 lun) 1205 { 1206 - struct se_lun *lun_p; 1207 int rc; 1208 1209 - lun_p = core_tpg_pre_addlun(tpg, lun); 1210 - if (IS_ERR(lun_p)) 1211 - return lun_p; 1212 1213 - rc = core_tpg_post_addlun(tpg, lun_p, 1214 TRANSPORT_LUNFLAGS_READ_WRITE, dev); 1215 if (rc < 0) 1216 return ERR_PTR(rc); 1217 1218 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" 1219 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1220 - tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, 1221 tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id); 1222 /* 1223 * Update LUN maps for dynamically added initiators when ··· 1238 spin_unlock_irq(&tpg->acl_node_lock); 1239 } 1240 1241 - return lun_p; 1242 } 1243 1244 /* core_dev_del_lun(): ··· 1504 dev->dev_link_magic = SE_DEV_LINK_MAGIC; 1505 dev->se_hba = hba; 1506 dev->transport = hba->transport; 1507 1508 INIT_LIST_HEAD(&dev->dev_list); 1509 INIT_LIST_HEAD(&dev->dev_sep_list); ··· 1529 spin_lock_init(&dev->t10_pr.aptpl_reg_lock); 1530 INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list); 1531 spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); 1532 1533 dev->t10_wwn.t10_dev = dev; 1534 dev->t10_alua.t10_dev = dev; ··· 1547 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; 1548 dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; 1549 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; 1550 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 1551 dev->dev_attrib.is_nonrot = DA_IS_NONROT; 1552 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; ··· 1676 } 1677 1678 core_alua_free_lu_gp_mem(dev); 1679 core_scsi3_free_all_registrations(dev); 1680 se_release_vpd_for_dev(dev); 1681 1682 dev->transport->free_device(dev); 1683 }
··· 918 return 0; 919 } 920 921 + int se_dev_set_pi_prot_type(struct se_device *dev, int flag) 922 + { 923 + int rc, old_prot = dev->dev_attrib.pi_prot_type; 924 + 925 + if (flag != 0 && flag != 1 && flag != 2 && flag != 3) { 926 + pr_err("Illegal value %d for pi_prot_type\n", flag); 927 + return -EINVAL; 928 + } 929 + if (flag == 2) { 930 + pr_err("DIF TYPE2 protection currently not supported\n"); 931 + return -ENOSYS; 932 + } 933 + if (dev->dev_attrib.hw_pi_prot_type) { 934 + pr_warn("DIF protection enabled on underlying hardware," 935 + " ignoring\n"); 936 + return 0; 937 + } 938 + if (!dev->transport->init_prot || !dev->transport->free_prot) { 939 + pr_err("DIF protection not supported by backend: %s\n", 940 + dev->transport->name); 941 + return -ENOSYS; 942 + } 943 + if (!(dev->dev_flags & DF_CONFIGURED)) { 944 + pr_err("DIF protection requires device to be configured\n"); 945 + return -ENODEV; 946 + } 947 + if (dev->export_count) { 948 + pr_err("dev[%p]: Unable to change SE Device PROT type while" 949 + " export_count is %d\n", dev, dev->export_count); 950 + return -EINVAL; 951 + } 952 + 953 + dev->dev_attrib.pi_prot_type = flag; 954 + 955 + if (flag && !old_prot) { 956 + rc = dev->transport->init_prot(dev); 957 + if (rc) { 958 + dev->dev_attrib.pi_prot_type = old_prot; 959 + return rc; 960 + } 961 + 962 + } else if (!flag && old_prot) { 963 + dev->transport->free_prot(dev); 964 + } 965 + pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag); 966 + 967 + return 0; 968 + } 969 + 970 + int se_dev_set_pi_prot_format(struct se_device *dev, int flag) 971 + { 972 + int rc; 973 + 974 + if (!flag) 975 + return 0; 976 + 977 + if (flag != 1) { 978 + pr_err("Illegal value %d for pi_prot_format\n", flag); 979 + return -EINVAL; 980 + } 981 + if (!dev->transport->format_prot) { 982 + pr_err("DIF protection format not supported by backend %s\n", 983 + dev->transport->name); 984 + return -ENOSYS; 985 + } 986 + if (!(dev->dev_flags & DF_CONFIGURED)) { 987 + pr_err("DIF protection format requires device to be configured\n"); 988 + return -ENODEV; 989 + } 990 + if (dev->export_count) { 991 + pr_err("dev[%p]: Unable to format SE Device PROT type while" 992 + " export_count is %d\n", dev, dev->export_count); 993 + return -EINVAL; 994 + } 995 + 996 + rc = dev->transport->format_prot(dev); 997 + if (rc) 998 + return rc; 999 + 1000 + pr_debug("dev[%p]: SE Device Protection Format complete\n", dev); 1001 + 1002 + return 0; 1003 + } 1004 + 1005 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) 1006 { 1007 if ((flag != 0) && (flag != 1)) { ··· 1117 struct se_lun *core_dev_add_lun( 1118 struct se_portal_group *tpg, 1119 struct se_device *dev, 1120 + u32 unpacked_lun) 1121 { 1122 + struct se_lun *lun; 1123 int rc; 1124 1125 + lun = core_tpg_alloc_lun(tpg, unpacked_lun); 1126 + if (IS_ERR(lun)) 1127 + return lun; 1128 1129 + rc = core_tpg_add_lun(tpg, lun, 1130 TRANSPORT_LUNFLAGS_READ_WRITE, dev); 1131 if (rc < 0) 1132 return ERR_PTR(rc); 1133 1134 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" 1135 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1136 + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 1137 tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id); 1138 /* 1139 * Update LUN maps for dynamically added initiators when ··· 1154 spin_unlock_irq(&tpg->acl_node_lock); 1155 } 1156 1157 + return lun; 1158 } 1159 1160 /* core_dev_del_lun(): ··· 1420 dev->dev_link_magic = SE_DEV_LINK_MAGIC; 1421 dev->se_hba = hba; 1422 dev->transport = hba->transport; 1423 
+ dev->prot_length = sizeof(struct se_dif_v1_tuple); 1424 1425 INIT_LIST_HEAD(&dev->dev_list); 1426 INIT_LIST_HEAD(&dev->dev_sep_list); ··· 1444 spin_lock_init(&dev->t10_pr.aptpl_reg_lock); 1445 INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list); 1446 spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); 1447 + INIT_LIST_HEAD(&dev->t10_alua.lba_map_list); 1448 + spin_lock_init(&dev->t10_alua.lba_map_lock); 1449 1450 dev->t10_wwn.t10_dev = dev; 1451 dev->t10_alua.t10_dev = dev; ··· 1460 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; 1461 dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; 1462 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; 1463 + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; 1464 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 1465 dev->dev_attrib.is_nonrot = DA_IS_NONROT; 1466 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; ··· 1588 } 1589 1590 core_alua_free_lu_gp_mem(dev); 1591 + core_alua_set_lba_map(dev, NULL, 0, 0); 1592 core_scsi3_free_all_registrations(dev); 1593 se_release_vpd_for_dev(dev); 1594 + 1595 + if (dev->transport->free_prot) 1596 + dev->transport->free_prot(dev); 1597 1598 dev->transport->free_device(dev); 1599 }
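
A quick sketch of the calling order these two setters expect when DIF emulation is switched on through the new pi_prot_type/pi_prot_format configfs attributes; this is not part of the diff, and it assumes dev is an already-configured, non-exported se_device whose backend implements init_prot/format_prot:

    int rc;

    rc = se_dev_set_pi_prot_type(dev, 1);		/* DIF TYPE1; TYPE2 is rejected */
    if (!rc)
    	rc = se_dev_set_pi_prot_format(dev, 1);	/* format only after a type is set */

Both setters refuse to change anything while the device is exported, and pi_prot_format is rejected outright for backends that do not implement format_prot.
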
+1 -1
drivers/target/target_core_fabric_configfs.c
··· 906 lun_cg->default_groups[1] = NULL; 907 908 port_stat_grp = &lun->port_stat_grps.stat_group; 909 - port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, 910 GFP_KERNEL); 911 if (!port_stat_grp->default_groups) { 912 pr_err("Unable to allocate port_stat_grp->default_groups\n");
··· 906 lun_cg->default_groups[1] = NULL; 907 908 port_stat_grp = &lun->port_stat_grps.stat_group; 909 + port_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4, 910 GFP_KERNEL); 911 if (!port_stat_grp->default_groups) { 912 pr_err("Unable to allocate port_stat_grp->default_groups\n");
+255 -1
drivers/target/target_core_file.c
··· 257 kfree(fd_dev); 258 } 259 260 static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl, 261 u32 sgl_nents, int is_write) 262 { ··· 617 enum dma_data_direction data_direction) 618 { 619 struct se_device *dev = cmd->se_dev; 620 int ret = 0; 621 622 /* ··· 626 * physical memory addresses to struct iovec virtual memory. 627 */ 628 if (data_direction == DMA_FROM_DEVICE) { 629 ret = fd_do_rw(cmd, sgl, sgl_nents, 0); 630 } else { 631 ret = fd_do_rw(cmd, sgl, sgl_nents, 1); 632 /* 633 * Perform implicit vfs_fsync_range() for fd_do_writev() ops ··· 684 685 vfs_fsync_range(fd_dev->fd_file, start, end, 1); 686 } 687 } 688 689 - if (ret < 0) 690 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 691 692 if (ret) 693 target_complete_cmd(cmd, SAM_STAT_GOOD); ··· 817 dev->dev_attrib.block_size); 818 } 819 820 static struct sbc_ops fd_sbc_ops = { 821 .execute_rw = fd_execute_rw, 822 .execute_sync_cache = fd_execute_sync_cache, ··· 981 .show_configfs_dev_params = fd_show_configfs_dev_params, 982 .get_device_type = sbc_get_device_type, 983 .get_blocks = fd_get_blocks, 984 }; 985 986 static int __init fileio_module_init(void)
··· 257 kfree(fd_dev); 258 } 259 260 + static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, 261 + int is_write) 262 + { 263 + struct se_device *se_dev = cmd->se_dev; 264 + struct fd_dev *dev = FD_DEV(se_dev); 265 + struct file *prot_fd = dev->fd_prot_file; 266 + struct scatterlist *sg; 267 + loff_t pos = (cmd->t_task_lba * se_dev->prot_length); 268 + unsigned char *buf; 269 + u32 prot_size, len, size; 270 + int rc, ret = 1, i; 271 + 272 + prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * 273 + se_dev->prot_length; 274 + 275 + if (!is_write) { 276 + fd_prot->prot_buf = vzalloc(prot_size); 277 + if (!fd_prot->prot_buf) { 278 + pr_err("Unable to allocate fd_prot->prot_buf\n"); 279 + return -ENOMEM; 280 + } 281 + buf = fd_prot->prot_buf; 282 + 283 + fd_prot->prot_sg_nents = cmd->t_prot_nents; 284 + fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) * 285 + fd_prot->prot_sg_nents, GFP_KERNEL); 286 + if (!fd_prot->prot_sg) { 287 + pr_err("Unable to allocate fd_prot->prot_sg\n"); 288 + vfree(fd_prot->prot_buf); 289 + return -ENOMEM; 290 + } 291 + size = prot_size; 292 + 293 + for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) { 294 + 295 + len = min_t(u32, PAGE_SIZE, size); 296 + sg_set_buf(sg, buf, len); 297 + size -= len; 298 + buf += len; 299 + } 300 + } 301 + 302 + if (is_write) { 303 + rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos); 304 + if (rc < 0 || prot_size != rc) { 305 + pr_err("kernel_write() for fd_do_prot_rw failed:" 306 + " %d\n", rc); 307 + ret = -EINVAL; 308 + } 309 + } else { 310 + rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size); 311 + if (rc < 0) { 312 + pr_err("kernel_read() for fd_do_prot_rw failed:" 313 + " %d\n", rc); 314 + ret = -EINVAL; 315 + } 316 + } 317 + 318 + if (is_write || ret < 0) { 319 + kfree(fd_prot->prot_sg); 320 + vfree(fd_prot->prot_buf); 321 + } 322 + 323 + return ret; 324 + } 325 + 326 static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl, 327 u32 sgl_nents, int is_write) 328 { ··· 551 enum dma_data_direction data_direction) 552 { 553 struct se_device *dev = cmd->se_dev; 554 + struct fd_prot fd_prot; 555 + sense_reason_t rc; 556 int ret = 0; 557 558 /* ··· 558 * physical memory addresses to struct iovec virtual memory. 
559 */ 560 if (data_direction == DMA_FROM_DEVICE) { 561 + memset(&fd_prot, 0, sizeof(struct fd_prot)); 562 + 563 + if (cmd->prot_type) { 564 + ret = fd_do_prot_rw(cmd, &fd_prot, false); 565 + if (ret < 0) 566 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 567 + } 568 + 569 ret = fd_do_rw(cmd, sgl, sgl_nents, 0); 570 + 571 + if (ret > 0 && cmd->prot_type) { 572 + u32 sectors = cmd->data_length / dev->dev_attrib.block_size; 573 + 574 + rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 575 + 0, fd_prot.prot_sg, 0); 576 + if (rc) { 577 + kfree(fd_prot.prot_sg); 578 + vfree(fd_prot.prot_buf); 579 + return rc; 580 + } 581 + kfree(fd_prot.prot_sg); 582 + vfree(fd_prot.prot_buf); 583 + } 584 } else { 585 + memset(&fd_prot, 0, sizeof(struct fd_prot)); 586 + 587 + if (cmd->prot_type) { 588 + u32 sectors = cmd->data_length / dev->dev_attrib.block_size; 589 + 590 + ret = fd_do_prot_rw(cmd, &fd_prot, false); 591 + if (ret < 0) 592 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 593 + 594 + rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 595 + 0, fd_prot.prot_sg, 0); 596 + if (rc) { 597 + kfree(fd_prot.prot_sg); 598 + vfree(fd_prot.prot_buf); 599 + return rc; 600 + } 601 + } 602 + 603 ret = fd_do_rw(cmd, sgl, sgl_nents, 1); 604 /* 605 * Perform implicit vfs_fsync_range() for fd_do_writev() ops ··· 576 577 vfs_fsync_range(fd_dev->fd_file, start, end, 1); 578 } 579 + 580 + if (ret > 0 && cmd->prot_type) { 581 + ret = fd_do_prot_rw(cmd, &fd_prot, true); 582 + if (ret < 0) 583 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 584 + } 585 } 586 587 + if (ret < 0) { 588 + kfree(fd_prot.prot_sg); 589 + vfree(fd_prot.prot_buf); 590 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 591 + } 592 593 if (ret) 594 target_complete_cmd(cmd, SAM_STAT_GOOD); ··· 700 dev->dev_attrib.block_size); 701 } 702 703 + static int fd_init_prot(struct se_device *dev) 704 + { 705 + struct fd_dev *fd_dev = FD_DEV(dev); 706 + struct file *prot_file, *file = fd_dev->fd_file; 707 + struct inode *inode; 708 + int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; 709 + char buf[FD_MAX_DEV_PROT_NAME]; 710 + 711 + if (!file) { 712 + pr_err("Unable to locate fd_dev->fd_file\n"); 713 + return -ENODEV; 714 + } 715 + 716 + inode = file->f_mapping->host; 717 + if (S_ISBLK(inode->i_mode)) { 718 + pr_err("FILEIO Protection emulation only supported on" 719 + " !S_ISBLK\n"); 720 + return -ENOSYS; 721 + } 722 + 723 + if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) 724 + flags &= ~O_DSYNC; 725 + 726 + snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection", 727 + fd_dev->fd_dev_name); 728 + 729 + prot_file = filp_open(buf, flags, 0600); 730 + if (IS_ERR(prot_file)) { 731 + pr_err("filp_open(%s) failed\n", buf); 732 + ret = PTR_ERR(prot_file); 733 + return ret; 734 + } 735 + fd_dev->fd_prot_file = prot_file; 736 + 737 + return 0; 738 + } 739 + 740 + static void fd_init_format_buf(struct se_device *dev, unsigned char *buf, 741 + u32 unit_size, u32 *ref_tag, u16 app_tag, 742 + bool inc_reftag) 743 + { 744 + unsigned char *p = buf; 745 + int i; 746 + 747 + for (i = 0; i < unit_size; i += dev->prot_length) { 748 + *((u16 *)&p[0]) = 0xffff; 749 + *((__be16 *)&p[2]) = cpu_to_be16(app_tag); 750 + *((__be32 *)&p[4]) = cpu_to_be32(*ref_tag); 751 + 752 + if (inc_reftag) 753 + (*ref_tag)++; 754 + 755 + p += dev->prot_length; 756 + } 757 + } 758 + 759 + static int fd_format_prot(struct se_device *dev) 760 + { 761 + struct fd_dev *fd_dev = FD_DEV(dev); 762 + struct file *prot_fd = fd_dev->fd_prot_file; 763 + sector_t prot_length, prot; 764 + 
unsigned char *buf; 765 + loff_t pos = 0; 766 + u32 ref_tag = 0; 767 + int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; 768 + int rc, ret = 0, size, len; 769 + bool inc_reftag = false; 770 + 771 + if (!dev->dev_attrib.pi_prot_type) { 772 + pr_err("Unable to format_prot while pi_prot_type == 0\n"); 773 + return -ENODEV; 774 + } 775 + if (!prot_fd) { 776 + pr_err("Unable to locate fd_dev->fd_prot_file\n"); 777 + return -ENODEV; 778 + } 779 + 780 + switch (dev->dev_attrib.pi_prot_type) { 781 + case TARGET_DIF_TYPE3_PROT: 782 + ref_tag = 0xffffffff; 783 + break; 784 + case TARGET_DIF_TYPE2_PROT: 785 + case TARGET_DIF_TYPE1_PROT: 786 + inc_reftag = true; 787 + break; 788 + default: 789 + break; 790 + } 791 + 792 + buf = vzalloc(unit_size); 793 + if (!buf) { 794 + pr_err("Unable to allocate FILEIO prot buf\n"); 795 + return -ENOMEM; 796 + } 797 + 798 + prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length; 799 + size = prot_length; 800 + 801 + pr_debug("Using FILEIO prot_length: %llu\n", 802 + (unsigned long long)prot_length); 803 + 804 + for (prot = 0; prot < prot_length; prot += unit_size) { 805 + 806 + fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff, 807 + inc_reftag); 808 + 809 + len = min(unit_size, size); 810 + 811 + rc = kernel_write(prot_fd, buf, len, pos); 812 + if (rc != len) { 813 + pr_err("vfs_write to prot file failed: %d\n", rc); 814 + ret = -ENODEV; 815 + goto out; 816 + } 817 + pos += len; 818 + size -= len; 819 + } 820 + 821 + out: 822 + vfree(buf); 823 + return ret; 824 + } 825 + 826 + static void fd_free_prot(struct se_device *dev) 827 + { 828 + struct fd_dev *fd_dev = FD_DEV(dev); 829 + 830 + if (!fd_dev->fd_prot_file) 831 + return; 832 + 833 + filp_close(fd_dev->fd_prot_file, NULL); 834 + fd_dev->fd_prot_file = NULL; 835 + } 836 + 837 static struct sbc_ops fd_sbc_ops = { 838 .execute_rw = fd_execute_rw, 839 .execute_sync_cache = fd_execute_sync_cache, ··· 730 .show_configfs_dev_params = fd_show_configfs_dev_params, 731 .get_device_type = sbc_get_device_type, 732 .get_blocks = fd_get_blocks, 733 + .init_prot = fd_init_prot, 734 + .format_prot = fd_format_prot, 735 + .free_prot = fd_free_prot, 736 }; 737 738 static int __init fileio_module_init(void)
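
Two orientation notes on the FILEIO protection support above: fd_init_prot() keeps the protection information in a companion file named "<fd_dev_name>.protection" (so a hypothetical backing file /tmp/vol0 would get /tmp/vol0.protection), and fd_format_prot() sizes it at one tuple per logical block, i.e. (get_blocks() + 1) * prot_length bytes, written out in chunks of FDBD_FORMAT_UNIT_SIZE * block_size bytes. With the 8-byte DIFv1 tuples set up in target_core_device.c, a 1 GiB backing store with 512-byte blocks (2,097,152 blocks) therefore needs roughly 16 MiB of protection data.
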
+9
drivers/target/target_core_file.h
··· 4 #define FD_VERSION "4.0" 5 6 #define FD_MAX_DEV_NAME 256 7 #define FD_DEVICE_QUEUE_DEPTH 32 8 #define FD_MAX_DEVICE_QUEUE_DEPTH 128 9 #define FD_BLOCKSIZE 512 ··· 19 #define FBDF_HAS_PATH 0x01 20 #define FBDF_HAS_SIZE 0x02 21 #define FDBD_HAS_BUFFERED_IO_WCE 0x04 22 23 struct fd_dev { 24 struct se_device dev; ··· 40 u32 fd_block_size; 41 unsigned long long fd_dev_size; 42 struct file *fd_file; 43 /* FILEIO HBA device is connected to */ 44 struct fd_host *fd_host; 45 } ____cacheline_aligned;
··· 4 #define FD_VERSION "4.0" 5 6 #define FD_MAX_DEV_NAME 256 7 + #define FD_MAX_DEV_PROT_NAME FD_MAX_DEV_NAME + 16 8 #define FD_DEVICE_QUEUE_DEPTH 32 9 #define FD_MAX_DEVICE_QUEUE_DEPTH 128 10 #define FD_BLOCKSIZE 512 ··· 18 #define FBDF_HAS_PATH 0x01 19 #define FBDF_HAS_SIZE 0x02 20 #define FDBD_HAS_BUFFERED_IO_WCE 0x04 21 + #define FDBD_FORMAT_UNIT_SIZE 2048 22 + 23 + struct fd_prot { 24 + unsigned char *prot_buf; 25 + struct scatterlist *prot_sg; 26 + u32 prot_sg_nents; 27 + }; 28 29 struct fd_dev { 30 struct se_device dev; ··· 32 u32 fd_block_size; 33 unsigned long long fd_dev_size; 34 struct file *fd_file; 35 + struct file *fd_prot_file; 36 /* FILEIO HBA device is connected to */ 37 struct fd_host *fd_host; 38 } ____cacheline_aligned;
+90 -3
drivers/target/target_core_iblock.c
··· 91 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 92 struct request_queue *q; 93 struct block_device *bd = NULL; 94 fmode_t mode; 95 int ret = -ENOMEM; 96 ··· 156 if (blk_queue_nonrot(q)) 157 dev->dev_attrib.is_nonrot = 1; 158 159 return 0; 160 161 out_free_bioset: 162 bioset_free(ib_dev->ibd_bio_set); 163 ib_dev->ibd_bio_set = NULL; ··· 203 204 if (ib_dev->ibd_bd != NULL) 205 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 206 - if (ib_dev->ibd_bio_set != NULL) 207 bioset_free(ib_dev->ibd_bio_set); 208 kfree(ib_dev); 209 } 210 ··· 621 return bl; 622 } 623 624 static sense_reason_t 625 iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, 626 enum dma_data_direction data_direction) 627 { 628 struct se_device *dev = cmd->se_dev; 629 struct iblock_req *ibr; 630 - struct bio *bio; 631 struct bio_list list; 632 struct scatterlist *sg; 633 u32 sg_num = sgl_nents; ··· 735 if (!bio) 736 goto fail_free_ibr; 737 738 bio_list_init(&list); 739 bio_list_add(&list, bio); 740 ··· 767 /* Always in 512 byte units for Linux/Block */ 768 block_lba += sg->length >> IBLOCK_LBA_SHIFT; 769 sg_num--; 770 } 771 772 iblock_submit_bios(&list, rw); ··· 850 return sbc_parse_cdb(cmd, &iblock_sbc_ops); 851 } 852 853 - bool iblock_get_write_cache(struct se_device *dev) 854 { 855 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 856 struct block_device *bd = ib_dev->ibd_bd;
··· 91 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 92 struct request_queue *q; 93 struct block_device *bd = NULL; 94 + struct blk_integrity *bi; 95 fmode_t mode; 96 int ret = -ENOMEM; 97 ··· 155 if (blk_queue_nonrot(q)) 156 dev->dev_attrib.is_nonrot = 1; 157 158 + bi = bdev_get_integrity(bd); 159 + if (bi) { 160 + struct bio_set *bs = ib_dev->ibd_bio_set; 161 + 162 + if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") || 163 + !strcmp(bi->name, "T10-DIF-TYPE1-IP")) { 164 + pr_err("IBLOCK export of blk_integrity: %s not" 165 + " supported\n", bi->name); 166 + ret = -ENOSYS; 167 + goto out_blkdev_put; 168 + } 169 + 170 + if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) { 171 + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT; 172 + } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) { 173 + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT; 174 + } 175 + 176 + if (dev->dev_attrib.pi_prot_type) { 177 + if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) { 178 + pr_err("Unable to allocate bioset for PI\n"); 179 + ret = -ENOMEM; 180 + goto out_blkdev_put; 181 + } 182 + pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n", 183 + bs->bio_integrity_pool); 184 + } 185 + dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type; 186 + } 187 + 188 return 0; 189 190 + out_blkdev_put: 191 + blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 192 out_free_bioset: 193 bioset_free(ib_dev->ibd_bio_set); 194 ib_dev->ibd_bio_set = NULL; ··· 170 171 if (ib_dev->ibd_bd != NULL) 172 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 173 + if (ib_dev->ibd_bio_set != NULL) { 174 + bioset_integrity_free(ib_dev->ibd_bio_set); 175 bioset_free(ib_dev->ibd_bio_set); 176 + } 177 kfree(ib_dev); 178 } 179 ··· 586 return bl; 587 } 588 589 + static int 590 + iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio) 591 + { 592 + struct se_device *dev = cmd->se_dev; 593 + struct blk_integrity *bi; 594 + struct bio_integrity_payload *bip; 595 + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 596 + struct scatterlist *sg; 597 + int i, rc; 598 + 599 + bi = bdev_get_integrity(ib_dev->ibd_bd); 600 + if (!bi) { 601 + pr_err("Unable to locate bio_integrity\n"); 602 + return -ENODEV; 603 + } 604 + 605 + bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents); 606 + if (!bip) { 607 + pr_err("Unable to allocate bio_integrity_payload\n"); 608 + return -ENOMEM; 609 + } 610 + 611 + bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) * 612 + dev->prot_length; 613 + bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; 614 + 615 + pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size, 616 + (unsigned long long)bip->bip_iter.bi_sector); 617 + 618 + for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) { 619 + 620 + rc = bio_integrity_add_page(bio, sg_page(sg), sg->length, 621 + sg->offset); 622 + if (rc != sg->length) { 623 + pr_err("bio_integrity_add_page() failed; %d\n", rc); 624 + return -ENOMEM; 625 + } 626 + 627 + pr_debug("Added bio integrity page: %p length: %d offset; %d\n", 628 + sg_page(sg), sg->length, sg->offset); 629 + } 630 + 631 + return 0; 632 + } 633 + 634 static sense_reason_t 635 iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, 636 enum dma_data_direction data_direction) 637 { 638 struct se_device *dev = cmd->se_dev; 639 struct iblock_req *ibr; 640 + struct bio *bio, *bio_start; 641 struct bio_list list; 642 struct scatterlist *sg; 643 u32 sg_num = sgl_nents; ··· 655 if (!bio) 656 goto fail_free_ibr; 657 658 + bio_start = bio; 659 
bio_list_init(&list); 660 bio_list_add(&list, bio); 661 ··· 686 /* Always in 512 byte units for Linux/Block */ 687 block_lba += sg->length >> IBLOCK_LBA_SHIFT; 688 sg_num--; 689 + } 690 + 691 + if (cmd->prot_type) { 692 + int rc = iblock_alloc_bip(cmd, bio_start); 693 + if (rc) 694 + goto fail_put_bios; 695 } 696 697 iblock_submit_bios(&list, rw); ··· 763 return sbc_parse_cdb(cmd, &iblock_sbc_ops); 764 } 765 766 + static bool iblock_get_write_cache(struct se_device *dev) 767 { 768 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 769 struct block_device *bd = ib_dev->ibd_bd;
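
The IBLOCK passthrough above only engages when the underlying block device advertises a T10-DIF-TYPE1-CRC or T10-DIF-TYPE3-CRC integrity profile, and iblock_alloc_bip() sizes the payload as (data_length / block_size) * prot_length. As a worked example with illustrative numbers: a 64 KiB WRITE against a 512-byte-block device spans 128 sectors, so 128 * 8 = 1024 bytes of protection information from cmd->t_prot_sg are attached to the first bio of the chain.
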
+5 -3
drivers/target/target_core_internal.h
··· 35 int se_dev_set_emulate_tpws(struct se_device *, int); 36 int se_dev_set_emulate_caw(struct se_device *, int); 37 int se_dev_set_emulate_3pc(struct se_device *, int); 38 int se_dev_set_enforce_pr_isids(struct se_device *, int); 39 int se_dev_set_is_nonrot(struct se_device *, int); 40 int se_dev_set_emulate_rest_reord(struct se_device *dev, int); ··· 79 const char *); 80 void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *); 81 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); 82 - struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); 83 - int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, 84 - u32, void *); 85 struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun); 86 int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); 87
··· 35 int se_dev_set_emulate_tpws(struct se_device *, int); 36 int se_dev_set_emulate_caw(struct se_device *, int); 37 int se_dev_set_emulate_3pc(struct se_device *, int); 38 + int se_dev_set_pi_prot_type(struct se_device *, int); 39 + int se_dev_set_pi_prot_format(struct se_device *, int); 40 int se_dev_set_enforce_pr_isids(struct se_device *, int); 41 int se_dev_set_is_nonrot(struct se_device *, int); 42 int se_dev_set_emulate_rest_reord(struct se_device *dev, int); ··· 77 const char *); 78 void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *); 79 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); 80 + struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32); 81 + int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, 82 + u32, struct se_device *); 83 struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun); 84 int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); 85
+5
drivers/target/target_core_pr.h
··· 43 #define PR_APTPL_MAX_IPORT_LEN 256 44 #define PR_APTPL_MAX_TPORT_LEN 256 45 46 extern struct kmem_cache *t10_pr_reg_cache; 47 48 extern void core_pr_dump_initiator_port(struct t10_pr_registration *,
··· 43 #define PR_APTPL_MAX_IPORT_LEN 256 44 #define PR_APTPL_MAX_TPORT_LEN 256 45 46 + /* 47 + * Function defined in target_core_spc.c 48 + */ 49 + void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *); 50 + 51 extern struct kmem_cache *t10_pr_reg_cache; 52 53 extern void core_pr_dump_initiator_port(struct t10_pr_registration *,
+207 -45
drivers/target/target_core_rd.c
··· 78 hba->hba_ptr = NULL; 79 } 80 81 - /* rd_release_device_space(): 82 - * 83 - * 84 - */ 85 - static void rd_release_device_space(struct rd_dev *rd_dev) 86 { 87 - u32 i, j, page_count = 0, sg_per_table; 88 - struct rd_dev_sg_table *sg_table; 89 struct page *pg; 90 struct scatterlist *sg; 91 92 - if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) 93 - return; 94 - 95 - sg_table = rd_dev->sg_table_array; 96 - 97 - for (i = 0; i < rd_dev->sg_table_count; i++) { 98 sg = sg_table[i].sg_table; 99 sg_per_table = sg_table[i].rd_sg_count; 100 ··· 96 page_count++; 97 } 98 } 99 - 100 kfree(sg); 101 } 102 103 pr_debug("CORE_RD[%u] - Released device space for Ramdisk" 104 " Device ID: %u, pages %u in %u tables total bytes %lu\n", 105 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, 106 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); 107 108 - kfree(sg_table); 109 rd_dev->sg_table_array = NULL; 110 rd_dev->sg_table_count = 0; 111 } ··· 127 * 128 * 129 */ 130 - static int rd_build_device_space(struct rd_dev *rd_dev) 131 { 132 - u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed; 133 u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / 134 sizeof(struct scatterlist)); 135 - struct rd_dev_sg_table *sg_table; 136 struct page *pg; 137 struct scatterlist *sg; 138 - 139 - if (rd_dev->rd_page_count <= 0) { 140 - pr_err("Illegal page count: %u for Ramdisk device\n", 141 - rd_dev->rd_page_count); 142 - return -EINVAL; 143 - } 144 - 145 - /* Don't need backing pages for NULLIO */ 146 - if (rd_dev->rd_flags & RDF_NULLIO) 147 - return 0; 148 - 149 - total_sg_needed = rd_dev->rd_page_count; 150 - 151 - sg_tables = (total_sg_needed / max_sg_per_table) + 1; 152 - 153 - sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); 154 - if (!sg_table) { 155 - pr_err("Unable to allocate memory for Ramdisk" 156 - " scatterlist tables\n"); 157 - return -ENOMEM; 158 - } 159 - 160 - rd_dev->sg_table_array = sg_table; 161 - rd_dev->sg_table_count = sg_tables; 162 163 while (total_sg_needed) { 164 sg_per_table = (total_sg_needed > max_sg_per_table) ? ··· 166 } 167 sg_assign_page(&sg[j], pg); 168 sg[j].length = PAGE_SIZE; 169 } 170 171 page_offset += sg_per_table; 172 total_sg_needed -= sg_per_table; 173 } 174 175 pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of" 176 - " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, 177 - rd_dev->rd_dev_id, rd_dev->rd_page_count, 178 - rd_dev->sg_table_count); 179 180 return 0; 181 } ··· 356 return NULL; 357 } 358 359 static sense_reason_t 360 rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, 361 enum dma_data_direction data_direction) ··· 390 u32 rd_page; 391 u32 src_len; 392 u64 tmp; 393 394 if (dev->rd_flags & RDF_NULLIO) { 395 target_complete_cmd(cmd, SAM_STAT_GOOD); ··· 412 dev->rd_dev_id, 413 data_direction == DMA_FROM_DEVICE ? "Read" : "Write", 414 cmd->t_task_lba, rd_size, rd_page, rd_offset); 415 416 src_len = PAGE_SIZE - rd_offset; 417 sg_miter_start(&m, sgl, sgl_nents, ··· 495 rd_sg = table->sg_table; 496 } 497 sg_miter_stop(&m); 498 499 target_complete_cmd(cmd, SAM_STAT_GOOD); 500 return 0; ··· 599 return blocks_long; 600 } 601 602 static struct sbc_ops rd_sbc_ops = { 603 .execute_rw = rd_execute_rw, 604 }; ··· 641 .show_configfs_dev_params = rd_show_configfs_dev_params, 642 .get_device_type = sbc_get_device_type, 643 .get_blocks = rd_get_blocks, 644 }; 645 646 int __init rd_module_init(void)
··· 78 hba->hba_ptr = NULL; 79 } 80 81 + static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table, 82 + u32 sg_table_count) 83 { 84 struct page *pg; 85 struct scatterlist *sg; 86 + u32 i, j, page_count = 0, sg_per_table; 87 88 + for (i = 0; i < sg_table_count; i++) { 89 sg = sg_table[i].sg_table; 90 sg_per_table = sg_table[i].rd_sg_count; 91 ··· 105 page_count++; 106 } 107 } 108 kfree(sg); 109 } 110 + 111 + kfree(sg_table); 112 + return page_count; 113 + } 114 + 115 + static void rd_release_device_space(struct rd_dev *rd_dev) 116 + { 117 + u32 page_count; 118 + 119 + if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) 120 + return; 121 + 122 + page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array, 123 + rd_dev->sg_table_count); 124 125 pr_debug("CORE_RD[%u] - Released device space for Ramdisk" 126 " Device ID: %u, pages %u in %u tables total bytes %lu\n", 127 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, 128 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); 129 130 rd_dev->sg_table_array = NULL; 131 rd_dev->sg_table_count = 0; 132 } ··· 124 * 125 * 126 */ 127 + static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table, 128 + u32 total_sg_needed, unsigned char init_payload) 129 { 130 + u32 i = 0, j, page_offset = 0, sg_per_table; 131 u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / 132 sizeof(struct scatterlist)); 133 struct page *pg; 134 struct scatterlist *sg; 135 + unsigned char *p; 136 137 while (total_sg_needed) { 138 sg_per_table = (total_sg_needed > max_sg_per_table) ? ··· 186 } 187 sg_assign_page(&sg[j], pg); 188 sg[j].length = PAGE_SIZE; 189 + 190 + p = kmap(pg); 191 + memset(p, init_payload, PAGE_SIZE); 192 + kunmap(pg); 193 } 194 195 page_offset += sg_per_table; 196 total_sg_needed -= sg_per_table; 197 } 198 199 + return 0; 200 + } 201 + 202 + static int rd_build_device_space(struct rd_dev *rd_dev) 203 + { 204 + struct rd_dev_sg_table *sg_table; 205 + u32 sg_tables, total_sg_needed; 206 + u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / 207 + sizeof(struct scatterlist)); 208 + int rc; 209 + 210 + if (rd_dev->rd_page_count <= 0) { 211 + pr_err("Illegal page count: %u for Ramdisk device\n", 212 + rd_dev->rd_page_count); 213 + return -EINVAL; 214 + } 215 + 216 + /* Don't need backing pages for NULLIO */ 217 + if (rd_dev->rd_flags & RDF_NULLIO) 218 + return 0; 219 + 220 + total_sg_needed = rd_dev->rd_page_count; 221 + 222 + sg_tables = (total_sg_needed / max_sg_per_table) + 1; 223 + 224 + sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); 225 + if (!sg_table) { 226 + pr_err("Unable to allocate memory for Ramdisk" 227 + " scatterlist tables\n"); 228 + return -ENOMEM; 229 + } 230 + 231 + rd_dev->sg_table_array = sg_table; 232 + rd_dev->sg_table_count = sg_tables; 233 + 234 + rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00); 235 + if (rc) 236 + return rc; 237 + 238 pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of" 239 + " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, 240 + rd_dev->rd_dev_id, rd_dev->rd_page_count, 241 + rd_dev->sg_table_count); 242 + 243 + return 0; 244 + } 245 + 246 + static void rd_release_prot_space(struct rd_dev *rd_dev) 247 + { 248 + u32 page_count; 249 + 250 + if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count) 251 + return; 252 + 253 + page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array, 254 + rd_dev->sg_prot_count); 255 + 256 + pr_debug("CORE_RD[%u] - Released protection space for 
Ramdisk" 257 + " Device ID: %u, pages %u in %u tables total bytes %lu\n", 258 + rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, 259 + rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); 260 + 261 + rd_dev->sg_prot_array = NULL; 262 + rd_dev->sg_prot_count = 0; 263 + } 264 + 265 + static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length) 266 + { 267 + struct rd_dev_sg_table *sg_table; 268 + u32 total_sg_needed, sg_tables; 269 + u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / 270 + sizeof(struct scatterlist)); 271 + int rc; 272 + 273 + if (rd_dev->rd_flags & RDF_NULLIO) 274 + return 0; 275 + 276 + total_sg_needed = rd_dev->rd_page_count / prot_length; 277 + 278 + sg_tables = (total_sg_needed / max_sg_per_table) + 1; 279 + 280 + sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); 281 + if (!sg_table) { 282 + pr_err("Unable to allocate memory for Ramdisk protection" 283 + " scatterlist tables\n"); 284 + return -ENOMEM; 285 + } 286 + 287 + rd_dev->sg_prot_array = sg_table; 288 + rd_dev->sg_prot_count = sg_tables; 289 + 290 + rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff); 291 + if (rc) 292 + return rc; 293 + 294 + pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of" 295 + " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, 296 + rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count); 297 298 return 0; 299 } ··· 278 return NULL; 279 } 280 281 + static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page) 282 + { 283 + struct rd_dev_sg_table *sg_table; 284 + u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE / 285 + sizeof(struct scatterlist)); 286 + 287 + i = page / sg_per_table; 288 + if (i < rd_dev->sg_prot_count) { 289 + sg_table = &rd_dev->sg_prot_array[i]; 290 + if ((sg_table->page_start_offset <= page) && 291 + (sg_table->page_end_offset >= page)) 292 + return sg_table; 293 + } 294 + 295 + pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n", 296 + page); 297 + 298 + return NULL; 299 + } 300 + 301 static sense_reason_t 302 rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, 303 enum dma_data_direction data_direction) ··· 292 u32 rd_page; 293 u32 src_len; 294 u64 tmp; 295 + sense_reason_t rc; 296 297 if (dev->rd_flags & RDF_NULLIO) { 298 target_complete_cmd(cmd, SAM_STAT_GOOD); ··· 313 dev->rd_dev_id, 314 data_direction == DMA_FROM_DEVICE ? 
"Read" : "Write", 315 cmd->t_task_lba, rd_size, rd_page, rd_offset); 316 + 317 + if (cmd->prot_type && data_direction == DMA_TO_DEVICE) { 318 + struct rd_dev_sg_table *prot_table; 319 + struct scatterlist *prot_sg; 320 + u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; 321 + u32 prot_offset, prot_page; 322 + 323 + tmp = cmd->t_task_lba * se_dev->prot_length; 324 + prot_offset = do_div(tmp, PAGE_SIZE); 325 + prot_page = tmp; 326 + 327 + prot_table = rd_get_prot_table(dev, prot_page); 328 + if (!prot_table) 329 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 330 + 331 + prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; 332 + 333 + rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0, 334 + prot_sg, prot_offset); 335 + if (rc) 336 + return rc; 337 + } 338 339 src_len = PAGE_SIZE - rd_offset; 340 sg_miter_start(&m, sgl, sgl_nents, ··· 374 rd_sg = table->sg_table; 375 } 376 sg_miter_stop(&m); 377 + 378 + if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) { 379 + struct rd_dev_sg_table *prot_table; 380 + struct scatterlist *prot_sg; 381 + u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; 382 + u32 prot_offset, prot_page; 383 + 384 + tmp = cmd->t_task_lba * se_dev->prot_length; 385 + prot_offset = do_div(tmp, PAGE_SIZE); 386 + prot_page = tmp; 387 + 388 + prot_table = rd_get_prot_table(dev, prot_page); 389 + if (!prot_table) 390 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 391 + 392 + prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; 393 + 394 + rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, 395 + prot_sg, prot_offset); 396 + if (rc) 397 + return rc; 398 + } 399 400 target_complete_cmd(cmd, SAM_STAT_GOOD); 401 return 0; ··· 456 return blocks_long; 457 } 458 459 + static int rd_init_prot(struct se_device *dev) 460 + { 461 + struct rd_dev *rd_dev = RD_DEV(dev); 462 + 463 + if (!dev->dev_attrib.pi_prot_type) 464 + return 0; 465 + 466 + return rd_build_prot_space(rd_dev, dev->prot_length); 467 + } 468 + 469 + static void rd_free_prot(struct se_device *dev) 470 + { 471 + struct rd_dev *rd_dev = RD_DEV(dev); 472 + 473 + rd_release_prot_space(rd_dev); 474 + } 475 + 476 static struct sbc_ops rd_sbc_ops = { 477 .execute_rw = rd_execute_rw, 478 }; ··· 481 .show_configfs_dev_params = rd_show_configfs_dev_params, 482 .get_device_type = sbc_get_device_type, 483 .get_blocks = rd_get_blocks, 484 + .init_prot = rd_init_prot, 485 + .free_prot = rd_free_prot, 486 }; 487 488 int __init rd_module_init(void)
+4
drivers/target/target_core_rd.h
··· 33 u32 rd_page_count; 34 /* Number of SG tables in sg_table_array */ 35 u32 sg_table_count; 36 /* Array of rd_dev_sg_table_t containing scatterlists */ 37 struct rd_dev_sg_table *sg_table_array; 38 /* Ramdisk HBA device is connected to */ 39 struct rd_host *rd_host; 40 } ____cacheline_aligned;
··· 33 u32 rd_page_count; 34 /* Number of SG tables in sg_table_array */ 35 u32 sg_table_count; 36 + /* Number of SG tables in sg_prot_array */ 37 + u32 sg_prot_count; 38 /* Array of rd_dev_sg_table_t containing scatterlists */ 39 struct rd_dev_sg_table *sg_table_array; 40 + /* Array of rd_dev_sg_table containing protection scatterlists */ 41 + struct rd_dev_sg_table *sg_prot_array; 42 /* Ramdisk HBA device is connected to */ 43 struct rd_host *rd_host; 44 } ____cacheline_aligned;
+251 -1
drivers/target/target_core_sbc.c
··· 23 #include <linux/kernel.h> 24 #include <linux/module.h> 25 #include <linux/ratelimit.h> 26 #include <asm/unaligned.h> 27 #include <scsi/scsi.h> 28 #include <scsi/scsi_tcq.h> ··· 34 35 #include "target_core_internal.h" 36 #include "target_core_ua.h" 37 - 38 39 static sense_reason_t 40 sbc_emulate_readcapacity(struct se_cmd *cmd) ··· 106 buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; 107 buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; 108 buf[11] = dev->dev_attrib.block_size & 0xff; 109 110 if (dev->transport->get_lbppbe) 111 buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; ··· 569 return TCM_NO_SENSE; 570 } 571 572 sense_reason_t 573 sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) 574 { ··· 627 case READ_10: 628 sectors = transport_get_sectors_10(cdb); 629 cmd->t_task_lba = transport_lba_32(cdb); 630 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 631 cmd->execute_rw = ops->execute_rw; 632 cmd->execute_cmd = sbc_execute_rw; ··· 638 case READ_12: 639 sectors = transport_get_sectors_12(cdb); 640 cmd->t_task_lba = transport_lba_32(cdb); 641 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 642 cmd->execute_rw = ops->execute_rw; 643 cmd->execute_cmd = sbc_execute_rw; ··· 649 case READ_16: 650 sectors = transport_get_sectors_16(cdb); 651 cmd->t_task_lba = transport_lba_64(cdb); 652 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 653 cmd->execute_rw = ops->execute_rw; 654 cmd->execute_cmd = sbc_execute_rw; ··· 668 case WRITE_VERIFY: 669 sectors = transport_get_sectors_10(cdb); 670 cmd->t_task_lba = transport_lba_32(cdb); 671 if (cdb[1] & 0x8) 672 cmd->se_cmd_flags |= SCF_FUA; 673 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; ··· 681 case WRITE_12: 682 sectors = transport_get_sectors_12(cdb); 683 cmd->t_task_lba = transport_lba_32(cdb); 684 if (cdb[1] & 0x8) 685 cmd->se_cmd_flags |= SCF_FUA; 686 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; ··· 694 case WRITE_16: 695 sectors = transport_get_sectors_16(cdb); 696 cmd->t_task_lba = transport_lba_64(cdb); 697 if (cdb[1] & 0x8) 698 cmd->se_cmd_flags |= SCF_FUA; 699 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; ··· 798 switch (cmd->t_task_cdb[1] & 0x1f) { 799 case SAI_READ_CAPACITY_16: 800 cmd->execute_cmd = sbc_emulate_readcapacity_16; 801 break; 802 default: 803 pr_err("Unsupported SA: 0x%02x\n", ··· 1030 return ret; 1031 } 1032 EXPORT_SYMBOL(sbc_execute_unmap);
··· 23 #include <linux/kernel.h> 24 #include <linux/module.h> 25 #include <linux/ratelimit.h> 26 + #include <linux/crc-t10dif.h> 27 #include <asm/unaligned.h> 28 #include <scsi/scsi.h> 29 #include <scsi/scsi_tcq.h> ··· 33 34 #include "target_core_internal.h" 35 #include "target_core_ua.h" 36 + #include "target_core_alua.h" 37 38 static sense_reason_t 39 sbc_emulate_readcapacity(struct se_cmd *cmd) ··· 105 buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; 106 buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; 107 buf[11] = dev->dev_attrib.block_size & 0xff; 108 + /* 109 + * Set P_TYPE and PROT_EN bits for DIF support 110 + */ 111 + if (dev->dev_attrib.pi_prot_type) 112 + buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1; 113 114 if (dev->transport->get_lbppbe) 115 buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; ··· 563 return TCM_NO_SENSE; 564 } 565 566 + static bool 567 + sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, 568 + u32 sectors) 569 + { 570 + if (!cmd->t_prot_sg || !cmd->t_prot_nents) 571 + return true; 572 + 573 + switch (dev->dev_attrib.pi_prot_type) { 574 + case TARGET_DIF_TYPE3_PROT: 575 + if (!(cdb[1] & 0xe0)) 576 + return true; 577 + 578 + cmd->reftag_seed = 0xffffffff; 579 + break; 580 + case TARGET_DIF_TYPE2_PROT: 581 + if (cdb[1] & 0xe0) 582 + return false; 583 + 584 + cmd->reftag_seed = cmd->t_task_lba; 585 + break; 586 + case TARGET_DIF_TYPE1_PROT: 587 + if (!(cdb[1] & 0xe0)) 588 + return true; 589 + 590 + cmd->reftag_seed = cmd->t_task_lba; 591 + break; 592 + case TARGET_DIF_TYPE0_PROT: 593 + default: 594 + return true; 595 + } 596 + 597 + cmd->prot_type = dev->dev_attrib.pi_prot_type; 598 + cmd->prot_length = dev->prot_length * sectors; 599 + cmd->prot_handover = PROT_SEPERATED; 600 + 601 + return true; 602 + } 603 + 604 sense_reason_t 605 sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) 606 { ··· 583 case READ_10: 584 sectors = transport_get_sectors_10(cdb); 585 cmd->t_task_lba = transport_lba_32(cdb); 586 + 587 + if (!sbc_check_prot(dev, cmd, cdb, sectors)) 588 + return TCM_UNSUPPORTED_SCSI_OPCODE; 589 + 590 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 591 cmd->execute_rw = ops->execute_rw; 592 cmd->execute_cmd = sbc_execute_rw; ··· 590 case READ_12: 591 sectors = transport_get_sectors_12(cdb); 592 cmd->t_task_lba = transport_lba_32(cdb); 593 + 594 + if (!sbc_check_prot(dev, cmd, cdb, sectors)) 595 + return TCM_UNSUPPORTED_SCSI_OPCODE; 596 + 597 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 598 cmd->execute_rw = ops->execute_rw; 599 cmd->execute_cmd = sbc_execute_rw; ··· 597 case READ_16: 598 sectors = transport_get_sectors_16(cdb); 599 cmd->t_task_lba = transport_lba_64(cdb); 600 + 601 + if (!sbc_check_prot(dev, cmd, cdb, sectors)) 602 + return TCM_UNSUPPORTED_SCSI_OPCODE; 603 + 604 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 605 cmd->execute_rw = ops->execute_rw; 606 cmd->execute_cmd = sbc_execute_rw; ··· 612 case WRITE_VERIFY: 613 sectors = transport_get_sectors_10(cdb); 614 cmd->t_task_lba = transport_lba_32(cdb); 615 + 616 + if (!sbc_check_prot(dev, cmd, cdb, sectors)) 617 + return TCM_UNSUPPORTED_SCSI_OPCODE; 618 + 619 if (cdb[1] & 0x8) 620 cmd->se_cmd_flags |= SCF_FUA; 621 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; ··· 621 case WRITE_12: 622 sectors = transport_get_sectors_12(cdb); 623 cmd->t_task_lba = transport_lba_32(cdb); 624 + 625 + if (!sbc_check_prot(dev, cmd, cdb, sectors)) 626 + return TCM_UNSUPPORTED_SCSI_OPCODE; 627 + 628 if (cdb[1] & 0x8) 629 cmd->se_cmd_flags |= SCF_FUA; 630 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; ··· 
630 case WRITE_16: 631 sectors = transport_get_sectors_16(cdb); 632 cmd->t_task_lba = transport_lba_64(cdb); 633 + 634 + if (!sbc_check_prot(dev, cmd, cdb, sectors)) 635 + return TCM_UNSUPPORTED_SCSI_OPCODE; 636 + 637 if (cdb[1] & 0x8) 638 cmd->se_cmd_flags |= SCF_FUA; 639 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; ··· 730 switch (cmd->t_task_cdb[1] & 0x1f) { 731 case SAI_READ_CAPACITY_16: 732 cmd->execute_cmd = sbc_emulate_readcapacity_16; 733 + break; 734 + case SAI_REPORT_REFERRALS: 735 + cmd->execute_cmd = target_emulate_report_referrals; 736 break; 737 default: 738 pr_err("Unsupported SA: 0x%02x\n", ··· 959 return ret; 960 } 961 EXPORT_SYMBOL(sbc_execute_unmap); 962 + 963 + static sense_reason_t 964 + sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, 965 + const void *p, sector_t sector, unsigned int ei_lba) 966 + { 967 + int block_size = dev->dev_attrib.block_size; 968 + __be16 csum; 969 + 970 + csum = cpu_to_be16(crc_t10dif(p, block_size)); 971 + 972 + if (sdt->guard_tag != csum) { 973 + pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x" 974 + " csum 0x%04x\n", (unsigned long long)sector, 975 + be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum)); 976 + return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 977 + } 978 + 979 + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT && 980 + be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { 981 + pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" 982 + " sector MSB: 0x%08x\n", (unsigned long long)sector, 983 + be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff)); 984 + return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 985 + } 986 + 987 + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT && 988 + be32_to_cpu(sdt->ref_tag) != ei_lba) { 989 + pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" 990 + " ei_lba: 0x%08x\n", (unsigned long long)sector, 991 + be32_to_cpu(sdt->ref_tag), ei_lba); 992 + return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 993 + } 994 + 995 + return 0; 996 + } 997 + 998 + static void 999 + sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, 1000 + struct scatterlist *sg, int sg_off) 1001 + { 1002 + struct se_device *dev = cmd->se_dev; 1003 + struct scatterlist *psg; 1004 + void *paddr, *addr; 1005 + unsigned int i, len, left; 1006 + 1007 + left = sectors * dev->prot_length; 1008 + 1009 + for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { 1010 + 1011 + len = min(psg->length, left); 1012 + paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1013 + addr = kmap_atomic(sg_page(sg)) + sg_off; 1014 + 1015 + if (read) 1016 + memcpy(paddr, addr, len); 1017 + else 1018 + memcpy(addr, paddr, len); 1019 + 1020 + left -= len; 1021 + kunmap_atomic(paddr); 1022 + kunmap_atomic(addr); 1023 + } 1024 + } 1025 + 1026 + sense_reason_t 1027 + sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, 1028 + unsigned int ei_lba, struct scatterlist *sg, int sg_off) 1029 + { 1030 + struct se_device *dev = cmd->se_dev; 1031 + struct se_dif_v1_tuple *sdt; 1032 + struct scatterlist *dsg, *psg = cmd->t_prot_sg; 1033 + sector_t sector = start; 1034 + void *daddr, *paddr; 1035 + int i, j, offset = 0; 1036 + sense_reason_t rc; 1037 + 1038 + for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { 1039 + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1040 + paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1041 + 1042 + for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { 1043 + 1044 + if (offset >= psg->length) { 1045 + 
kunmap_atomic(paddr); 1046 + psg = sg_next(psg); 1047 + paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1048 + offset = 0; 1049 + } 1050 + 1051 + sdt = paddr + offset; 1052 + 1053 + pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x" 1054 + " app_tag: 0x%04x ref_tag: %u\n", 1055 + (unsigned long long)sector, sdt->guard_tag, 1056 + sdt->app_tag, be32_to_cpu(sdt->ref_tag)); 1057 + 1058 + rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, 1059 + ei_lba); 1060 + if (rc) { 1061 + kunmap_atomic(paddr); 1062 + kunmap_atomic(daddr); 1063 + cmd->bad_sector = sector; 1064 + return rc; 1065 + } 1066 + 1067 + sector++; 1068 + ei_lba++; 1069 + offset += sizeof(struct se_dif_v1_tuple); 1070 + } 1071 + 1072 + kunmap_atomic(paddr); 1073 + kunmap_atomic(daddr); 1074 + } 1075 + sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); 1076 + 1077 + return 0; 1078 + } 1079 + EXPORT_SYMBOL(sbc_dif_verify_write); 1080 + 1081 + sense_reason_t 1082 + sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, 1083 + unsigned int ei_lba, struct scatterlist *sg, int sg_off) 1084 + { 1085 + struct se_device *dev = cmd->se_dev; 1086 + struct se_dif_v1_tuple *sdt; 1087 + struct scatterlist *dsg; 1088 + sector_t sector = start; 1089 + void *daddr, *paddr; 1090 + int i, j, offset = sg_off; 1091 + sense_reason_t rc; 1092 + 1093 + for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { 1094 + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1095 + paddr = kmap_atomic(sg_page(sg)) + sg->offset; 1096 + 1097 + for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { 1098 + 1099 + if (offset >= sg->length) { 1100 + kunmap_atomic(paddr); 1101 + sg = sg_next(sg); 1102 + paddr = kmap_atomic(sg_page(sg)) + sg->offset; 1103 + offset = 0; 1104 + } 1105 + 1106 + sdt = paddr + offset; 1107 + 1108 + pr_debug("DIF READ sector: %llu guard_tag: 0x%04x" 1109 + " app_tag: 0x%04x ref_tag: %u\n", 1110 + (unsigned long long)sector, sdt->guard_tag, 1111 + sdt->app_tag, be32_to_cpu(sdt->ref_tag)); 1112 + 1113 + if (sdt->app_tag == cpu_to_be16(0xffff)) { 1114 + sector++; 1115 + offset += sizeof(struct se_dif_v1_tuple); 1116 + continue; 1117 + } 1118 + 1119 + rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, 1120 + ei_lba); 1121 + if (rc) { 1122 + kunmap_atomic(paddr); 1123 + kunmap_atomic(daddr); 1124 + cmd->bad_sector = sector; 1125 + return rc; 1126 + } 1127 + 1128 + sector++; 1129 + ei_lba++; 1130 + offset += sizeof(struct se_dif_v1_tuple); 1131 + } 1132 + 1133 + kunmap_atomic(paddr); 1134 + kunmap_atomic(daddr); 1135 + } 1136 + sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off); 1137 + 1138 + return 0; 1139 + } 1140 + EXPORT_SYMBOL(sbc_dif_verify_read);
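Note on the sbc_dif_v1_verify() path above: for each logical block it recomputes the CRC16 T10-DIF guard tag and, for Type 1/2, compares the reference tag against the expected LBA/ei_lba before the protection SGL is copied with sbc_dif_copy_prot(). The following is a minimal userspace sketch of that per-sector check, not the kernel code: crc16_t10dif(), dif_v1_verify() and struct dif_tuple are local illustrative names, with a bitwise CRC (polynomial 0x8BB7) standing in for the kernel's crc_t10dif().

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>   /* htons/htonl for the big-endian tuple fields */

struct dif_tuple {              /* mirrors struct se_dif_v1_tuple */
	uint16_t guard_tag;     /* CRC16 of the data block, big-endian */
	uint16_t app_tag;       /* application tag, 0xffff = escape */
	uint32_t ref_tag;       /* low 32 bits of the LBA for Type 1 */
};

/* CRC16, polynomial 0x8BB7, init 0, no reflection (T10-DIF guard tag). */
static uint16_t crc16_t10dif(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;

	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7 : crc << 1;
	}
	return crc;
}

/* Guard + Type 1 reference tag checks; 0 on success, negative on mismatch. */
static int dif_v1_verify(const struct dif_tuple *sdt, const uint8_t *block,
			 size_t block_size, uint64_t sector)
{
	uint16_t csum = htons(crc16_t10dif(block, block_size));

	if (sdt->guard_tag != csum)
		return -1;	/* guard check failed */
	if (ntohl(sdt->ref_tag) != (uint32_t)(sector & 0xffffffff))
		return -2;	/* reference tag check failed */
	return 0;
}

int main(void)
{
	uint8_t block[512];
	uint64_t lba = 1234;
	struct dif_tuple sdt;

	memset(block, 0xa5, sizeof(block));
	sdt.guard_tag = htons(crc16_t10dif(block, sizeof(block)));
	sdt.app_tag = 0;
	sdt.ref_tag = htonl((uint32_t)lba);

	printf("clean block: %d\n", dif_v1_verify(&sdt, block, sizeof(block), lba));
	block[7] ^= 0x01;	/* corrupt one byte -> guard check fails */
	printf("corrupted block: %d\n", dif_v1_verify(&sdt, block, sizeof(block), lba));
	return 0;
}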
+100 -14
drivers/target/target_core_spc.c
··· 100 */ 101 if (dev->dev_attrib.emulate_3pc) 102 buf[5] |= 0x8; 103 104 buf[7] = 0x2; /* CmdQue=1 */ 105 ··· 272 port = lun->lun_sep; 273 if (port) { 274 struct t10_alua_lu_gp *lu_gp; 275 - u32 padding, scsi_name_len; 276 u16 lu_gp_id = 0; 277 u16 tg_pt_gp_id = 0; 278 u16 tpgt; ··· 370 * section 7.5.1 Table 362 371 */ 372 check_scsi_name: 373 - scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg)); 374 - /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */ 375 - scsi_name_len += 10; 376 - /* Check for 4-byte padding */ 377 - padding = ((-scsi_name_len) & 3); 378 - if (padding != 0) 379 - scsi_name_len += padding; 380 - /* Header size + Designation descriptor */ 381 - scsi_name_len += 4; 382 - 383 buf[off] = 384 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); 385 buf[off++] |= 0x3; /* CODE SET == UTF-8 */ ··· 397 * shall be no larger than 256 and shall be a multiple 398 * of four. 399 */ 400 if (padding) 401 scsi_name_len += padding; 402 403 buf[off-1] = scsi_name_len; 404 off += scsi_name_len; 405 /* Header size + Designation descriptor */ 406 len += (scsi_name_len + 4); 407 } 408 buf[2] = ((len >> 8) & 0xff); 409 buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ ··· 475 struct se_device *dev = cmd->se_dev; 476 477 buf[3] = 0x3c; 478 /* Set HEADSUP, ORDSUP, SIMPSUP */ 479 buf[5] = 0x07; 480 481 /* If WriteCache emulation is enabled, set V_SUP */ 482 if (spc_check_dev_wce(dev)) 483 buf[6] = 0x01; 484 return 0; 485 } 486 ··· 653 return 0; 654 } 655 656 static sense_reason_t 657 spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); 658 ··· 681 { .page = 0xb0, .emulate = spc_emulate_evpd_b0 }, 682 { .page = 0xb1, .emulate = spc_emulate_evpd_b1 }, 683 { .page = 0xb2, .emulate = spc_emulate_evpd_b2 }, 684 }; 685 686 /* supported vital product data pages */ ··· 711 struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; 712 unsigned char *rbuf; 713 unsigned char *cdb = cmd->t_task_cdb; 714 - unsigned char buf[SE_INQUIRY_BUF]; 715 sense_reason_t ret; 716 int p; 717 718 - memset(buf, 0, SE_INQUIRY_BUF); 719 720 if (dev == tpg->tpg_virt_lun0.lun_se_dev) 721 buf[0] = 0x3f; /* Not connected */ ··· 752 out: 753 rbuf = transport_kmap_data_sg(cmd); 754 if (rbuf) { 755 - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 756 transport_kunmap_data_sg(cmd); 757 } 758 759 if (!ret) 760 target_complete_cmd(cmd, GOOD); ··· 858 * status (see SAM-4). 859 */ 860 p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00; 861 p[8] = 0xff; 862 p[9] = 0xff; 863 p[11] = 30;
··· 100 */ 101 if (dev->dev_attrib.emulate_3pc) 102 buf[5] |= 0x8; 103 + /* 104 + * Set Protection (PROTECT) bit when DIF has been enabled. 105 + */ 106 + if (dev->dev_attrib.pi_prot_type) 107 + buf[5] |= 0x1; 108 109 buf[7] = 0x2; /* CmdQue=1 */ 110 ··· 267 port = lun->lun_sep; 268 if (port) { 269 struct t10_alua_lu_gp *lu_gp; 270 + u32 padding, scsi_name_len, scsi_target_len; 271 u16 lu_gp_id = 0; 272 u16 tg_pt_gp_id = 0; 273 u16 tpgt; ··· 365 * section 7.5.1 Table 362 366 */ 367 check_scsi_name: 368 buf[off] = 369 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); 370 buf[off++] |= 0x3; /* CODE SET == UTF-8 */ ··· 402 * shall be no larger than 256 and shall be a multiple 403 * of four. 404 */ 405 + padding = ((-scsi_name_len) & 3); 406 if (padding) 407 scsi_name_len += padding; 408 + if (scsi_name_len > 256) 409 + scsi_name_len = 256; 410 411 buf[off-1] = scsi_name_len; 412 off += scsi_name_len; 413 /* Header size + Designation descriptor */ 414 len += (scsi_name_len + 4); 415 + 416 + /* 417 + * Target device designator 418 + */ 419 + buf[off] = 420 + (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); 421 + buf[off++] |= 0x3; /* CODE SET == UTF-8 */ 422 + buf[off] = 0x80; /* Set PIV=1 */ 423 + /* Set ASSOCIATION == target device: 10b */ 424 + buf[off] |= 0x20; 425 + /* DESIGNATOR TYPE == SCSI name string */ 426 + buf[off++] |= 0x8; 427 + off += 2; /* Skip over Reserved and length */ 428 + /* 429 + * SCSI name string identifer containing, $FABRIC_MOD 430 + * dependent information. For LIO-Target and iSCSI 431 + * Target Port, this means "<iSCSI name>" in 432 + * UTF-8 encoding. 433 + */ 434 + scsi_target_len = sprintf(&buf[off], "%s", 435 + tpg->se_tpg_tfo->tpg_get_wwn(tpg)); 436 + scsi_target_len += 1 /* Include NULL terminator */; 437 + /* 438 + * The null-terminated, null-padded (see 4.4.2) SCSI 439 + * NAME STRING field contains a UTF-8 format string. 440 + * The number of bytes in the SCSI NAME STRING field 441 + * (i.e., the value in the DESIGNATOR LENGTH field) 442 + * shall be no larger than 256 and shall be a multiple 443 + * of four. 444 + */ 445 + padding = ((-scsi_target_len) & 3); 446 + if (padding) 447 + scsi_target_len += padding; 448 + if (scsi_name_len > 256) 449 + scsi_name_len = 256; 450 + 451 + buf[off-1] = scsi_target_len; 452 + off += scsi_target_len; 453 + 454 + /* Header size + Designation descriptor */ 455 + len += (scsi_target_len + 4); 456 } 457 buf[2] = ((len >> 8) & 0xff); 458 buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ ··· 436 struct se_device *dev = cmd->se_dev; 437 438 buf[3] = 0x3c; 439 + /* 440 + * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK 441 + * only for TYPE3 protection. 
442 + */ 443 + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) 444 + buf[4] = 0x5; 445 + else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT) 446 + buf[4] = 0x4; 447 + 448 /* Set HEADSUP, ORDSUP, SIMPSUP */ 449 buf[5] = 0x07; 450 451 /* If WriteCache emulation is enabled, set V_SUP */ 452 if (spc_check_dev_wce(dev)) 453 buf[6] = 0x01; 454 + /* If an LBA map is present set R_SUP */ 455 + spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); 456 + if (!list_empty(&dev->t10_alua.lba_map_list)) 457 + buf[8] = 0x10; 458 + spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock); 459 return 0; 460 } 461 ··· 600 return 0; 601 } 602 603 + /* Referrals VPD page */ 604 + static sense_reason_t 605 + spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf) 606 + { 607 + struct se_device *dev = cmd->se_dev; 608 + 609 + buf[0] = dev->transport->get_device_type(dev); 610 + buf[3] = 0x0c; 611 + put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]); 612 + put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]); 613 + 614 + return 0; 615 + } 616 + 617 static sense_reason_t 618 spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); 619 ··· 614 { .page = 0xb0, .emulate = spc_emulate_evpd_b0 }, 615 { .page = 0xb1, .emulate = spc_emulate_evpd_b1 }, 616 { .page = 0xb2, .emulate = spc_emulate_evpd_b2 }, 617 + { .page = 0xb3, .emulate = spc_emulate_evpd_b3 }, 618 }; 619 620 /* supported vital product data pages */ ··· 643 struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; 644 unsigned char *rbuf; 645 unsigned char *cdb = cmd->t_task_cdb; 646 + unsigned char *buf; 647 sense_reason_t ret; 648 int p; 649 650 + buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL); 651 + if (!buf) { 652 + pr_err("Unable to allocate response buffer for INQUIRY\n"); 653 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 654 + } 655 656 if (dev == tpg->tpg_virt_lun0.lun_se_dev) 657 buf[0] = 0x3f; /* Not connected */ ··· 680 out: 681 rbuf = transport_kmap_data_sg(cmd); 682 if (rbuf) { 683 + memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length)); 684 transport_kunmap_data_sg(cmd); 685 } 686 + kfree(buf); 687 688 if (!ret) 689 target_complete_cmd(cmd, GOOD); ··· 785 * status (see SAM-4). 786 */ 787 p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00; 788 + /* 789 + * From spc4r30, section 7.5.7 Control mode page 790 + * 791 + * Application Tag Owner (ATO) bit set to one. 792 + * 793 + * If the ATO bit is set to one the device server shall not modify the 794 + * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection 795 + * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE 796 + * TAG field. 797 + */ 798 + if (dev->dev_attrib.pi_prot_type) 799 + p[5] |= 0x80; 800 + 801 p[8] = 0xff; 802 p[9] = 0xff; 803 p[11] = 30;
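Note on the Extended INQUIRY (VPD 0x86) hunk above: it advertises which DIF checks the device performs, setting buf[4] to 0x5 (GRD_CHK + REF_CHK) for Type 1 and 0x4 (GRD_CHK only) for Type 3. A short standalone sketch of how an initiator might decode that byte; the bit positions follow SPC-4 and decode_evpd_86_byte4() is an illustrative helper, not a kernel or libscsi function.

#include <stdio.h>
#include <stdint.h>

/* Decode protection check bits from Extended INQUIRY VPD (0x86) byte 4. */
static void decode_evpd_86_byte4(uint8_t b4)
{
	printf("GRD_CHK=%u APP_CHK=%u REF_CHK=%u\n",
	       (b4 >> 2) & 1,	/* guard tag checked */
	       (b4 >> 1) & 1,	/* application tag checked */
	       b4 & 1);		/* reference tag checked */
}

int main(void)
{
	decode_evpd_86_byte4(0x5);	/* TYPE1: guard + reference tag */
	decode_evpd_86_byte4(0x4);	/* TYPE3: guard tag only */
	return 0;
}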
+5 -5
drivers/target/target_core_tpg.c
··· 656 spin_lock_init(&lun->lun_sep_lock); 657 init_completion(&lun->lun_ref_comp); 658 659 - ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); 660 if (ret < 0) 661 return ret; 662 ··· 781 } 782 EXPORT_SYMBOL(core_tpg_deregister); 783 784 - struct se_lun *core_tpg_pre_addlun( 785 struct se_portal_group *tpg, 786 u32 unpacked_lun) 787 { ··· 811 return lun; 812 } 813 814 - int core_tpg_post_addlun( 815 struct se_portal_group *tpg, 816 struct se_lun *lun, 817 u32 lun_access, 818 - void *lun_ptr) 819 { 820 int ret; 821 ··· 823 if (ret < 0) 824 return ret; 825 826 - ret = core_dev_export(lun_ptr, tpg, lun); 827 if (ret < 0) { 828 percpu_ref_cancel_init(&lun->lun_ref); 829 return ret;
··· 656 spin_lock_init(&lun->lun_sep_lock); 657 init_completion(&lun->lun_ref_comp); 658 659 + ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev); 660 if (ret < 0) 661 return ret; 662 ··· 781 } 782 EXPORT_SYMBOL(core_tpg_deregister); 783 784 + struct se_lun *core_tpg_alloc_lun( 785 struct se_portal_group *tpg, 786 u32 unpacked_lun) 787 { ··· 811 return lun; 812 } 813 814 + int core_tpg_add_lun( 815 struct se_portal_group *tpg, 816 struct se_lun *lun, 817 u32 lun_access, 818 + struct se_device *dev) 819 { 820 int ret; 821 ··· 823 if (ret < 0) 824 return ret; 825 826 + ret = core_dev_export(dev, tpg, lun); 827 if (ret < 0) { 828 percpu_ref_cancel_init(&lun->lun_ref); 829 return ret;
+90 -5
drivers/target/target_core_transport.c
··· 62 struct kmem_cache *t10_alua_lu_gp_mem_cache; 63 struct kmem_cache *t10_alua_tg_pt_gp_cache; 64 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 65 66 static void transport_complete_task_attr(struct se_cmd *cmd); 67 static void transport_handle_queue_full(struct se_cmd *cmd, ··· 130 "mem_t failed\n"); 131 goto out_free_tg_pt_gp_cache; 132 } 133 134 target_completion_wq = alloc_workqueue("target_completion", 135 WQ_MEM_RECLAIM, 0); 136 if (!target_completion_wq) 137 - goto out_free_tg_pt_gp_mem_cache; 138 139 return 0; 140 141 out_free_tg_pt_gp_mem_cache: 142 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 143 out_free_tg_pt_gp_cache: ··· 188 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 189 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 190 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 191 } 192 193 /* This code ensures unique mib indexes are handed out. */ ··· 594 { 595 struct se_lun *lun = cmd->se_lun; 596 597 - if (!lun || !cmd->lun_ref_active) 598 return; 599 600 - percpu_ref_put(&lun->lun_ref); 601 } 602 603 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) ··· 1311 * @sgl_count: scatterlist count for unidirectional mapping 1312 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1313 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1314 * 1315 * Returns non zero to signal active I/O shutdown failure. All other 1316 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, ··· 1325 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1326 u32 data_length, int task_attr, int data_dir, int flags, 1327 struct scatterlist *sgl, u32 sgl_count, 1328 - struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1329 { 1330 struct se_portal_group *se_tpg; 1331 sense_reason_t rc; ··· 1367 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1368 target_put_sess_cmd(se_sess, se_cmd); 1369 return 0; 1370 } 1371 1372 rc = target_setup_cmd_from_cdb(se_cmd, cdb); ··· 1418 return 0; 1419 } 1420 } 1421 /* 1422 * Check if we need to delay processing because of ALUA 1423 * Active/NonOptimized primary access state.. ··· 1458 { 1459 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1460 unpacked_lun, data_length, task_attr, data_dir, 1461 - flags, NULL, 0, NULL, 0); 1462 } 1463 EXPORT_SYMBOL(target_submit_cmd); 1464 ··· 2494 return 0; 2495 } 2496 2497 int 2498 transport_send_check_condition_and_sense(struct se_cmd *cmd, 2499 sense_reason_t reason, int from_transport) ··· 2699 /* MISCOMPARE DURING VERIFY OPERATION */ 2700 buffer[SPC_ASC_KEY_OFFSET] = 0x1d; 2701 buffer[SPC_ASCQ_KEY_OFFSET] = 0x00; 2702 break; 2703 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 2704 default:
··· 62 struct kmem_cache *t10_alua_lu_gp_mem_cache; 63 struct kmem_cache *t10_alua_tg_pt_gp_cache; 64 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 65 + struct kmem_cache *t10_alua_lba_map_cache; 66 + struct kmem_cache *t10_alua_lba_map_mem_cache; 67 68 static void transport_complete_task_attr(struct se_cmd *cmd); 69 static void transport_handle_queue_full(struct se_cmd *cmd, ··· 128 "mem_t failed\n"); 129 goto out_free_tg_pt_gp_cache; 130 } 131 + t10_alua_lba_map_cache = kmem_cache_create( 132 + "t10_alua_lba_map_cache", 133 + sizeof(struct t10_alua_lba_map), 134 + __alignof__(struct t10_alua_lba_map), 0, NULL); 135 + if (!t10_alua_lba_map_cache) { 136 + pr_err("kmem_cache_create() for t10_alua_lba_map_" 137 + "cache failed\n"); 138 + goto out_free_tg_pt_gp_mem_cache; 139 + } 140 + t10_alua_lba_map_mem_cache = kmem_cache_create( 141 + "t10_alua_lba_map_mem_cache", 142 + sizeof(struct t10_alua_lba_map_member), 143 + __alignof__(struct t10_alua_lba_map_member), 0, NULL); 144 + if (!t10_alua_lba_map_mem_cache) { 145 + pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" 146 + "cache failed\n"); 147 + goto out_free_lba_map_cache; 148 + } 149 150 target_completion_wq = alloc_workqueue("target_completion", 151 WQ_MEM_RECLAIM, 0); 152 if (!target_completion_wq) 153 + goto out_free_lba_map_mem_cache; 154 155 return 0; 156 157 + out_free_lba_map_mem_cache: 158 + kmem_cache_destroy(t10_alua_lba_map_mem_cache); 159 + out_free_lba_map_cache: 160 + kmem_cache_destroy(t10_alua_lba_map_cache); 161 out_free_tg_pt_gp_mem_cache: 162 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 163 out_free_tg_pt_gp_cache: ··· 164 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 165 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 166 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 167 + kmem_cache_destroy(t10_alua_lba_map_cache); 168 + kmem_cache_destroy(t10_alua_lba_map_mem_cache); 169 } 170 171 /* This code ensures unique mib indexes are handed out. */ ··· 568 { 569 struct se_lun *lun = cmd->se_lun; 570 571 + if (!lun) 572 return; 573 574 + if (cmpxchg(&cmd->lun_ref_active, true, false)) 575 + percpu_ref_put(&lun->lun_ref); 576 } 577 578 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) ··· 1284 * @sgl_count: scatterlist count for unidirectional mapping 1285 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1286 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1287 + * @sgl_prot: struct scatterlist memory protection information 1288 + * @sgl_prot_count: scatterlist count for protection information 1289 * 1290 * Returns non zero to signal active I/O shutdown failure. All other 1291 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, ··· 1296 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1297 u32 data_length, int task_attr, int data_dir, int flags, 1298 struct scatterlist *sgl, u32 sgl_count, 1299 + struct scatterlist *sgl_bidi, u32 sgl_bidi_count, 1300 + struct scatterlist *sgl_prot, u32 sgl_prot_count) 1301 { 1302 struct se_portal_group *se_tpg; 1303 sense_reason_t rc; ··· 1337 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1338 target_put_sess_cmd(se_sess, se_cmd); 1339 return 0; 1340 + } 1341 + /* 1342 + * Save pointers for SGLs containing protection information, 1343 + * if present. 
1344 + */ 1345 + if (sgl_prot_count) { 1346 + se_cmd->t_prot_sg = sgl_prot; 1347 + se_cmd->t_prot_nents = sgl_prot_count; 1348 } 1349 1350 rc = target_setup_cmd_from_cdb(se_cmd, cdb); ··· 1380 return 0; 1381 } 1382 } 1383 + 1384 /* 1385 * Check if we need to delay processing because of ALUA 1386 * Active/NonOptimized primary access state.. ··· 1419 { 1420 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1421 unpacked_lun, data_length, task_attr, data_dir, 1422 + flags, NULL, 0, NULL, 0, NULL, 0); 1423 } 1424 EXPORT_SYMBOL(target_submit_cmd); 1425 ··· 2455 return 0; 2456 } 2457 2458 + static 2459 + void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector) 2460 + { 2461 + /* Place failed LBA in sense data information descriptor 0. */ 2462 + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc; 2463 + buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */ 2464 + buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa; 2465 + buffer[SPC_VALIDITY_OFFSET] = 0x80; 2466 + 2467 + /* Descriptor Information: failing sector */ 2468 + put_unaligned_be64(bad_sector, &buffer[12]); 2469 + } 2470 + 2471 int 2472 transport_send_check_condition_and_sense(struct se_cmd *cmd, 2473 sense_reason_t reason, int from_transport) ··· 2647 /* MISCOMPARE DURING VERIFY OPERATION */ 2648 buffer[SPC_ASC_KEY_OFFSET] = 0x1d; 2649 buffer[SPC_ASCQ_KEY_OFFSET] = 0x00; 2650 + break; 2651 + case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 2652 + /* CURRENT ERROR */ 2653 + buffer[0] = 0x70; 2654 + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2655 + /* ILLEGAL REQUEST */ 2656 + buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2657 + /* LOGICAL BLOCK GUARD CHECK FAILED */ 2658 + buffer[SPC_ASC_KEY_OFFSET] = 0x10; 2659 + buffer[SPC_ASCQ_KEY_OFFSET] = 0x01; 2660 + transport_err_sector_info(buffer, cmd->bad_sector); 2661 + break; 2662 + case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 2663 + /* CURRENT ERROR */ 2664 + buffer[0] = 0x70; 2665 + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2666 + /* ILLEGAL REQUEST */ 2667 + buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2668 + /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 2669 + buffer[SPC_ASC_KEY_OFFSET] = 0x10; 2670 + buffer[SPC_ASCQ_KEY_OFFSET] = 0x02; 2671 + transport_err_sector_info(buffer, cmd->bad_sector); 2672 + break; 2673 + case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 2674 + /* CURRENT ERROR */ 2675 + buffer[0] = 0x70; 2676 + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2677 + /* ILLEGAL REQUEST */ 2678 + buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2679 + /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ 2680 + buffer[SPC_ASC_KEY_OFFSET] = 0x10; 2681 + buffer[SPC_ASCQ_KEY_OFFSET] = 0x03; 2682 + transport_err_sector_info(buffer, cmd->bad_sector); 2683 break; 2684 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 2685 default:
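Note on transport_err_sector_info() above: the three new TCM_LOGICAL_BLOCK_*_CHECK_FAILED sense cases all append the failing LBA using the SPC_* offsets added in target_core_base.h. A self-contained sketch of the same byte layout, with offsets copied from the diff; put_be64() and err_sector_info() are local stand-ins for put_unaligned_be64() and the kernel helper.

#include <stdint.h>
#include <stdio.h>

#define SPC_ADD_SENSE_LEN_OFFSET	7
#define SPC_DESC_TYPE_OFFSET		8
#define SPC_ADDITIONAL_DESC_LEN_OFFSET	9
#define SPC_VALIDITY_OFFSET		10

static void put_be64(uint64_t v, uint8_t *p)
{
	for (int i = 0; i < 8; i++)
		p[i] = v >> (56 - 8 * i);
}

/* Mirror transport_err_sector_info(): information descriptor with the LBA. */
static void err_sector_info(uint8_t *buffer, uint64_t bad_sector)
{
	buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
	buffer[SPC_DESC_TYPE_OFFSET] = 0;	/* Information descriptor */
	buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
	buffer[SPC_VALIDITY_OFFSET] = 0x80;
	put_be64(bad_sector, &buffer[12]);	/* failing sector, big-endian */
}

int main(void)
{
	uint8_t sense[32] = { 0x70 };		/* CURRENT ERROR, as in the diff */

	err_sector_info(sense, 0x123456789abcULL);
	for (int i = 0; i < 20; i++)
		printf("%02x ", sense[i]);
	printf("\n");
	return 0;
}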
-1
drivers/target/target_core_ua.c
··· 98 pr_err("Unable to allocate struct se_ua\n"); 99 return -ENOMEM; 100 } 101 - INIT_LIST_HEAD(&ua->ua_dev_list); 102 INIT_LIST_HEAD(&ua->ua_nacl_list); 103 104 ua->ua_nacl = nacl;
··· 98 pr_err("Unable to allocate struct se_ua\n"); 99 return -ENOMEM; 100 } 101 INIT_LIST_HEAD(&ua->ua_nacl_list); 102 103 ua->ua_nacl = nacl;
-4
drivers/target/target_core_xcopy.c
··· 40 41 static struct workqueue_struct *xcopy_wq = NULL; 42 /* 43 - * From target_core_spc.c 44 - */ 45 - extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *); 46 - /* 47 * From target_core_device.c 48 */ 49 extern struct mutex g_device_mutex;
··· 40 41 static struct workqueue_struct *xcopy_wq = NULL; 42 /* 43 * From target_core_device.c 44 */ 45 extern struct mutex g_device_mutex;
+1 -1
drivers/target/tcm_fc/tfc_cmd.c
··· 438 struct se_session *se_sess = sess->se_sess; 439 int tag; 440 441 - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); 442 if (tag < 0) 443 goto busy; 444
··· 438 struct se_session *se_sess = sess->se_sess; 439 int tag; 440 441 + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 442 if (tag < 0) 443 goto busy; 444
+3 -3
drivers/target/tcm_fc/tfc_conf.c
··· 267 return found; 268 } 269 270 - struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg) 271 { 272 struct ft_node_acl *acl; 273 ··· 552 .fabric_drop_nodeacl = &ft_del_acl, 553 }; 554 555 - int ft_register_configfs(void) 556 { 557 struct target_fabric_configfs *fabric; 558 int ret; ··· 599 return 0; 600 } 601 602 - void ft_deregister_configfs(void) 603 { 604 if (!ft_configfs) 605 return;
··· 267 return found; 268 } 269 270 + static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg) 271 { 272 struct ft_node_acl *acl; 273 ··· 552 .fabric_drop_nodeacl = &ft_del_acl, 553 }; 554 555 + static int ft_register_configfs(void) 556 { 557 struct target_fabric_configfs *fabric; 558 int ret; ··· 599 return 0; 600 } 601 602 + static void ft_deregister_configfs(void) 603 { 604 if (!ft_configfs) 605 return;
+2 -2
drivers/vhost/scsi.c
··· 728 } 729 se_sess = tv_nexus->tvn_se_sess; 730 731 - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); 732 if (tag < 0) { 733 pr_err("Unable to obtain tag for tcm_vhost_cmd\n"); 734 return ERR_PTR(-ENOMEM); ··· 889 cmd->tvc_lun, cmd->tvc_exp_data_len, 890 cmd->tvc_task_attr, cmd->tvc_data_direction, 891 TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, 892 - sg_bidi_ptr, sg_no_bidi); 893 if (rc < 0) { 894 transport_send_check_condition_and_sense(se_cmd, 895 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
··· 728 } 729 se_sess = tv_nexus->tvn_se_sess; 730 731 + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 732 if (tag < 0) { 733 pr_err("Unable to obtain tag for tcm_vhost_cmd\n"); 734 return ERR_PTR(-ENOMEM); ··· 889 cmd->tvc_lun, cmd->tvc_exp_data_len, 890 cmd->tvc_task_attr, cmd->tvc_data_direction, 891 TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, 892 + sg_bidi_ptr, sg_no_bidi, NULL, 0); 893 if (rc < 0) { 894 transport_send_check_condition_and_sense(se_cmd, 895 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+2 -1
include/linux/percpu_ida.h
··· 4 #include <linux/types.h> 5 #include <linux/bitops.h> 6 #include <linux/init.h> 7 #include <linux/spinlock_types.h> 8 #include <linux/wait.h> 9 #include <linux/cpumask.h> ··· 62 /* Max size of percpu freelist, */ 63 #define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2) 64 65 - int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp); 66 void percpu_ida_free(struct percpu_ida *pool, unsigned tag); 67 68 void percpu_ida_destroy(struct percpu_ida *pool);
··· 4 #include <linux/types.h> 5 #include <linux/bitops.h> 6 #include <linux/init.h> 7 + #include <linux/sched.h> 8 #include <linux/spinlock_types.h> 9 #include <linux/wait.h> 10 #include <linux/cpumask.h> ··· 61 /* Max size of percpu freelist, */ 62 #define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2) 63 64 + int percpu_ida_alloc(struct percpu_ida *pool, int state); 65 void percpu_ida_free(struct percpu_ida *pool, unsigned tag); 66 67 void percpu_ida_destroy(struct percpu_ida *pool);
+1
include/scsi/scsi.h
··· 155 /* values for service action in */ 156 #define SAI_READ_CAPACITY_16 0x10 157 #define SAI_GET_LBA_STATUS 0x12 158 /* values for VARIABLE_LENGTH_CMD service action codes 159 * see spc4r17 Section D.3.5, table D.7 and D.8 */ 160 #define VLC_SA_RECEIVE_CREDENTIAL 0x1800
··· 155 /* values for service action in */ 156 #define SAI_READ_CAPACITY_16 0x10 157 #define SAI_GET_LBA_STATUS 0x12 158 + #define SAI_REPORT_REFERRALS 0x13 159 /* values for VARIABLE_LENGTH_CMD service action codes 160 * see spc4r17 Section D.3.5, table D.7 and D.8 */ 161 #define VLC_SA_RECEIVE_CREDENTIAL 0x1800
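Note on SAI_REPORT_REFERRALS: with this define in place, the sbc_parse_cdb() hunk earlier in the series can dispatch SERVICE ACTION IN(16) on the low five bits of CDB byte 1. A trivial decode sketch; the values come from the header above, the 0x9e opcode for SERVICE ACTION IN(16) is an assumption from SBC, and sai_name() is purely illustrative.

#include <stdio.h>
#include <stdint.h>

#define SAI_READ_CAPACITY_16	0x10
#define SAI_GET_LBA_STATUS	0x12
#define SAI_REPORT_REFERRALS	0x13

static const char *sai_name(const uint8_t *cdb)
{
	switch (cdb[1] & 0x1f) {	/* SERVICE ACTION field */
	case SAI_READ_CAPACITY_16:	return "READ CAPACITY (16)";
	case SAI_GET_LBA_STATUS:	return "GET LBA STATUS";
	case SAI_REPORT_REFERRALS:	return "REPORT REFERRALS";
	default:			return "unsupported";
	}
}

int main(void)
{
	uint8_t cdb[16] = { 0x9e, SAI_REPORT_REFERRALS };

	printf("%s\n", sai_name(cdb));
	return 0;
}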
+1 -1
include/target/iscsi/iscsi_transport.h
··· 94 /* 95 * From iscsi_target_util.c 96 */ 97 - extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); 98 extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, 99 unsigned char *, __be32); 100 extern void iscsit_release_cmd(struct iscsi_cmd *);
··· 94 /* 95 * From iscsi_target_util.c 96 */ 97 + extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int); 98 extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, 99 unsigned char *, __be32); 100 extern void iscsit_release_cmd(struct iscsi_cmd *);
+7
include/target/target_core_backend.h
··· 41 unsigned int (*get_io_opt)(struct se_device *); 42 unsigned char *(*get_sense_buffer)(struct se_cmd *); 43 bool (*get_write_cache)(struct se_device *); 44 }; 45 46 struct sbc_ops { ··· 73 sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv, 74 sector_t lba, sector_t nolb), 75 void *priv); 76 77 void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); 78 int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
··· 41 unsigned int (*get_io_opt)(struct se_device *); 42 unsigned char *(*get_sense_buffer)(struct se_cmd *); 43 bool (*get_write_cache)(struct se_device *); 44 + int (*init_prot)(struct se_device *); 45 + int (*format_prot)(struct se_device *); 46 + void (*free_prot)(struct se_device *); 47 }; 48 49 struct sbc_ops { ··· 70 sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv, 71 sector_t lba, sector_t nolb), 72 void *priv); 73 + sense_reason_t sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int, 74 + unsigned int, struct scatterlist *, int); 75 + sense_reason_t sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int, 76 + unsigned int, struct scatterlist *, int); 77 78 void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); 79 int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
+76 -6
include/target/target_core_base.h
··· 37 /* Used by transport_send_check_condition_and_sense() */ 38 #define SPC_SENSE_KEY_OFFSET 2 39 #define SPC_ADD_SENSE_LEN_OFFSET 7 40 #define SPC_ASC_KEY_OFFSET 12 41 #define SPC_ASCQ_KEY_OFFSET 13 42 #define TRANSPORT_IQN_LEN 224 ··· 115 /* Queue Algorithm Modifier default for restricted reordering in control mode page */ 116 #define DA_EMULATE_REST_REORD 0 117 118 - #define SE_INQUIRY_BUF 512 119 #define SE_MODE_PAGE_BUF 512 120 #define SE_SENSE_BUF 96 121 ··· 208 TCM_OUT_OF_RESOURCES = R(0x12), 209 TCM_PARAMETER_LIST_LENGTH_ERROR = R(0x13), 210 TCM_MISCOMPARE_VERIFY = R(0x14), 211 #undef R 212 }; 213 ··· 253 254 struct se_cmd; 255 256 struct t10_alua { 257 /* ALUA Target Port Group ID */ 258 u16 alua_tg_pt_gps_counter; 259 u32 alua_tg_pt_gps_count; 260 spinlock_t tg_pt_gps_lock; 261 struct se_device *t10_dev; 262 /* Used for default ALUA Target Port Group */ ··· 308 u16 tg_pt_gp_id; 309 int tg_pt_gp_valid_id; 310 int tg_pt_gp_alua_supported_states; 311 int tg_pt_gp_alua_access_status; 312 int tg_pt_gp_alua_access_type; 313 int tg_pt_gp_nonop_delay_msecs; ··· 317 int tg_pt_gp_implicit_trans_secs; 318 int tg_pt_gp_pref; 319 int tg_pt_gp_write_metadata; 320 - /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */ 321 - #define ALUA_MD_BUF_LEN 1024 322 - u32 tg_pt_gp_md_buf_len; 323 u32 tg_pt_gp_members; 324 atomic_t tg_pt_gp_alua_access_state; 325 atomic_t tg_pt_gp_ref_cnt; ··· 326 struct config_group tg_pt_gp_group; 327 struct list_head tg_pt_gp_list; 328 struct list_head tg_pt_gp_mem_list; 329 }; 330 331 struct t10_alua_tg_pt_gp_member { ··· 441 struct list_head tmr_list; 442 }; 443 444 struct se_cmd { 445 /* SAM response code being sent to initiator */ 446 u8 scsi_status; ··· 552 void *priv; 553 554 /* Used for lun->lun_ref counting */ 555 - bool lun_ref_active; 556 }; 557 558 struct se_ua { 559 u8 ua_asc; 560 u8 ua_ascq; 561 struct se_node_acl *ua_nacl; 562 - struct list_head ua_dev_list; 563 struct list_head ua_nacl_list; 564 }; 565 ··· 670 int emulate_tpws; 671 int emulate_caw; 672 int emulate_3pc; 673 int enforce_pr_isids; 674 int is_nonrot; 675 int emulate_rest_reord; ··· 804 /* Linked list for struct se_hba struct se_device list */ 805 struct list_head dev_list; 806 struct se_lun xcopy_lun; 807 }; 808 809 struct se_hba {
··· 37 /* Used by transport_send_check_condition_and_sense() */ 38 #define SPC_SENSE_KEY_OFFSET 2 39 #define SPC_ADD_SENSE_LEN_OFFSET 7 40 + #define SPC_DESC_TYPE_OFFSET 8 41 + #define SPC_ADDITIONAL_DESC_LEN_OFFSET 9 42 + #define SPC_VALIDITY_OFFSET 10 43 #define SPC_ASC_KEY_OFFSET 12 44 #define SPC_ASCQ_KEY_OFFSET 13 45 #define TRANSPORT_IQN_LEN 224 ··· 112 /* Queue Algorithm Modifier default for restricted reordering in control mode page */ 113 #define DA_EMULATE_REST_REORD 0 114 115 + #define SE_INQUIRY_BUF 1024 116 #define SE_MODE_PAGE_BUF 512 117 #define SE_SENSE_BUF 96 118 ··· 205 TCM_OUT_OF_RESOURCES = R(0x12), 206 TCM_PARAMETER_LIST_LENGTH_ERROR = R(0x13), 207 TCM_MISCOMPARE_VERIFY = R(0x14), 208 + TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15), 209 + TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16), 210 + TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17), 211 #undef R 212 }; 213 ··· 247 248 struct se_cmd; 249 250 + struct t10_alua_lba_map_member { 251 + struct list_head lba_map_mem_list; 252 + int lba_map_mem_alua_state; 253 + int lba_map_mem_alua_pg_id; 254 + }; 255 + 256 + struct t10_alua_lba_map { 257 + u64 lba_map_first_lba; 258 + u64 lba_map_last_lba; 259 + struct list_head lba_map_list; 260 + struct list_head lba_map_mem_list; 261 + }; 262 + 263 struct t10_alua { 264 /* ALUA Target Port Group ID */ 265 u16 alua_tg_pt_gps_counter; 266 u32 alua_tg_pt_gps_count; 267 + /* Referrals support */ 268 + spinlock_t lba_map_lock; 269 + u32 lba_map_segment_size; 270 + u32 lba_map_segment_multiplier; 271 + struct list_head lba_map_list; 272 spinlock_t tg_pt_gps_lock; 273 struct se_device *t10_dev; 274 /* Used for default ALUA Target Port Group */ ··· 284 u16 tg_pt_gp_id; 285 int tg_pt_gp_valid_id; 286 int tg_pt_gp_alua_supported_states; 287 + int tg_pt_gp_alua_pending_state; 288 + int tg_pt_gp_alua_previous_state; 289 int tg_pt_gp_alua_access_status; 290 int tg_pt_gp_alua_access_type; 291 int tg_pt_gp_nonop_delay_msecs; ··· 291 int tg_pt_gp_implicit_trans_secs; 292 int tg_pt_gp_pref; 293 int tg_pt_gp_write_metadata; 294 u32 tg_pt_gp_members; 295 atomic_t tg_pt_gp_alua_access_state; 296 atomic_t tg_pt_gp_ref_cnt; ··· 303 struct config_group tg_pt_gp_group; 304 struct list_head tg_pt_gp_list; 305 struct list_head tg_pt_gp_mem_list; 306 + struct se_port *tg_pt_gp_alua_port; 307 + struct se_node_acl *tg_pt_gp_alua_nacl; 308 + struct delayed_work tg_pt_gp_transition_work; 309 + struct completion *tg_pt_gp_transition_complete; 310 }; 311 312 struct t10_alua_tg_pt_gp_member { ··· 414 struct list_head tmr_list; 415 }; 416 417 + enum target_prot_op { 418 + TARGET_PROT_NORMAL = 0, 419 + TARGET_PROT_DIN_INSERT, 420 + TARGET_PROT_DOUT_INSERT, 421 + TARGET_PROT_DIN_STRIP, 422 + TARGET_PROT_DOUT_STRIP, 423 + TARGET_PROT_DIN_PASS, 424 + TARGET_PROT_DOUT_PASS, 425 + }; 426 + 427 + enum target_prot_ho { 428 + PROT_SEPERATED, 429 + PROT_INTERLEAVED, 430 + }; 431 + 432 + enum target_prot_type { 433 + TARGET_DIF_TYPE0_PROT, 434 + TARGET_DIF_TYPE1_PROT, 435 + TARGET_DIF_TYPE2_PROT, 436 + TARGET_DIF_TYPE3_PROT, 437 + }; 438 + 439 + struct se_dif_v1_tuple { 440 + __be16 guard_tag; 441 + __be16 app_tag; 442 + __be32 ref_tag; 443 + }; 444 + 445 struct se_cmd { 446 /* SAM response code being sent to initiator */ 447 u8 scsi_status; ··· 497 void *priv; 498 499 /* Used for lun->lun_ref counting */ 500 + int lun_ref_active; 501 + 502 + /* DIF related members */ 503 + enum target_prot_op prot_op; 504 + enum target_prot_type prot_type; 505 + u32 prot_length; 506 + u32 reftag_seed; 507 + struct scatterlist *t_prot_sg; 
508 + unsigned int t_prot_nents; 509 + enum target_prot_ho prot_handover; 510 + sense_reason_t pi_err; 511 + sector_t bad_sector; 512 }; 513 514 struct se_ua { 515 u8 ua_asc; 516 u8 ua_ascq; 517 struct se_node_acl *ua_nacl; 518 struct list_head ua_nacl_list; 519 }; 520 ··· 605 int emulate_tpws; 606 int emulate_caw; 607 int emulate_3pc; 608 + int pi_prot_format; 609 + enum target_prot_type pi_prot_type; 610 + enum target_prot_type hw_pi_prot_type; 611 int enforce_pr_isids; 612 int is_nonrot; 613 int emulate_rest_reord; ··· 736 /* Linked list for struct se_hba struct se_device list */ 737 struct list_head dev_list; 738 struct se_lun xcopy_lun; 739 + /* Protection Information */ 740 + int prot_length; 741 }; 742 743 struct se_hba {
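Note on the new protection members above: the protection buffer carries one 8-byte se_dif_v1_tuple per logical block, so sbc_check_prot() sets cmd->prot_length = dev->prot_length * sectors and seeds the expected reference tag from the LBA for Type 1/2 or leaves it at 0xffffffff for Type 3. A tiny standalone sketch of that arithmetic; the enum and struct here are simplified copies, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>

enum prot_type { DIF_TYPE0, DIF_TYPE1, DIF_TYPE2, DIF_TYPE3 };

struct dif_tuple { uint16_t guard_tag, app_tag; uint32_t ref_tag; };

int main(void)
{
	unsigned int sectors = 2048;		/* a 1 MiB I/O at 512-byte blocks */
	unsigned int prot_len_per_block = sizeof(struct dif_tuple);	/* 8 bytes */
	uint64_t lba = 0x1000;
	enum prot_type type = DIF_TYPE1;

	/* cmd->prot_length = dev->prot_length * sectors */
	printf("protection buffer: %u bytes\n", prot_len_per_block * sectors);

	/* cmd->reftag_seed: LBA for Type 1/2, all-ones escape for Type 3 */
	uint32_t reftag_seed = (type == DIF_TYPE3) ? 0xffffffff : (uint32_t)lba;
	printf("reftag_seed: 0x%08x\n", reftag_seed);
	return 0;
}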
+2 -1
include/target/target_core_fabric.h
··· 105 sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); 106 int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *, 107 unsigned char *, unsigned char *, u32, u32, int, int, int, 108 - struct scatterlist *, u32, struct scatterlist *, u32); 109 int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, 110 unsigned char *, u32, u32, int, int, int); 111 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
··· 105 sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); 106 int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *, 107 unsigned char *, unsigned char *, u32, u32, int, int, int, 108 + struct scatterlist *, u32, struct scatterlist *, u32, 109 + struct scatterlist *, u32); 110 int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, 111 unsigned char *, u32, u32, int, int, int); 112 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
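Note on the widened target_submit_cmd_map_sgls() prototype: fabrics that carry PI metadata can now hand the core a separate protection scatterlist, while existing callers such as vhost-scsi pass NULL, 0. A hedged kernel-context sketch of such a call site; example_fabric_submit() and its parameters are hypothetical, the command setup and SGL mapping are elided, and only the argument order follows the prototype above.

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/*
 * Hypothetical fabric submission path: data SGLs plus a separate
 * protection SGL carrying one 8-byte DIF tuple per logical block.
 */
static int example_fabric_submit(struct se_cmd *se_cmd,
				 struct se_session *se_sess,
				 unsigned char *cdb, unsigned char *sense,
				 u32 unpacked_lun, u32 data_length,
				 int task_attr, int data_dir,
				 struct scatterlist *data_sgl, u32 data_nents,
				 struct scatterlist *prot_sgl, u32 prot_nents)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
					  unpacked_lun, data_length,
					  task_attr, data_dir,
					  TARGET_SCF_ACK_KREF,
					  data_sgl, data_nents,
					  NULL, 0,		/* no BIDI SGLs */
					  prot_sgl, prot_nents);
}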
+14 -7
lib/percpu_ida.c
··· 132 /** 133 * percpu_ida_alloc - allocate a tag 134 * @pool: pool to allocate from 135 - * @gfp: gfp flags 136 * 137 * Returns a tag - an integer in the range [0..nr_tags) (passed to 138 * tag_pool_init()), or otherwise -ENOSPC on allocation failure. 139 * 140 * Safe to be called from interrupt context (assuming it isn't passed 141 - * __GFP_WAIT, of course). 142 * 143 * @gfp indicates whether or not to wait until a free id is available (it's not 144 * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep 145 * however long it takes until another thread frees an id (same semantics as a 146 * mempool). 147 * 148 - * Will not fail if passed __GFP_WAIT. 149 */ 150 - int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) 151 { 152 DEFINE_WAIT(wait); 153 struct percpu_ida_cpu *tags; ··· 174 * 175 * global lock held and irqs disabled, don't need percpu lock 176 */ 177 - prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); 178 179 if (!tags->nr_free) 180 alloc_global_tags(pool, tags); ··· 192 spin_unlock(&pool->lock); 193 local_irq_restore(flags); 194 195 - if (tag >= 0 || !(gfp & __GFP_WAIT)) 196 break; 197 198 schedule(); 199 200 local_irq_save(flags); 201 tags = this_cpu_ptr(pool->tag_cpu); 202 } 203 204 - finish_wait(&pool->wait, &wait); 205 return tag; 206 } 207 EXPORT_SYMBOL_GPL(percpu_ida_alloc);
··· 132 /** 133 * percpu_ida_alloc - allocate a tag 134 * @pool: pool to allocate from 135 + * @state: task state for prepare_to_wait 136 * 137 * Returns a tag - an integer in the range [0..nr_tags) (passed to 138 * tag_pool_init()), or otherwise -ENOSPC on allocation failure. 139 * 140 * Safe to be called from interrupt context (assuming it isn't passed 141 + * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course). 142 * 143 * @gfp indicates whether or not to wait until a free id is available (it's not 144 * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep 145 * however long it takes until another thread frees an id (same semantics as a 146 * mempool). 147 * 148 + * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE. 149 */ 150 + int percpu_ida_alloc(struct percpu_ida *pool, int state) 151 { 152 DEFINE_WAIT(wait); 153 struct percpu_ida_cpu *tags; ··· 174 * 175 * global lock held and irqs disabled, don't need percpu lock 176 */ 177 + if (state != TASK_RUNNING) 178 + prepare_to_wait(&pool->wait, &wait, state); 179 180 if (!tags->nr_free) 181 alloc_global_tags(pool, tags); ··· 191 spin_unlock(&pool->lock); 192 local_irq_restore(flags); 193 194 + if (tag >= 0 || state == TASK_RUNNING) 195 break; 196 + 197 + if (signal_pending_state(state, current)) { 198 + tag = -ERESTARTSYS; 199 + break; 200 + } 201 202 schedule(); 203 204 local_irq_save(flags); 205 tags = this_cpu_ptr(pool->tag_cpu); 206 } 207 + if (state != TASK_RUNNING) 208 + finish_wait(&pool->wait, &wait); 209 210 return tag; 211 } 212 EXPORT_SYMBOL_GPL(percpu_ida_alloc);
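Note on the percpu_ida_alloc() conversion: with the gfp_t parameter replaced by a task state, the blocking behaviour is explicit at each call site. TASK_RUNNING never sleeps and may return -ENOSPC, TASK_INTERRUPTIBLE may return -ERESTARTSYS if a signal arrives while waiting, and TASK_UNINTERRUPTIBLE waits until another thread frees a tag. A hedged kernel-context sketch of the three patterns; the grab_tag_*() wrappers are illustrative, pool setup is elided.

#include <linux/percpu_ida.h>
#include <linux/sched.h>

/* Atomic/fast path (e.g. interrupt context): never sleeps, may return -ENOSPC. */
static int grab_tag_atomic(struct percpu_ida *pool)
{
	return percpu_ida_alloc(pool, TASK_RUNNING);
}

/* Process context, signal-aware: may return -ERESTARTSYS while waiting. */
static int grab_tag_interruptible(struct percpu_ida *pool)
{
	return percpu_ida_alloc(pool, TASK_INTERRUPTIBLE);
}

/* Process context that must not fail: blocks until another CPU frees a tag. */
static int grab_tag_blocking(struct percpu_ida *pool)
{
	return percpu_ida_alloc(pool, TASK_UNINTERRUPTIBLE);
}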