Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target updates from Nicholas Bellinger:
"It's been usually busy for summer, with most of the efforts centered
around TCMU developments and various target-core + fabric driver bug
fixing activities. Not particularly large in terms of LoC, but lots of
smaller patches from many different folks.

The highlights include:

- ibmvscsis logical partition manager support (Michael Cyr + Bryant
Ly)

- Convert target/iblock WRITE_SAME to blkdev_issue_zeroout (hch +
nab)

- Add support for TMR percpu LUN reference counting (nab)

- Fix a potential deadlock between EXTENDED_COPY and iscsi shutdown
(Bart)

- Fix COMPARE_AND_WRITE caw_sem leak during se_cmd quiesce (Jiang Yi)

- Fix TCMU module removal (Xiubo Li)

- Fix iser-target OOPs during login failure (Andrea Righi + Sagi)

- Breakup target-core free_device backend driver callback (mnc)

- Perform TCMU add/delete/reconfig synchronously (mnc)

- Fix TCMU multiple UIO open/close sequences (mnc)

- Fix TCMU CHECK_CONDITION sense handling (mnc)

- Fix target-core SAM_STAT_BUSY + TASK_SET_FULL handling (mnc + nab)

- Introduce TYPE_ZBC support in PSCSI (Damien Le Moal)

- Fix possible TCMU memory leak + OOPs when recalculating cmd base
size (Xiubo Li + Bryant Ly + Damien Le Moal + mnc)

- Add login_keys_workaround attribute for non RFC initiators (Robert
LeBlanc + Arun Easi + nab)"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (68 commits)
iscsi-target: Add login_keys_workaround attribute for non RFC initiators
Revert "qla2xxx: Fix incorrect tcm_qla2xxx_free_cmd use during TMR ABORT"
tcmu: clean up the code and with one small fix
tcmu: Fix possbile memory leak / OOPs when recalculating cmd base size
target: export lio pgr/alua support as device attr
target: Fix return sense reason in target_scsi3_emulate_pr_out
target: Fix cmd size for PR-OUT in passthrough_parse_cdb
tcmu: Fix dev_config_store
target: pscsi: Introduce TYPE_ZBC support
target: Use macro for WRITE_VERIFY_32 operation codes
target: fix SAM_STAT_BUSY/TASK_SET_FULL handling
target: remove transport_complete
pscsi: finish cmd processing from pscsi_req_done
tcmu: fix sense handling during completion
target: add helper to copy sense to se_cmd buffer
target: do not require a transport_complete for SCF_TRANSPORT_TASK_SENSE
target: make device_mutex and device_list static
tcmu: Fix flushing cmd entry dcache page
tcmu: fix multiple uio open/close sequences
tcmu: drop configured check in destroy
...

+1364 -706
+1 -1
drivers/infiniband/ulp/isert/ib_isert.c
··· 1452 1452 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1453 1453 { 1454 1454 struct isert_conn *isert_conn = wc->qp->qp_context; 1455 - struct ib_device *ib_dev = isert_conn->cm_id->device; 1455 + struct ib_device *ib_dev = isert_conn->device->ib_device; 1456 1456 1457 1457 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1458 1458 isert_print_wc(wc, "login recv");
+2 -2
drivers/infiniband/ulp/srpt/ib_srpt.c
··· 1157 1157 } 1158 1158 spin_unlock_irqrestore(&ioctx->spinlock, flags); 1159 1159 1160 - pr_debug("Aborting cmd with state %d and tag %lld\n", state, 1161 - ioctx->cmd.tag); 1160 + pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state, 1161 + ioctx->state, ioctx->cmd.tag); 1162 1162 1163 1163 switch (state) { 1164 1164 case SRPT_STATE_NEW:
+143 -13
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
··· 155 155 qrc = h_free_crq(vscsi->dds.unit_id); 156 156 switch (qrc) { 157 157 case H_SUCCESS: 158 + spin_lock_bh(&vscsi->intr_lock); 159 + vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS; 160 + spin_unlock_bh(&vscsi->intr_lock); 158 161 break; 159 162 160 163 case H_HARDWARE: ··· 424 421 spin_lock_bh(&vscsi->intr_lock); 425 422 new_state = vscsi->new_state; 426 423 vscsi->new_state = 0; 424 + 425 + vscsi->flags |= DISCONNECT_SCHEDULED; 426 + vscsi->flags &= ~SCHEDULE_DISCONNECT; 427 427 428 428 pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags, 429 429 vscsi->state); ··· 808 802 long rc = ADAPT_SUCCESS; 809 803 uint format; 810 804 805 + rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND, 30000, 806 + 0, 0, 0, 0); 807 + if (rc == H_SUCCESS) 808 + vscsi->flags |= PREP_FOR_SUSPEND_ENABLED; 809 + else if (rc != H_NOT_FOUND) 810 + pr_err("Error from Enable Prepare for Suspend: %ld\n", rc); 811 + 811 812 vscsi->flags &= PRESERVE_FLAG_FIELDS; 812 813 vscsi->rsp_q_timer.timer_pops = 0; 813 814 vscsi->debit = 0; ··· 964 951 } 965 952 966 953 /** 954 + * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL 955 + * @vscsi: Pointer to our adapter structure 956 + * @idle: Indicates whether we were called from adapter_idle. This 957 + * is important to know if we need to do a disconnect, since if 958 + * we're called from adapter_idle, we're still processing the 959 + * current disconnect, so we can't just call post_disconnect. 960 + * 961 + * This function is called when the adapter is idle when phyp has sent 962 + * us a Prepare for Suspend Transport Event. 
963 + * 964 + * EXECUTION ENVIRONMENT: 965 + * Process or interrupt environment called with interrupt lock held 966 + */ 967 + static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle) 968 + { 969 + long rc = 0; 970 + struct viosrp_crq *crq; 971 + 972 + /* See if there is a Resume event in the queue */ 973 + crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; 974 + 975 + pr_debug("ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n", 976 + vscsi->flags, vscsi->state, (int)crq->valid); 977 + 978 + if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) { 979 + rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0, 980 + 0, 0); 981 + if (rc) { 982 + pr_err("Ready for Suspend Vioctl failed: %ld\n", rc); 983 + rc = 0; 984 + } 985 + } else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) && 986 + (vscsi->flags & PREP_FOR_SUSPEND_ABORTED)) || 987 + ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) || 988 + (crq->format != RESUME_FROM_SUSP)))) { 989 + if (idle) { 990 + vscsi->state = ERR_DISCONNECT_RECONNECT; 991 + ibmvscsis_reset_queue(vscsi); 992 + rc = -1; 993 + } else if (vscsi->state == CONNECTED) { 994 + ibmvscsis_post_disconnect(vscsi, 995 + ERR_DISCONNECT_RECONNECT, 0); 996 + } 997 + 998 + vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE; 999 + 1000 + if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) || 1001 + (crq->format != RESUME_FROM_SUSP))) 1002 + pr_err("Invalid element in CRQ after Prepare for Suspend"); 1003 + } 1004 + 1005 + vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED); 1006 + 1007 + return rc; 1008 + } 1009 + 1010 + /** 967 1011 * ibmvscsis_trans_event() - Handle a Transport Event 968 1012 * @vscsi: Pointer to our adapter structure 969 1013 * @crq: Pointer to CRQ entry containing the Transport Event ··· 1044 974 case PARTNER_FAILED: 1045 975 case PARTNER_DEREGISTER: 1046 976 ibmvscsis_delete_client_info(vscsi, true); 1047 - break; 1048 - 1049 - default: 1050 - rc = ERROR; 1051 - 
dev_err(&vscsi->dev, "trans_event: invalid format %d\n", 1052 - (uint)crq->format); 1053 - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 1054 - RESPONSE_Q_DOWN); 1055 - break; 1056 - } 1057 - 1058 - if (rc == ADAPT_SUCCESS) { 977 + if (crq->format == MIGRATED) 978 + vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE; 1059 979 switch (vscsi->state) { 1060 980 case NO_QUEUE: 1061 981 case ERR_DISCONNECTED: ··· 1094 1034 vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT); 1095 1035 break; 1096 1036 } 1037 + break; 1038 + 1039 + case PREPARE_FOR_SUSPEND: 1040 + pr_debug("Prep for Suspend, crq status = 0x%x\n", 1041 + (int)crq->status); 1042 + switch (vscsi->state) { 1043 + case ERR_DISCONNECTED: 1044 + case WAIT_CONNECTION: 1045 + case CONNECTED: 1046 + ibmvscsis_ready_for_suspend(vscsi, false); 1047 + break; 1048 + case SRP_PROCESSING: 1049 + vscsi->resume_state = vscsi->state; 1050 + vscsi->flags |= PREP_FOR_SUSPEND_PENDING; 1051 + if (crq->status == CRQ_ENTRY_OVERWRITTEN) 1052 + vscsi->flags |= PREP_FOR_SUSPEND_OVERWRITE; 1053 + ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0); 1054 + break; 1055 + case NO_QUEUE: 1056 + case UNDEFINED: 1057 + case UNCONFIGURING: 1058 + case WAIT_ENABLED: 1059 + case ERR_DISCONNECT: 1060 + case ERR_DISCONNECT_RECONNECT: 1061 + case WAIT_IDLE: 1062 + pr_err("Invalid state for Prepare for Suspend Trans Event: 0x%x\n", 1063 + vscsi->state); 1064 + break; 1065 + } 1066 + break; 1067 + 1068 + case RESUME_FROM_SUSP: 1069 + pr_debug("Resume from Suspend, crq status = 0x%x\n", 1070 + (int)crq->status); 1071 + if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) { 1072 + vscsi->flags |= PREP_FOR_SUSPEND_ABORTED; 1073 + } else { 1074 + if ((crq->status == CRQ_ENTRY_OVERWRITTEN) || 1075 + (vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE)) { 1076 + ibmvscsis_post_disconnect(vscsi, 1077 + ERR_DISCONNECT_RECONNECT, 1078 + 0); 1079 + vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE; 1080 + } 1081 + } 1082 + break; 1083 + 1084 + default: 1085 + rc = ERROR; 1086 + 
dev_err(&vscsi->dev, "trans_event: invalid format %d\n", 1087 + (uint)crq->format); 1088 + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 1089 + RESPONSE_Q_DOWN); 1090 + break; 1097 1091 } 1098 1092 1099 1093 rc = vscsi->flags & SCHEDULE_DISCONNECT; ··· 1315 1201 static void ibmvscsis_adapter_idle(struct scsi_info *vscsi) 1316 1202 { 1317 1203 int free_qs = false; 1204 + long rc = 0; 1318 1205 1319 1206 pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags, 1320 1207 vscsi->state); ··· 1355 1240 vscsi->rsp_q_timer.timer_pops = 0; 1356 1241 vscsi->debit = 0; 1357 1242 vscsi->credit = 0; 1358 - if (vscsi->flags & TRANS_EVENT) { 1243 + if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) { 1244 + vscsi->state = vscsi->resume_state; 1245 + vscsi->resume_state = 0; 1246 + rc = ibmvscsis_ready_for_suspend(vscsi, true); 1247 + vscsi->flags &= ~DISCONNECT_SCHEDULED; 1248 + if (rc) 1249 + break; 1250 + } else if (vscsi->flags & TRANS_EVENT) { 1359 1251 vscsi->state = WAIT_CONNECTION; 1360 1252 vscsi->flags &= PRESERVE_FLAG_FIELDS; 1361 1253 } else { ··· 3914 3792 { 3915 3793 struct ibmvscsis_tport *tport = 3916 3794 container_of(wwn, struct ibmvscsis_tport, tport_wwn); 3795 + u16 tpgt; 3917 3796 int rc; 3797 + 3798 + if (strstr(name, "tpgt_") != name) 3799 + return ERR_PTR(-EINVAL); 3800 + rc = kstrtou16(name + 5, 0, &tpgt); 3801 + if (rc) 3802 + return ERR_PTR(rc); 3803 + tport->tport_tpgt = tpgt; 3918 3804 3919 3805 tport->releasing = false; 3920 3806
+23 -2
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
··· 262 262 #define DISCONNECT_SCHEDULED 0x00800 263 263 /* remove function is sleeping */ 264 264 #define CFG_SLEEPING 0x01000 265 + /* Register for Prepare for Suspend Transport Events */ 266 + #define PREP_FOR_SUSPEND_ENABLED 0x02000 267 + /* Prepare for Suspend event sent */ 268 + #define PREP_FOR_SUSPEND_PENDING 0x04000 269 + /* Resume from Suspend event sent */ 270 + #define PREP_FOR_SUSPEND_ABORTED 0x08000 271 + /* Prepare for Suspend event overwrote another CRQ entry */ 272 + #define PREP_FOR_SUSPEND_OVERWRITE 0x10000 265 273 u32 flags; 266 274 /* adapter lock */ 267 275 spinlock_t intr_lock; ··· 280 272 /* used in crq, to tag what iu the response is for */ 281 273 u64 empty_iu_tag; 282 274 uint new_state; 275 + uint resume_state; 283 276 /* control block for the response queue timer */ 284 277 struct timer_cb rsp_q_timer; 285 278 /* keep last client to enable proper accounting */ ··· 333 324 #define TARGET_STOP(VSCSI) (long)(((VSCSI)->state & DONT_PROCESS_STATE) | \ 334 325 ((VSCSI)->flags & BLOCK)) 335 326 327 + #define PREP_FOR_SUSPEND_FLAGS (PREP_FOR_SUSPEND_ENABLED | \ 328 + PREP_FOR_SUSPEND_PENDING | \ 329 + PREP_FOR_SUSPEND_ABORTED | \ 330 + PREP_FOR_SUSPEND_OVERWRITE) 331 + 336 332 /* flag bit that are not reset during disconnect */ 337 - #define PRESERVE_FLAG_FIELDS 0 333 + #define PRESERVE_FLAG_FIELDS (PREP_FOR_SUSPEND_FLAGS) 338 334 339 335 #define vio_iu(IUE) ((union viosrp_iu *)((IUE)->sbuf->buf)) 340 336 ··· 347 333 #define WRITE_CMD(cdb) (((cdb)[0] & 0x1F) == 0xA) 348 334 349 335 #ifndef H_GET_PARTNER_INFO 350 - #define H_GET_PARTNER_INFO 0x0000000000000008LL 336 + #define H_GET_PARTNER_INFO 0x0000000000000008LL 351 337 #endif 338 + #ifndef H_ENABLE_PREPARE_FOR_SUSPEND 339 + #define H_ENABLE_PREPARE_FOR_SUSPEND 0x000000000000001DLL 340 + #endif 341 + #ifndef H_READY_FOR_SUSPEND 342 + #define H_READY_FOR_SUSPEND 0x000000000000001ELL 343 + #endif 344 + 352 345 353 346 #define h_copy_rdma(l, sa, sb, da, db) \ 354 347 
plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
+4 -1
drivers/scsi/ibmvscsi_tgt/libsrp.h
··· 30 30 UNUSED_FORMAT = 0, 31 31 PARTNER_FAILED = 1, 32 32 PARTNER_DEREGISTER = 2, 33 - MIGRATED = 6 33 + MIGRATED = 6, 34 + PREPARE_FOR_SUSPEND = 9, 35 + RESUME_FROM_SUSP = 0xA 34 36 }; 35 37 36 38 enum srp_status { 39 + CRQ_ENTRY_OVERWRITTEN = 0x20, 37 40 HEADER_DESCRIPTOR = 0xF1, 38 41 PING = 0xF5, 39 42 PING_RESPONSE = 0xF6
+9 -29
drivers/scsi/qla2xxx/qla_target.c
··· 1874 1874 struct abts_recv_from_24xx *abts, struct fc_port *sess) 1875 1875 { 1876 1876 struct qla_hw_data *ha = vha->hw; 1877 - struct se_session *se_sess = sess->se_sess; 1878 1877 struct qla_tgt_mgmt_cmd *mcmd; 1879 - struct qla_tgt_cmd *cmd; 1880 - struct se_cmd *se_cmd; 1881 1878 int rc; 1882 - bool found_lun = false; 1883 - unsigned long flags; 1884 1879 1885 - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 1886 - list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { 1887 - if (se_cmd->tag == abts->exchange_addr_to_abort) { 1888 - found_lun = true; 1889 - break; 1890 - } 1891 - } 1892 - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 1893 - 1894 - /* cmd not in LIO lists, look in qla list */ 1895 - if (!found_lun) { 1896 - if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) { 1897 - /* send TASK_ABORT response immediately */ 1898 - qlt_24xx_send_abts_resp(ha->base_qpair, abts, 1899 - FCP_TMF_CMPL, false); 1900 - return 0; 1901 - } else { 1902 - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081, 1903 - "unable to find cmd in driver or LIO for tag 0x%x\n", 1904 - abts->exchange_addr_to_abort); 1905 - return -ENOENT; 1906 - } 1880 + if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) { 1881 + /* send TASK_ABORT response immediately */ 1882 + qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false); 1883 + return 0; 1907 1884 } 1908 1885 1909 1886 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, ··· 1896 1919 } 1897 1920 memset(mcmd, 0, sizeof(*mcmd)); 1898 1921 1899 - cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); 1900 1922 mcmd->sess = sess; 1901 1923 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); 1902 1924 mcmd->reset_count = ha->base_qpair->chip_reset; 1903 1925 mcmd->tmr_func = QLA_TGT_ABTS; 1904 1926 mcmd->qpair = ha->base_qpair; 1905 1927 1906 - rc = ha->tgt.tgt_ops->handle_tmr(mcmd, cmd->unpacked_lun, mcmd->tmr_func, 1928 + /* 1929 + * LUN is looked up by target-core internally based on the passed 1930 
+ * abts->exchange_addr_to_abort tag. 1931 + */ 1932 + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, mcmd->tmr_func, 1907 1933 abts->exchange_addr_to_abort); 1908 1934 if (rc != 0) { 1909 1935 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
+3 -1
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 600 600 struct fc_port *sess = mcmd->sess; 601 601 struct se_cmd *se_cmd = &mcmd->se_cmd; 602 602 int transl_tmr_func = 0; 603 + int flags = TARGET_SCF_ACK_KREF; 603 604 604 605 switch (tmr_func) { 605 606 case QLA_TGT_ABTS: 606 607 pr_debug("%ld: ABTS received\n", sess->vha->host_no); 607 608 transl_tmr_func = TMR_ABORT_TASK; 609 + flags |= TARGET_SCF_LOOKUP_LUN_FROM_TAG; 608 610 break; 609 611 case QLA_TGT_2G_ABORT_TASK: 610 612 pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no); ··· 639 637 } 640 638 641 639 return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd, 642 - transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); 640 + transl_tmr_func, GFP_ATOMIC, tag, flags); 643 641 } 644 642 645 643 static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+2 -8
drivers/target/iscsi/iscsi_target.c
··· 488 488 489 489 void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 490 490 { 491 - bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); 492 - 493 491 spin_lock_bh(&conn->cmd_lock); 494 492 if (!list_empty(&cmd->i_conn_node) && 495 493 !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP)) 496 494 list_del_init(&cmd->i_conn_node); 497 495 spin_unlock_bh(&conn->cmd_lock); 498 496 499 - __iscsit_free_cmd(cmd, scsi_cmd, true); 497 + __iscsit_free_cmd(cmd, true); 500 498 } 501 499 EXPORT_SYMBOL(iscsit_aborted_task); 502 500 ··· 1249 1251 * execution. These exceptions are processed in CmdSN order using 1250 1252 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below. 1251 1253 */ 1252 - if (cmd->sense_reason) { 1253 - if (cmd->reject_reason) 1254 - return 0; 1255 - 1254 + if (cmd->sense_reason) 1256 1255 return 1; 1257 - } 1258 1256 /* 1259 1257 * Call directly into transport_generic_new_cmd() to perform 1260 1258 * the backend memory allocation.
+2
drivers/target/iscsi/iscsi_target_configfs.c
··· 781 781 DEF_TPG_ATTRIB(t10_pi); 782 782 DEF_TPG_ATTRIB(fabric_prot_type); 783 783 DEF_TPG_ATTRIB(tpg_enabled_sendtargets); 784 + DEF_TPG_ATTRIB(login_keys_workaround); 784 785 785 786 static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { 786 787 &iscsi_tpg_attrib_attr_authentication, ··· 797 796 &iscsi_tpg_attrib_attr_t10_pi, 798 797 &iscsi_tpg_attrib_attr_fabric_prot_type, 799 798 &iscsi_tpg_attrib_attr_tpg_enabled_sendtargets, 799 + &iscsi_tpg_attrib_attr_login_keys_workaround, 800 800 NULL, 801 801 }; 802 802
+4 -26
drivers/target/iscsi/iscsi_target_nego.c
··· 655 655 iscsit_deaccess_np(np, tpg, tpg_np); 656 656 } 657 657 658 - static void iscsi_target_do_cleanup(struct work_struct *work) 659 - { 660 - struct iscsi_conn *conn = container_of(work, 661 - struct iscsi_conn, login_cleanup_work.work); 662 - struct sock *sk = conn->sock->sk; 663 - struct iscsi_login *login = conn->login; 664 - struct iscsi_np *np = login->np; 665 - struct iscsi_portal_group *tpg = conn->tpg; 666 - struct iscsi_tpg_np *tpg_np = conn->tpg_np; 667 - 668 - pr_debug("Entering iscsi_target_do_cleanup\n"); 669 - 670 - cancel_delayed_work_sync(&conn->login_work); 671 - conn->orig_state_change(sk); 672 - 673 - iscsi_target_restore_sock_callbacks(conn); 674 - iscsi_target_login_drop(conn, login); 675 - iscsit_deaccess_np(np, tpg, tpg_np); 676 - 677 - pr_debug("iscsi_target_do_cleanup done()\n"); 678 - } 679 - 680 658 static void iscsi_target_sk_state_change(struct sock *sk) 681 659 { 682 660 struct iscsi_conn *conn; ··· 864 886 SENDER_TARGET, 865 887 login->rsp_buf, 866 888 &login->rsp_length, 867 - conn->param_list); 889 + conn->param_list, 890 + conn->tpg->tpg_attrib.login_keys_workaround); 868 891 if (ret < 0) 869 892 return -1; 870 893 ··· 935 956 SENDER_TARGET, 936 957 login->rsp_buf, 937 958 &login->rsp_length, 938 - conn->param_list); 959 + conn->param_list, 960 + conn->tpg->tpg_attrib.login_keys_workaround); 939 961 if (ret < 0) { 940 962 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 941 963 ISCSI_LOGIN_STATUS_INIT_ERR); ··· 1062 1082 int sessiontype = 0, ret = 0, tag_num, tag_size; 1063 1083 1064 1084 INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx); 1065 - INIT_DELAYED_WORK(&conn->login_cleanup_work, iscsi_target_do_cleanup); 1066 1085 iscsi_target_set_sock_callbacks(conn); 1067 1086 1068 1087 login->np = np; ··· 1310 1331 1311 1332 if (ret < 0) { 1312 1333 cancel_delayed_work_sync(&conn->login_work); 1313 - cancel_delayed_work_sync(&conn->login_cleanup_work); 1314 1334 iscsi_target_restore_sock_callbacks(conn); 
1315 1335 iscsi_remove_failed_auth_entry(conn); 1316 1336 }
+28 -13
drivers/target/iscsi/iscsi_target_parameters.c
··· 765 765 return 0; 766 766 } 767 767 768 - static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param) 768 + static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param, 769 + bool keys_workaround) 769 770 { 770 771 if (IS_TYPE_BOOL_AND(param)) { 771 772 if (!strcmp(param->value, NO)) ··· 774 773 } else if (IS_TYPE_BOOL_OR(param)) { 775 774 if (!strcmp(param->value, YES)) 776 775 SET_PSTATE_REPLY_OPTIONAL(param); 777 - /* 778 - * Required for gPXE iSCSI boot client 779 - */ 780 - if (!strcmp(param->name, IMMEDIATEDATA)) 781 - SET_PSTATE_REPLY_OPTIONAL(param); 776 + 777 + if (keys_workaround) { 778 + /* 779 + * Required for gPXE iSCSI boot client 780 + */ 781 + if (!strcmp(param->name, IMMEDIATEDATA)) 782 + SET_PSTATE_REPLY_OPTIONAL(param); 783 + } 782 784 } else if (IS_TYPE_NUMBER(param)) { 783 785 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) 784 786 SET_PSTATE_REPLY_OPTIONAL(param); 785 - /* 786 - * Required for gPXE iSCSI boot client 787 - */ 788 - if (!strcmp(param->name, MAXCONNECTIONS)) 789 - SET_PSTATE_REPLY_OPTIONAL(param); 787 + 788 + if (keys_workaround) { 789 + /* 790 + * Required for Mellanox Flexboot PXE boot ROM 791 + */ 792 + if (!strcmp(param->name, FIRSTBURSTLENGTH)) 793 + SET_PSTATE_REPLY_OPTIONAL(param); 794 + 795 + /* 796 + * Required for gPXE iSCSI boot client 797 + */ 798 + if (!strcmp(param->name, MAXCONNECTIONS)) 799 + SET_PSTATE_REPLY_OPTIONAL(param); 800 + } 790 801 } else if (IS_PHASE_DECLARATIVE(param)) 791 802 SET_PSTATE_REPLY_OPTIONAL(param); 792 803 } ··· 1435 1422 u8 sender, 1436 1423 char *textbuf, 1437 1424 u32 *length, 1438 - struct iscsi_param_list *param_list) 1425 + struct iscsi_param_list *param_list, 1426 + bool keys_workaround) 1439 1427 { 1440 1428 char *output_buf = NULL; 1441 1429 struct iscsi_extra_response *er; ··· 1472 1458 *length += 1; 1473 1459 output_buf = textbuf + *length; 1474 1460 SET_PSTATE_PROPOSER(param); 1475 - iscsi_check_proposer_for_optional_reply(param); 
1461 + iscsi_check_proposer_for_optional_reply(param, 1462 + keys_workaround); 1476 1463 pr_debug("Sending key: %s=%s\n", 1477 1464 param->name, param->value); 1478 1465 }
+1 -1
drivers/target/iscsi/iscsi_target_parameters.h
··· 46 46 extern int iscsi_update_param_value(struct iscsi_param *, char *); 47 47 extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *); 48 48 extern int iscsi_encode_text_output(u8, u8, char *, u32 *, 49 - struct iscsi_param_list *); 49 + struct iscsi_param_list *, bool); 50 50 extern int iscsi_check_negotiated_keys(struct iscsi_param_list *); 51 51 extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *, 52 52 struct iscsi_param_list *);
+21 -6
drivers/target/iscsi/iscsi_target_tpg.c
··· 227 227 a->t10_pi = TA_DEFAULT_T10_PI; 228 228 a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE; 229 229 a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS; 230 + a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND; 230 231 } 231 232 232 233 int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) ··· 312 311 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; 313 312 int ret; 314 313 315 - spin_lock(&tpg->tpg_state_lock); 316 314 if (tpg->tpg_state == TPG_STATE_ACTIVE) { 317 315 pr_err("iSCSI target portal group: %hu is already" 318 316 " active, ignoring request.\n", tpg->tpgt); 319 - spin_unlock(&tpg->tpg_state_lock); 320 317 return -EINVAL; 321 318 } 322 319 /* ··· 323 324 * is enforced (as per default), and remove the NONE option. 324 325 */ 325 326 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list); 326 - if (!param) { 327 - spin_unlock(&tpg->tpg_state_lock); 327 + if (!param) 328 328 return -EINVAL; 329 - } 330 329 331 330 if (tpg->tpg_attrib.authentication) { 332 331 if (!strcmp(param->value, NONE)) { ··· 338 341 goto err; 339 342 } 340 343 344 + spin_lock(&tpg->tpg_state_lock); 341 345 tpg->tpg_state = TPG_STATE_ACTIVE; 342 346 spin_unlock(&tpg->tpg_state_lock); 343 347 ··· 351 353 return 0; 352 354 353 355 err: 354 - spin_unlock(&tpg->tpg_state_lock); 355 356 return ret; 356 357 } 357 358 ··· 893 896 a->tpg_enabled_sendtargets = flag; 894 897 pr_debug("iSCSI_TPG[%hu] - TPG enabled bit required for SendTargets:" 895 898 " %s\n", tpg->tpgt, (a->tpg_enabled_sendtargets) ? 
"ON" : "OFF"); 899 + 900 + return 0; 901 + } 902 + 903 + int iscsit_ta_login_keys_workaround( 904 + struct iscsi_portal_group *tpg, 905 + u32 flag) 906 + { 907 + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; 908 + 909 + if ((flag != 0) && (flag != 1)) { 910 + pr_err("Illegal value %d\n", flag); 911 + return -EINVAL; 912 + } 913 + 914 + a->login_keys_workaround = flag; 915 + pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ", 916 + tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF"); 896 917 897 918 return 0; 898 919 }
+1
drivers/target/iscsi/iscsi_target_tpg.h
··· 48 48 extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); 49 49 extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32); 50 50 extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32); 51 + extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32); 51 52 52 53 #endif /* ISCSI_TARGET_TPG_H */
+13 -47
drivers/target/iscsi/iscsi_target_util.c
··· 167 167 168 168 cmd->se_cmd.map_tag = tag; 169 169 cmd->conn = conn; 170 + cmd->data_direction = DMA_NONE; 170 171 INIT_LIST_HEAD(&cmd->i_conn_node); 171 172 INIT_LIST_HEAD(&cmd->datain_list); 172 173 INIT_LIST_HEAD(&cmd->cmd_r2t_list); ··· 712 711 } 713 712 EXPORT_SYMBOL(iscsit_release_cmd); 714 713 715 - void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, 716 - bool check_queues) 714 + void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues) 717 715 { 718 716 struct iscsi_conn *conn = cmd->conn; 719 717 720 - if (scsi_cmd) { 721 - if (cmd->data_direction == DMA_TO_DEVICE) { 722 - iscsit_stop_dataout_timer(cmd); 723 - iscsit_free_r2ts_from_list(cmd); 724 - } 725 - if (cmd->data_direction == DMA_FROM_DEVICE) 726 - iscsit_free_all_datain_reqs(cmd); 718 + if (cmd->data_direction == DMA_TO_DEVICE) { 719 + iscsit_stop_dataout_timer(cmd); 720 + iscsit_free_r2ts_from_list(cmd); 727 721 } 722 + if (cmd->data_direction == DMA_FROM_DEVICE) 723 + iscsit_free_all_datain_reqs(cmd); 728 724 729 725 if (conn && check_queues) { 730 726 iscsit_remove_cmd_from_immediate_queue(cmd, conn); ··· 734 736 735 737 void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) 736 738 { 737 - struct se_cmd *se_cmd = NULL; 739 + struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL; 738 740 int rc; 739 - bool op_scsi = false; 740 - /* 741 - * Determine if a struct se_cmd is associated with 742 - * this struct iscsi_cmd. 
743 - */ 744 - switch (cmd->iscsi_opcode) { 745 - case ISCSI_OP_SCSI_CMD: 746 - op_scsi = true; 747 - /* 748 - * Fallthrough 749 - */ 750 - case ISCSI_OP_SCSI_TMFUNC: 751 - se_cmd = &cmd->se_cmd; 752 - __iscsit_free_cmd(cmd, op_scsi, shutdown); 741 + 742 + __iscsit_free_cmd(cmd, shutdown); 743 + if (se_cmd) { 753 744 rc = transport_generic_free_cmd(se_cmd, shutdown); 754 745 if (!rc && shutdown && se_cmd->se_sess) { 755 - __iscsit_free_cmd(cmd, op_scsi, shutdown); 746 + __iscsit_free_cmd(cmd, shutdown); 756 747 target_put_sess_cmd(se_cmd); 757 748 } 758 - break; 759 - case ISCSI_OP_REJECT: 760 - /* 761 - * Handle special case for REJECT when iscsi_add_reject*() has 762 - * overwritten the original iscsi_opcode assignment, and the 763 - * associated cmd->se_cmd needs to be released. 764 - */ 765 - if (cmd->se_cmd.se_tfo != NULL) { 766 - se_cmd = &cmd->se_cmd; 767 - __iscsit_free_cmd(cmd, true, shutdown); 768 - 769 - rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); 770 - if (!rc && shutdown && se_cmd->se_sess) { 771 - __iscsit_free_cmd(cmd, true, shutdown); 772 - target_put_sess_cmd(se_cmd); 773 - } 774 - break; 775 - } 776 - /* Fall-through */ 777 - default: 778 - __iscsit_free_cmd(cmd, false, shutdown); 749 + } else { 779 750 iscsit_release_cmd(cmd); 780 - break; 781 751 } 782 752 } 783 753 EXPORT_SYMBOL(iscsit_free_cmd);
+1 -1
drivers/target/iscsi/iscsi_target_util.h
··· 37 37 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); 38 38 extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); 39 39 extern void iscsit_release_cmd(struct iscsi_cmd *); 40 - extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool); 40 + extern void __iscsit_free_cmd(struct iscsi_cmd *, bool); 41 41 extern void iscsit_free_cmd(struct iscsi_cmd *, bool); 42 42 extern int iscsit_check_session_usage_count(struct iscsi_session *); 43 43 extern void iscsit_dec_session_usage_count(struct iscsi_session *);
+18 -61
drivers/target/loopback/tcm_loop.c
··· 51 51 */ 52 52 static int tcm_loop_check_stop_free(struct se_cmd *se_cmd) 53 53 { 54 - /* 55 - * Do not release struct se_cmd's containing a valid TMR 56 - * pointer. These will be released directly in tcm_loop_device_reset() 57 - * with transport_generic_free_cmd(). 58 - */ 59 - if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 60 - return 0; 61 - /* 62 - * Release the struct se_cmd, which will make a callback to release 63 - * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd() 64 - */ 65 - transport_generic_free_cmd(se_cmd, 0); 66 - return 1; 54 + return transport_generic_free_cmd(se_cmd, 0); 67 55 } 68 56 69 57 static void tcm_loop_release_cmd(struct se_cmd *se_cmd) ··· 206 218 { 207 219 struct se_cmd *se_cmd = NULL; 208 220 struct se_session *se_sess; 209 - struct se_portal_group *se_tpg; 210 221 struct tcm_loop_nexus *tl_nexus; 211 222 struct tcm_loop_cmd *tl_cmd = NULL; 212 - struct tcm_loop_tmr *tl_tmr = NULL; 213 223 int ret = TMR_FUNCTION_FAILED, rc; 214 224 215 225 /* ··· 226 240 return ret; 227 241 } 228 242 229 - tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); 230 - if (!tl_tmr) { 231 - pr_err("Unable to allocate memory for tl_tmr\n"); 232 - goto release; 233 - } 234 - init_waitqueue_head(&tl_tmr->tl_tmr_wait); 243 + init_completion(&tl_cmd->tmr_done); 235 244 236 245 se_cmd = &tl_cmd->tl_se_cmd; 237 - se_tpg = &tl_tpg->tl_se_tpg; 238 246 se_sess = tl_tpg->tl_nexus->se_sess; 239 - /* 240 - * Initialize struct se_cmd descriptor from target_core_mod infrastructure 241 - */ 242 - transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, 243 - DMA_NONE, TCM_SIMPLE_TAG, 244 - &tl_cmd->tl_sense_buf[0]); 245 247 246 - rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL); 248 + rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun, 249 + NULL, tmr, GFP_KERNEL, task, 250 + TARGET_SCF_ACK_KREF); 247 251 if (rc < 0) 248 252 goto release; 249 - 250 - if (tmr == TMR_ABORT_TASK) 251 - se_cmd->se_tmr_req->ref_task_tag = task; 252 - 
253 - /* 254 - * Locate the underlying TCM struct se_lun 255 - */ 256 - if (transport_lookup_tmr_lun(se_cmd, lun) < 0) { 257 - ret = TMR_LUN_DOES_NOT_EXIST; 258 - goto release; 259 - } 260 - /* 261 - * Queue the TMR to TCM Core and sleep waiting for 262 - * tcm_loop_queue_tm_rsp() to wake us up. 263 - */ 264 - transport_generic_handle_tmr(se_cmd); 265 - wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete)); 266 - /* 267 - * The TMR LUN_RESET has completed, check the response status and 268 - * then release allocations. 269 - */ 253 + wait_for_completion(&tl_cmd->tmr_done); 270 254 ret = se_cmd->se_tmr_req->response; 255 + target_put_sess_cmd(se_cmd); 256 + 257 + out: 258 + return ret; 259 + 271 260 release: 272 261 if (se_cmd) 273 - transport_generic_free_cmd(se_cmd, 1); 262 + transport_generic_free_cmd(se_cmd, 0); 274 263 else 275 264 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); 276 - kfree(tl_tmr); 277 - return ret; 265 + goto out; 278 266 } 279 267 280 268 static int tcm_loop_abort_task(struct scsi_cmnd *sc) ··· 629 669 630 670 static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) 631 671 { 632 - struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; 633 - struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr; 634 - /* 635 - * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead 636 - * and wake up the wait_queue_head_t in tcm_loop_device_reset() 637 - */ 638 - atomic_set(&tl_tmr->tmr_complete, 1); 639 - wake_up(&tl_tmr->tl_tmr_wait); 672 + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, 673 + struct tcm_loop_cmd, tl_se_cmd); 674 + 675 + /* Wake up tcm_loop_issue_tmr(). */ 676 + complete(&tl_cmd->tmr_done); 640 677 } 641 678 642 679 static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
+1 -5
drivers/target/loopback/tcm_loop.h
··· 16 16 /* The TCM I/O descriptor that is accessed via container_of() */ 17 17 struct se_cmd tl_se_cmd; 18 18 struct work_struct work; 19 + struct completion tmr_done; 19 20 /* Sense buffer that will be mapped into outgoing status */ 20 21 unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER]; 21 - }; 22 - 23 - struct tcm_loop_tmr { 24 - atomic_t tmr_complete; 25 - wait_queue_head_t tl_tmr_wait; 26 22 }; 27 23 28 24 struct tcm_loop_nexus {
+4 -4
drivers/target/target_core_alua.c
··· 205 205 /* 206 206 * TARGET PORT GROUP 207 207 */ 208 - buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff); 209 - buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff); 208 + put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]); 209 + off += 2; 210 210 211 211 off++; /* Skip over Reserved */ 212 212 /* ··· 235 235 /* 236 236 * Set RELATIVE TARGET PORT IDENTIFIER 237 237 */ 238 - buf[off++] = ((lun->lun_rtpi >> 8) & 0xff); 239 - buf[off++] = (lun->lun_rtpi & 0xff); 238 + put_unaligned_be16(lun->lun_rtpi, &buf[off]); 239 + off += 2; 240 240 rd_len += 4; 241 241 } 242 242 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+29 -1
drivers/target/target_core_configfs.c
··· 1085 1085 return count; 1086 1086 } 1087 1087 1088 + static ssize_t alua_support_show(struct config_item *item, char *page) 1089 + { 1090 + struct se_dev_attrib *da = to_attrib(item); 1091 + u8 flags = da->da_dev->transport->transport_flags; 1092 + 1093 + return snprintf(page, PAGE_SIZE, "%d\n", 1094 + flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1); 1095 + } 1096 + 1097 + static ssize_t pgr_support_show(struct config_item *item, char *page) 1098 + { 1099 + struct se_dev_attrib *da = to_attrib(item); 1100 + u8 flags = da->da_dev->transport->transport_flags; 1101 + 1102 + return snprintf(page, PAGE_SIZE, "%d\n", 1103 + flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1); 1104 + } 1105 + 1088 1106 CONFIGFS_ATTR(, emulate_model_alias); 1089 1107 CONFIGFS_ATTR(, emulate_dpo); 1090 1108 CONFIGFS_ATTR(, emulate_fua_write); ··· 1134 1116 CONFIGFS_ATTR(, unmap_granularity_alignment); 1135 1117 CONFIGFS_ATTR(, unmap_zeroes_data); 1136 1118 CONFIGFS_ATTR(, max_write_same_len); 1119 + CONFIGFS_ATTR_RO(, alua_support); 1120 + CONFIGFS_ATTR_RO(, pgr_support); 1137 1121 1138 1122 /* 1139 1123 * dev_attrib attributes for devices using the target core SBC/SPC ··· 1174 1154 &attr_unmap_granularity_alignment, 1175 1155 &attr_unmap_zeroes_data, 1176 1156 &attr_max_write_same_len, 1157 + &attr_alua_support, 1158 + &attr_pgr_support, 1177 1159 NULL, 1178 1160 }; 1179 1161 EXPORT_SYMBOL(sbc_attrib_attrs); ··· 1190 1168 &attr_hw_block_size, 1191 1169 &attr_hw_max_sectors, 1192 1170 &attr_hw_queue_depth, 1171 + &attr_alua_support, 1172 + &attr_pgr_support, 1193 1173 NULL, 1194 1174 }; 1195 1175 EXPORT_SYMBOL(passthrough_attrib_attrs); ··· 2260 2236 target_free_device(dev); 2261 2237 } 2262 2238 2263 - static struct configfs_item_operations target_core_dev_item_ops = { 2239 + /* 2240 + * Used in target_core_fabric_configfs.c to verify valid se_device symlink 2241 + * within target_fabric_port_link() 2242 + */ 2243 + struct configfs_item_operations target_core_dev_item_ops = { 2264 2244 
.release = target_core_dev_release, 2265 2245 }; 2266 2246
+118 -27
drivers/target/target_core_device.c
··· 49 49 #include "target_core_pr.h" 50 50 #include "target_core_ua.h" 51 51 52 - DEFINE_MUTEX(g_device_mutex); 53 - LIST_HEAD(g_device_list); 52 + static DEFINE_MUTEX(device_mutex); 53 + static LIST_HEAD(device_list); 54 + static DEFINE_IDR(devices_idr); 54 55 55 56 static struct se_hba *lun0_hba; 56 57 /* not static, needed by tpg.c */ ··· 169 168 rcu_read_lock(); 170 169 deve = target_nacl_find_deve(nacl, unpacked_lun); 171 170 if (deve) { 172 - se_cmd->se_lun = rcu_dereference(deve->se_lun); 173 171 se_lun = rcu_dereference(deve->se_lun); 172 + 173 + if (!percpu_ref_tryget_live(&se_lun->lun_ref)) { 174 + se_lun = NULL; 175 + goto out_unlock; 176 + } 177 + 178 + se_cmd->se_lun = rcu_dereference(deve->se_lun); 174 179 se_cmd->pr_res_key = deve->pr_res_key; 175 180 se_cmd->orig_fe_lun = unpacked_lun; 181 + se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 182 + se_cmd->lun_ref_active = true; 176 183 } 184 + out_unlock: 177 185 rcu_read_unlock(); 178 186 179 187 if (!se_lun) { ··· 192 182 unpacked_lun); 193 183 return -ENODEV; 194 184 } 195 - /* 196 - * XXX: Add percpu se_lun->lun_ref reference count for TMR 197 - */ 198 185 se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); 199 186 se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev); 200 187 ··· 763 756 if (!dev) 764 757 return NULL; 765 758 766 - dev->dev_link_magic = SE_DEV_LINK_MAGIC; 767 759 dev->se_hba = hba; 768 760 dev->transport = hba->backend->ops; 769 761 dev->prot_length = sizeof(struct t10_pi_tuple); 770 762 dev->hba_index = hba->hba_index; 771 763 772 - INIT_LIST_HEAD(&dev->dev_list); 773 764 INIT_LIST_HEAD(&dev->dev_sep_list); 774 765 INIT_LIST_HEAD(&dev->dev_tmr_list); 775 766 INIT_LIST_HEAD(&dev->delayed_cmd_list); 776 767 INIT_LIST_HEAD(&dev->state_list); 777 768 INIT_LIST_HEAD(&dev->qf_cmd_list); 778 - INIT_LIST_HEAD(&dev->g_dev_node); 779 769 spin_lock_init(&dev->execute_task_lock); 780 770 spin_lock_init(&dev->delayed_cmd_lock); 781 771 spin_lock_init(&dev->dev_reservation_lock); ··· 855 
851 attrib->unmap_granularity = q->limits.discard_granularity / block_size; 856 852 attrib->unmap_granularity_alignment = q->limits.discard_alignment / 857 853 block_size; 858 - attrib->unmap_zeroes_data = 0; 854 + attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors); 859 855 return true; 860 856 } 861 857 EXPORT_SYMBOL(target_configure_unmap_from_queue); ··· 879 875 } 880 876 EXPORT_SYMBOL(target_to_linux_sector); 881 877 878 + /** 879 + * target_find_device - find a se_device by its dev_index 880 + * @id: dev_index 881 + * @do_depend: true if caller needs target_depend_item to be done 882 + * 883 + * If do_depend is true, the caller must do a target_undepend_item 884 + * when finished using the device. 885 + * 886 + * If do_depend is false, the caller must be called in a configfs 887 + * callback or during removal. 888 + */ 889 + struct se_device *target_find_device(int id, bool do_depend) 890 + { 891 + struct se_device *dev; 892 + 893 + mutex_lock(&device_mutex); 894 + dev = idr_find(&devices_idr, id); 895 + if (dev && do_depend && target_depend_item(&dev->dev_group.cg_item)) 896 + dev = NULL; 897 + mutex_unlock(&device_mutex); 898 + return dev; 899 + } 900 + EXPORT_SYMBOL(target_find_device); 901 + 902 + struct devices_idr_iter { 903 + int (*fn)(struct se_device *dev, void *data); 904 + void *data; 905 + }; 906 + 907 + static int target_devices_idr_iter(int id, void *p, void *data) 908 + { 909 + struct devices_idr_iter *iter = data; 910 + struct se_device *dev = p; 911 + 912 + /* 913 + * We add the device early to the idr, so it can be used 914 + * by backend modules during configuration. We do not want 915 + * to allow other callers to access partially setup devices, 916 + * so we skip them here. 
917 + */ 918 + if (!(dev->dev_flags & DF_CONFIGURED)) 919 + return 0; 920 + 921 + return iter->fn(dev, iter->data); 922 + } 923 + 924 + /** 925 + * target_for_each_device - iterate over configured devices 926 + * @fn: iterator function 927 + * @data: pointer to data that will be passed to fn 928 + * 929 + * fn must return 0 to continue looping over devices. non-zero will break 930 + * from the loop and return that value to the caller. 931 + */ 932 + int target_for_each_device(int (*fn)(struct se_device *dev, void *data), 933 + void *data) 934 + { 935 + struct devices_idr_iter iter; 936 + int ret; 937 + 938 + iter.fn = fn; 939 + iter.data = data; 940 + 941 + mutex_lock(&device_mutex); 942 + ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter); 943 + mutex_unlock(&device_mutex); 944 + return ret; 945 + } 946 + 882 947 int target_configure_device(struct se_device *dev) 883 948 { 884 949 struct se_hba *hba = dev->se_hba; 885 - int ret; 950 + int ret, id; 886 951 887 952 if (dev->dev_flags & DF_CONFIGURED) { 888 953 pr_err("se_dev->se_dev_ptr already set for storage" ··· 959 886 return -EEXIST; 960 887 } 961 888 889 + /* 890 + * Add early so modules like tcmu can use during its 891 + * configuration. 892 + */ 893 + mutex_lock(&device_mutex); 894 + /* 895 + * Use cyclic to try and avoid collisions with devices 896 + * that were recently removed. 897 + */ 898 + id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL); 899 + mutex_unlock(&device_mutex); 900 + if (id < 0) { 901 + ret = -ENOMEM; 902 + goto out; 903 + } 904 + dev->dev_index = id; 905 + 962 906 ret = dev->transport->configure_device(dev); 963 907 if (ret) 964 - goto out; 908 + goto out_free_index; 965 909 /* 966 910 * XXX: there is not much point to have two different values here.. 
967 911 */ ··· 993 903 dev->dev_attrib.hw_block_size); 994 904 dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; 995 905 996 - dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); 997 906 dev->creation_time = get_jiffies_64(); 998 907 999 908 ret = core_setup_alua(dev); 1000 909 if (ret) 1001 - goto out; 910 + goto out_free_index; 1002 911 1003 912 /* 1004 913 * Startup the struct se_device processing thread ··· 1035 946 hba->dev_count++; 1036 947 spin_unlock(&hba->device_lock); 1037 948 1038 - mutex_lock(&g_device_mutex); 1039 - list_add_tail(&dev->g_dev_node, &g_device_list); 1040 - mutex_unlock(&g_device_mutex); 1041 - 1042 949 dev->dev_flags |= DF_CONFIGURED; 1043 950 1044 951 return 0; 1045 952 1046 953 out_free_alua: 1047 954 core_alua_free_lu_gp_mem(dev); 955 + out_free_index: 956 + mutex_lock(&device_mutex); 957 + idr_remove(&devices_idr, dev->dev_index); 958 + mutex_unlock(&device_mutex); 1048 959 out: 1049 960 se_release_vpd_for_dev(dev); 1050 961 return ret; ··· 1059 970 if (dev->dev_flags & DF_CONFIGURED) { 1060 971 destroy_workqueue(dev->tmr_wq); 1061 972 1062 - mutex_lock(&g_device_mutex); 1063 - list_del(&dev->g_dev_node); 1064 - mutex_unlock(&g_device_mutex); 973 + dev->transport->destroy_device(dev); 974 + 975 + mutex_lock(&device_mutex); 976 + idr_remove(&devices_idr, dev->dev_index); 977 + mutex_unlock(&device_mutex); 1065 978 1066 979 spin_lock(&hba->device_lock); 1067 980 hba->dev_count--; ··· 1178 1087 TRANSPORT_FLAG_PASSTHROUGH_PGR)) { 1179 1088 if (cdb[0] == PERSISTENT_RESERVE_IN) { 1180 1089 cmd->execute_cmd = target_scsi3_emulate_pr_in; 1181 - size = (cdb[7] << 8) + cdb[8]; 1090 + size = get_unaligned_be16(&cdb[7]); 1182 1091 return target_cmd_size_check(cmd, size); 1183 1092 } 1184 1093 if (cdb[0] == PERSISTENT_RESERVE_OUT) { 1185 1094 cmd->execute_cmd = target_scsi3_emulate_pr_out; 1186 - size = (cdb[7] << 8) + cdb[8]; 1095 + size = get_unaligned_be32(&cdb[5]); 1187 1096 return target_cmd_size_check(cmd, size); 1188 
1097 } 1189 1098 1190 1099 if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) { 1191 1100 cmd->execute_cmd = target_scsi2_reservation_release; 1192 1101 if (cdb[0] == RELEASE_10) 1193 - size = (cdb[7] << 8) | cdb[8]; 1102 + size = get_unaligned_be16(&cdb[7]); 1194 1103 else 1195 1104 size = cmd->data_length; 1196 1105 return target_cmd_size_check(cmd, size); ··· 1198 1107 if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) { 1199 1108 cmd->execute_cmd = target_scsi2_reservation_reserve; 1200 1109 if (cdb[0] == RESERVE_10) 1201 - size = (cdb[7] << 8) | cdb[8]; 1110 + size = get_unaligned_be16(&cdb[7]); 1202 1111 else 1203 1112 size = cmd->data_length; 1204 1113 return target_cmd_size_check(cmd, size); ··· 1217 1126 case WRITE_16: 1218 1127 case WRITE_VERIFY: 1219 1128 case WRITE_VERIFY_12: 1220 - case 0x8e: /* WRITE_VERIFY_16 */ 1129 + case WRITE_VERIFY_16: 1221 1130 case COMPARE_AND_WRITE: 1222 1131 case XDWRITEREAD_10: 1223 1132 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; ··· 1226 1135 switch (get_unaligned_be16(&cdb[8])) { 1227 1136 case READ_32: 1228 1137 case WRITE_32: 1229 - case 0x0c: /* WRITE_VERIFY_32 */ 1138 + case WRITE_VERIFY_32: 1230 1139 case XDWRITEREAD_32: 1231 1140 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1232 1141 break;
+15 -10
drivers/target/target_core_fabric_configfs.c
··· 65 65 pr_debug("Setup generic %s\n", __stringify(_name)); \ 66 66 } 67 67 68 + static struct configfs_item_operations target_fabric_port_item_ops; 69 + 68 70 /* Start of tfc_tpg_mappedlun_cit */ 69 71 70 72 static int target_fabric_mappedlun_link( ··· 74 72 struct config_item *lun_ci) 75 73 { 76 74 struct se_dev_entry *deve; 77 - struct se_lun *lun = container_of(to_config_group(lun_ci), 78 - struct se_lun, lun_group); 75 + struct se_lun *lun; 79 76 struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), 80 77 struct se_lun_acl, se_lun_group); 81 78 struct se_portal_group *se_tpg; 82 79 struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s; 83 80 bool lun_access_ro; 84 81 85 - if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) { 86 - pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:" 87 - " %p to struct lun: %p\n", lun_ci, lun); 82 + if (!lun_ci->ci_type || 83 + lun_ci->ci_type->ct_item_ops != &target_fabric_port_item_ops) { 84 + pr_err("Bad lun_ci, not a valid lun_ci pointer: %p\n", lun_ci); 88 85 return -EFAULT; 89 86 } 87 + lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); 88 + 90 89 /* 91 90 * Ensure that the source port exists 92 91 */ ··· 623 620 NULL, 624 621 }; 625 622 623 + extern struct configfs_item_operations target_core_dev_item_ops; 624 + 626 625 static int target_fabric_port_link( 627 626 struct config_item *lun_ci, 628 627 struct config_item *se_dev_ci) ··· 633 628 struct se_lun *lun = container_of(to_config_group(lun_ci), 634 629 struct se_lun, lun_group); 635 630 struct se_portal_group *se_tpg; 636 - struct se_device *dev = 637 - container_of(to_config_group(se_dev_ci), struct se_device, dev_group); 631 + struct se_device *dev; 638 632 struct target_fabric_configfs *tf; 639 633 int ret; 640 634 641 - if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) { 642 - pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:" 643 - " %p to struct se_device: %p\n", se_dev_ci, dev); 635 + if 
(!se_dev_ci->ci_type || 636 + se_dev_ci->ci_type->ct_item_ops != &target_core_dev_item_ops) { 637 + pr_err("Bad se_dev_ci, not a valid se_dev_ci pointer: %p\n", se_dev_ci); 644 638 return -EFAULT; 645 639 } 640 + dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group); 646 641 647 642 if (!(dev->dev_flags & DF_CONFIGURED)) { 648 643 pr_err("se_device not configured yet, cannot port link\n");
+3 -3
drivers/target/target_core_fabric_lib.c
··· 34 34 #include <linux/ctype.h> 35 35 #include <linux/spinlock.h> 36 36 #include <linux/export.h> 37 + #include <asm/unaligned.h> 37 38 38 39 #include <scsi/scsi_proto.h> 39 40 ··· 217 216 if (padding != 0) 218 217 len += padding; 219 218 220 - buf[2] = ((len >> 8) & 0xff); 221 - buf[3] = (len & 0xff); 219 + put_unaligned_be16(len, &buf[2]); 222 220 /* 223 221 * Increment value for total payload + header length for 224 222 * full status descriptor ··· 306 306 */ 307 307 if (out_tid_len) { 308 308 /* The shift works thanks to integer promotion rules */ 309 - add_len = (buf[2] << 8) | buf[3]; 309 + add_len = get_unaligned_be16(&buf[2]); 310 310 311 311 tid_len = strlen(&buf[4]); 312 312 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
+6 -1
drivers/target/target_core_file.c
··· 237 237 238 238 static void fd_free_device(struct se_device *dev) 239 239 { 240 + call_rcu(&dev->rcu_head, fd_dev_call_rcu); 241 + } 242 + 243 + static void fd_destroy_device(struct se_device *dev) 244 + { 240 245 struct fd_dev *fd_dev = FD_DEV(dev); 241 246 242 247 if (fd_dev->fd_file) { 243 248 filp_close(fd_dev->fd_file, NULL); 244 249 fd_dev->fd_file = NULL; 245 250 } 246 - call_rcu(&dev->rcu_head, fd_dev_call_rcu); 247 251 } 248 252 249 253 static int fd_do_rw(struct se_cmd *cmd, struct file *fd, ··· 830 826 .detach_hba = fd_detach_hba, 831 827 .alloc_device = fd_alloc_device, 832 828 .configure_device = fd_configure_device, 829 + .destroy_device = fd_destroy_device, 833 830 .free_device = fd_free_device, 834 831 .parse_cdb = fd_parse_cdb, 835 832 .set_configfs_dev_params = fd_set_configfs_dev_params,
+33 -19
drivers/target/target_core_iblock.c
··· 86 86 struct block_device *bd = NULL; 87 87 struct blk_integrity *bi; 88 88 fmode_t mode; 89 + unsigned int max_write_zeroes_sectors; 89 90 int ret = -ENOMEM; 90 91 91 92 if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) { ··· 130 129 * Enable write same emulation for IBLOCK and use 0xFFFF as 131 130 * the smaller WRITE_SAME(10) only has a two-byte block count. 132 131 */ 133 - dev->dev_attrib.max_write_same_len = 0xFFFF; 132 + max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd); 133 + if (max_write_zeroes_sectors) 134 + dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors; 135 + else 136 + dev->dev_attrib.max_write_same_len = 0xFFFF; 134 137 135 138 if (blk_queue_nonrot(q)) 136 139 dev->dev_attrib.is_nonrot = 1; ··· 190 185 191 186 static void iblock_free_device(struct se_device *dev) 192 187 { 188 + call_rcu(&dev->rcu_head, iblock_dev_call_rcu); 189 + } 190 + 191 + static void iblock_destroy_device(struct se_device *dev) 192 + { 193 193 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 194 194 195 195 if (ib_dev->ibd_bd != NULL) 196 196 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 197 197 if (ib_dev->ibd_bio_set != NULL) 198 198 bioset_free(ib_dev->ibd_bio_set); 199 - 200 - call_rcu(&dev->rcu_head, iblock_dev_call_rcu); 201 199 } 202 200 203 201 static unsigned long long iblock_emulate_read_cap_with_block_size( ··· 423 415 } 424 416 425 417 static sense_reason_t 426 - iblock_execute_write_same_direct(struct block_device *bdev, struct se_cmd *cmd) 418 + iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd) 427 419 { 428 420 struct se_device *dev = cmd->se_dev; 429 421 struct scatterlist *sg = &cmd->t_data_sg[0]; 430 - struct page *page = NULL; 431 - int ret; 422 + unsigned char *buf, zero = 0x00, *p = &zero; 423 + int rc, ret; 432 424 433 - if (sg->offset) { 434 - page = alloc_page(GFP_KERNEL); 435 - if (!page) 436 - return TCM_OUT_OF_RESOURCES; 437 - sg_copy_to_buffer(sg, cmd->t_data_nents, page_address(page), 438 - 
dev->dev_attrib.block_size); 439 - } 425 + buf = kmap(sg_page(sg)) + sg->offset; 426 + if (!buf) 427 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 428 + /* 429 + * Fall back to block_execute_write_same() slow-path if 430 + * incoming WRITE_SAME payload does not contain zeros. 431 + */ 432 + rc = memcmp(buf, p, cmd->data_length); 433 + kunmap(sg_page(sg)); 440 434 441 - ret = blkdev_issue_write_same(bdev, 435 + if (rc) 436 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 437 + 438 + ret = blkdev_issue_zeroout(bdev, 442 439 target_to_linux_sector(dev, cmd->t_task_lba), 443 440 target_to_linux_sector(dev, 444 441 sbc_get_write_same_sectors(cmd)), 445 - GFP_KERNEL, page ? page : sg_page(sg)); 446 - if (page) 447 - __free_page(page); 442 + GFP_KERNEL, false); 448 443 if (ret) 449 444 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 450 445 ··· 483 472 return TCM_INVALID_CDB_FIELD; 484 473 } 485 474 486 - if (bdev_write_same(bdev)) 487 - return iblock_execute_write_same_direct(bdev, cmd); 475 + if (bdev_write_zeroes_sectors(bdev)) { 476 + if (!iblock_execute_zero_out(bdev, cmd)) 477 + return 0; 478 + } 488 479 489 480 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 490 481 if (!ibr) ··· 861 848 .detach_hba = iblock_detach_hba, 862 849 .alloc_device = iblock_alloc_device, 863 850 .configure_device = iblock_configure_device, 851 + .destroy_device = iblock_destroy_device, 864 852 .free_device = iblock_free_device, 865 853 .parse_cdb = iblock_parse_cdb, 866 854 .set_configfs_dev_params = iblock_set_configfs_dev_params,
+2 -3
drivers/target/target_core_internal.h
··· 56 56 extern struct t10_alua_lu_gp *default_lu_gp; 57 57 58 58 /* target_core_device.c */ 59 - extern struct mutex g_device_mutex; 60 - extern struct list_head g_device_list; 61 - 62 59 int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev); 63 60 struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); 64 61 void target_pr_kref_release(struct kref *); ··· 84 87 struct se_device *target_alloc_device(struct se_hba *hba, const char *name); 85 88 int target_configure_device(struct se_device *dev); 86 89 void target_free_device(struct se_device *); 90 + int target_for_each_device(int (*fn)(struct se_device *dev, void *data), 91 + void *data); 87 92 88 93 /* target_core_configfs.c */ 89 94 void target_setup_backend_cits(struct target_backend *);
+25 -84
drivers/target/target_core_pr.c
··· 1562 1562 * first extract TransportID Parameter Data Length, and make sure 1563 1563 * the value matches up to the SCSI expected data transfer length. 1564 1564 */ 1565 - tpdl = (buf[24] & 0xff) << 24; 1566 - tpdl |= (buf[25] & 0xff) << 16; 1567 - tpdl |= (buf[26] & 0xff) << 8; 1568 - tpdl |= buf[27] & 0xff; 1565 + tpdl = get_unaligned_be32(&buf[24]); 1569 1566 1570 1567 if ((tpdl + 28) != cmd->data_length) { 1571 1568 pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header" ··· 3218 3221 goto out_put_pr_reg; 3219 3222 } 3220 3223 3221 - rtpi = (buf[18] & 0xff) << 8; 3222 - rtpi |= buf[19] & 0xff; 3223 - tid_len = (buf[20] & 0xff) << 24; 3224 - tid_len |= (buf[21] & 0xff) << 16; 3225 - tid_len |= (buf[22] & 0xff) << 8; 3226 - tid_len |= buf[23] & 0xff; 3224 + rtpi = get_unaligned_be16(&buf[18]); 3225 + tid_len = get_unaligned_be32(&buf[20]); 3227 3226 transport_kunmap_data_sg(cmd); 3228 3227 buf = NULL; 3229 3228 ··· 3545 3552 return ret; 3546 3553 } 3547 3554 3548 - static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) 3549 - { 3550 - unsigned int __v1, __v2; 3551 - 3552 - __v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3]; 3553 - __v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7]; 3554 - 3555 - return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 3556 - } 3557 - 3558 3555 /* 3559 3556 * See spc4r17 section 6.14 Table 170 3560 3557 */ ··· 3585 3602 if (cmd->data_length < 24) { 3586 3603 pr_warn("SPC-PR: Received PR OUT parameter list" 3587 3604 " length too small: %u\n", cmd->data_length); 3588 - return TCM_INVALID_PARAMETER_LIST; 3605 + return TCM_PARAMETER_LIST_LENGTH_ERROR; 3589 3606 } 3590 3607 3591 3608 /* ··· 3602 3619 /* 3603 3620 * From PERSISTENT_RESERVE_OUT parameter list (payload) 3604 3621 */ 3605 - res_key = core_scsi3_extract_reservation_key(&buf[0]); 3606 - sa_res_key = core_scsi3_extract_reservation_key(&buf[8]); 3622 + res_key = get_unaligned_be64(&buf[0]); 3623 + sa_res_key = 
get_unaligned_be64(&buf[8]); 3607 3624 /* 3608 3625 * REGISTER_AND_MOVE uses a different SA parameter list containing 3609 3626 * SCSI TransportIDs. ··· 3629 3646 /* 3630 3647 * SPEC_I_PT=1 is only valid for Service action: REGISTER 3631 3648 */ 3632 - if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) 3649 + if (spec_i_pt && (sa != PRO_REGISTER)) 3633 3650 return TCM_INVALID_PARAMETER_LIST; 3634 3651 3635 3652 /* ··· 3641 3658 * the sense key set to ILLEGAL REQUEST, and the additional sense 3642 3659 * code set to PARAMETER LIST LENGTH ERROR. 3643 3660 */ 3644 - if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && 3661 + if (!spec_i_pt && (sa != PRO_REGISTER_AND_MOVE) && 3645 3662 (cmd->data_length != 24)) { 3646 3663 pr_warn("SPC-PR: Received PR OUT illegal parameter" 3647 3664 " list length: %u\n", cmd->data_length); 3648 - return TCM_INVALID_PARAMETER_LIST; 3665 + return TCM_PARAMETER_LIST_LENGTH_ERROR; 3649 3666 } 3650 3667 3651 3668 /* ··· 3685 3702 break; 3686 3703 default: 3687 3704 pr_err("Unknown PERSISTENT_RESERVE_OUT service" 3688 - " action: 0x%02x\n", cdb[1] & 0x1f); 3705 + " action: 0x%02x\n", sa); 3689 3706 return TCM_INVALID_CDB_FIELD; 3690 3707 } 3691 3708 ··· 3717 3734 if (!buf) 3718 3735 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3719 3736 3720 - buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff); 3721 - buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff); 3722 - buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff); 3723 - buf[3] = (dev->t10_pr.pr_generation & 0xff); 3737 + put_unaligned_be32(dev->t10_pr.pr_generation, buf); 3724 3738 3725 3739 spin_lock(&dev->t10_pr.registration_lock); 3726 3740 list_for_each_entry(pr_reg, &dev->t10_pr.registration_list, ··· 3729 3749 if ((add_len + 8) > (cmd->data_length - 8)) 3730 3750 break; 3731 3751 3732 - buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff); 3733 - buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff); 3734 - buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff); 3735 - buf[off++] = 
((pr_reg->pr_res_key >> 32) & 0xff); 3736 - buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff); 3737 - buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff); 3738 - buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff); 3739 - buf[off++] = (pr_reg->pr_res_key & 0xff); 3740 - 3752 + put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); 3753 + off += 8; 3741 3754 add_len += 8; 3742 3755 } 3743 3756 spin_unlock(&dev->t10_pr.registration_lock); 3744 3757 3745 - buf[4] = ((add_len >> 24) & 0xff); 3746 - buf[5] = ((add_len >> 16) & 0xff); 3747 - buf[6] = ((add_len >> 8) & 0xff); 3748 - buf[7] = (add_len & 0xff); 3758 + put_unaligned_be32(add_len, &buf[4]); 3749 3759 3750 3760 transport_kunmap_data_sg(cmd); 3751 3761 ··· 3766 3796 if (!buf) 3767 3797 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3768 3798 3769 - buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff); 3770 - buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff); 3771 - buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff); 3772 - buf[3] = (dev->t10_pr.pr_generation & 0xff); 3799 + put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]); 3773 3800 3774 3801 spin_lock(&dev->dev_reservation_lock); 3775 3802 pr_reg = dev->dev_pr_res_holder; ··· 3774 3807 /* 3775 3808 * Set the hardcoded Additional Length 3776 3809 */ 3777 - buf[4] = ((add_len >> 24) & 0xff); 3778 - buf[5] = ((add_len >> 16) & 0xff); 3779 - buf[6] = ((add_len >> 8) & 0xff); 3780 - buf[7] = (add_len & 0xff); 3810 + put_unaligned_be32(add_len, &buf[4]); 3781 3811 3782 3812 if (cmd->data_length < 22) 3783 3813 goto err; ··· 3801 3837 else 3802 3838 pr_res_key = pr_reg->pr_res_key; 3803 3839 3804 - buf[8] = ((pr_res_key >> 56) & 0xff); 3805 - buf[9] = ((pr_res_key >> 48) & 0xff); 3806 - buf[10] = ((pr_res_key >> 40) & 0xff); 3807 - buf[11] = ((pr_res_key >> 32) & 0xff); 3808 - buf[12] = ((pr_res_key >> 24) & 0xff); 3809 - buf[13] = ((pr_res_key >> 16) & 0xff); 3810 - buf[14] = ((pr_res_key >> 8) & 0xff); 3811 - buf[15] = (pr_res_key & 0xff); 3840 + 
put_unaligned_be64(pr_res_key, &buf[8]); 3812 3841 /* 3813 3842 * Set the SCOPE and TYPE 3814 3843 */ ··· 3839 3882 if (!buf) 3840 3883 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3841 3884 3842 - buf[0] = ((add_len >> 8) & 0xff); 3843 - buf[1] = (add_len & 0xff); 3885 + put_unaligned_be16(add_len, &buf[0]); 3844 3886 buf[2] |= 0x10; /* CRH: Compatible Reservation Hanlding bit. */ 3845 3887 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */ 3846 3888 buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */ ··· 3903 3947 if (!buf) 3904 3948 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3905 3949 3906 - buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff); 3907 - buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff); 3908 - buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff); 3909 - buf[3] = (dev->t10_pr.pr_generation & 0xff); 3950 + put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]); 3910 3951 3911 3952 spin_lock(&dev->dev_reservation_lock); 3912 3953 if (dev->dev_pr_res_holder) { ··· 3945 3992 /* 3946 3993 * Set RESERVATION KEY 3947 3994 */ 3948 - buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff); 3949 - buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff); 3950 - buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff); 3951 - buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff); 3952 - buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff); 3953 - buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff); 3954 - buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff); 3955 - buf[off++] = (pr_reg->pr_res_key & 0xff); 3995 + put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); 3996 + off += 8; 3956 3997 off += 4; /* Skip Over Reserved area */ 3957 3998 3958 3999 /* ··· 3988 4041 if (!pr_reg->pr_reg_all_tg_pt) { 3989 4042 u16 sep_rtpi = pr_reg->tg_pt_sep_rtpi; 3990 4043 3991 - buf[off++] = ((sep_rtpi >> 8) & 0xff); 3992 - buf[off++] = (sep_rtpi & 0xff); 4044 + put_unaligned_be16(sep_rtpi, &buf[off]); 4045 + off += 2; 3993 4046 } else 3994 4047 off += 2; /* Skip over RELATIVE TARGET PORT 
IDENTIFIER */ 3995 4048 ··· 4009 4062 /* 4010 4063 * Set the ADDITIONAL DESCRIPTOR LENGTH 4011 4064 */ 4012 - buf[off++] = ((desc_len >> 24) & 0xff); 4013 - buf[off++] = ((desc_len >> 16) & 0xff); 4014 - buf[off++] = ((desc_len >> 8) & 0xff); 4015 - buf[off++] = (desc_len & 0xff); 4065 + put_unaligned_be32(desc_len, &buf[off]); 4016 4066 /* 4017 4067 * Size of full desctipor header minus TransportID 4018 4068 * containing $FABRIC_MOD specific) initiator device/port ··· 4026 4082 /* 4027 4083 * Set ADDITIONAL_LENGTH 4028 4084 */ 4029 - buf[4] = ((add_len >> 24) & 0xff); 4030 - buf[5] = ((add_len >> 16) & 0xff); 4031 - buf[6] = ((add_len >> 8) & 0xff); 4032 - buf[7] = (add_len & 0xff); 4085 + put_unaligned_be32(add_len, &buf[4]); 4033 4086 4034 4087 transport_kunmap_data_sg(cmd); 4035 4088
+41 -41
drivers/target/target_core_pscsi.c
··· 168 168 /* 169 169 * If MODE_SENSE still returns zero, set the default value to 1024. 170 170 */ 171 - sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); 171 + sdev->sector_size = get_unaligned_be24(&buf[9]); 172 172 out_free: 173 173 if (!sdev->sector_size) 174 174 sdev->sector_size = 1024; ··· 209 209 cdb[0] = INQUIRY; 210 210 cdb[1] = 0x01; /* Query VPD */ 211 211 cdb[2] = 0x80; /* Unit Serial Number */ 212 - cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff; 213 - cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff); 212 + put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]); 214 213 215 214 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 216 215 INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL); ··· 244 245 cdb[0] = INQUIRY; 245 246 cdb[1] = 0x01; /* Query VPD */ 246 247 cdb[2] = 0x83; /* Device Identifier */ 247 - cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff; 248 - cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff); 248 + put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]); 249 249 250 250 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 251 251 INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, ··· 252 254 if (ret) 253 255 goto out; 254 256 255 - page_len = (buf[2] << 8) | buf[3]; 257 + page_len = get_unaligned_be16(&buf[2]); 256 258 while (page_len > 0) { 257 259 /* Grab a pointer to the Identification descriptor */ 258 260 page_83 = &buf[off]; ··· 382 384 spin_unlock_irq(sh->host_lock); 383 385 /* 384 386 * Claim exclusive struct block_device access to struct scsi_device 385 - * for TYPE_DISK using supplied udev_path 387 + * for TYPE_DISK and TYPE_ZBC using supplied udev_path 386 388 */ 387 389 bd = blkdev_get_by_path(dev->udev_path, 388 390 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); ··· 400 402 return ret; 401 403 } 402 404 403 - pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%llu\n", 404 - phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); 405 + pr_debug("CORE_PSCSI[%d] - Added TYPE_%s for %d:%d:%d:%llu\n", 406 + 
phv->phv_host_id, sd->type == TYPE_DISK ? "DISK" : "ZBC", 407 + sh->host_no, sd->channel, sd->id, sd->lun); 405 408 return 0; 406 409 } 407 410 ··· 521 522 */ 522 523 switch (sd->type) { 523 524 case TYPE_DISK: 525 + case TYPE_ZBC: 524 526 ret = pscsi_create_type_disk(dev, sd); 525 527 break; 526 528 default: ··· 566 566 567 567 static void pscsi_free_device(struct se_device *dev) 568 568 { 569 + call_rcu(&dev->rcu_head, pscsi_dev_call_rcu); 570 + } 571 + 572 + static void pscsi_destroy_device(struct se_device *dev) 573 + { 569 574 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 570 575 struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; 571 576 struct scsi_device *sd = pdv->pdv_sd; ··· 578 573 if (sd) { 579 574 /* 580 575 * Release exclusive pSCSI internal struct block_device claim for 581 - * struct scsi_device with TYPE_DISK from pscsi_create_type_disk() 576 + * struct scsi_device with TYPE_DISK or TYPE_ZBC 577 + * from pscsi_create_type_disk() 582 578 */ 583 - if ((sd->type == TYPE_DISK) && pdv->pdv_bd) { 579 + if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) && 580 + pdv->pdv_bd) { 584 581 blkdev_put(pdv->pdv_bd, 585 582 FMODE_WRITE|FMODE_READ|FMODE_EXCL); 586 583 pdv->pdv_bd = NULL; ··· 601 594 602 595 pdv->pdv_sd = NULL; 603 596 } 604 - call_rcu(&dev->rcu_head, pscsi_dev_call_rcu); 605 597 } 606 598 607 - static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, 608 - unsigned char *sense_buffer) 599 + static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status, 600 + unsigned char *req_sense) 609 601 { 610 602 struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev); 611 603 struct scsi_device *sd = pdv->pdv_sd; 612 - int result; 613 604 struct pscsi_plugin_task *pt = cmd->priv; 614 605 unsigned char *cdb; 615 606 /* ··· 618 613 return; 619 614 620 615 cdb = &pt->pscsi_cdb[0]; 621 - result = pt->pscsi_result; 622 616 /* 623 617 * Hack to make sure that Write-Protect modepage is set if R/O mode is 624 618 * forced. 
··· 626 622 goto after_mode_sense; 627 623 628 624 if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && 629 - (status_byte(result) << 1) == SAM_STAT_GOOD) { 625 + scsi_status == SAM_STAT_GOOD) { 630 626 bool read_only = target_lun_is_rdonly(cmd); 631 627 632 628 if (read_only) { ··· 661 657 * storage engine. 662 658 */ 663 659 if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && 664 - (status_byte(result) << 1) == SAM_STAT_GOOD) { 660 + scsi_status == SAM_STAT_GOOD) { 665 661 unsigned char *buf; 666 662 u16 bdl; 667 663 u32 blocksize; 668 664 669 - buf = sg_virt(&sg[0]); 665 + buf = sg_virt(&cmd->t_data_sg[0]); 670 666 if (!buf) { 671 667 pr_err("Unable to get buf for scatterlist\n"); 672 668 goto after_mode_select; 673 669 } 674 670 675 671 if (cdb[0] == MODE_SELECT) 676 - bdl = (buf[3]); 672 + bdl = buf[3]; 677 673 else 678 - bdl = (buf[6] << 8) | (buf[7]); 674 + bdl = get_unaligned_be16(&buf[6]); 679 675 680 676 if (!bdl) 681 677 goto after_mode_select; 682 678 683 679 if (cdb[0] == MODE_SELECT) 684 - blocksize = (buf[9] << 16) | (buf[10] << 8) | 685 - (buf[11]); 680 + blocksize = get_unaligned_be24(&buf[9]); 686 681 else 687 - blocksize = (buf[13] << 16) | (buf[14] << 8) | 688 - (buf[15]); 682 + blocksize = get_unaligned_be24(&buf[13]); 689 683 690 684 sd->sector_size = blocksize; 691 685 } 692 686 after_mode_select: 693 687 694 - if (sense_buffer && (status_byte(result) & CHECK_CONDITION)) { 695 - memcpy(sense_buffer, pt->pscsi_sense, TRANSPORT_SENSE_BUFFER); 696 - cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; 697 - } 688 + if (scsi_status == SAM_STAT_CHECK_CONDITION) 689 + transport_copy_sense_to_cmd(cmd, req_sense); 698 690 } 699 691 700 692 enum { ··· 1002 1002 req->end_io_data = cmd; 1003 1003 scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb); 1004 1004 scsi_req(req)->cmd = &pt->pscsi_cdb[0]; 1005 - if (pdv->pdv_sd->type == TYPE_DISK) 1005 + if (pdv->pdv_sd->type == TYPE_DISK || 1006 + pdv->pdv_sd->type == TYPE_ZBC) 1006 1007 
req->timeout = PS_TIMEOUT_DISK; 1007 1008 else 1008 1009 req->timeout = PS_TIMEOUT_OTHER; ··· 1048 1047 { 1049 1048 struct se_cmd *cmd = req->end_io_data; 1050 1049 struct pscsi_plugin_task *pt = cmd->priv; 1050 + int result = scsi_req(req)->result; 1051 + u8 scsi_status = status_byte(result) << 1; 1051 1052 1052 - pt->pscsi_result = scsi_req(req)->result; 1053 - pt->pscsi_resid = scsi_req(req)->resid_len; 1054 - 1055 - cmd->scsi_status = status_byte(pt->pscsi_result) << 1; 1056 - if (cmd->scsi_status) { 1053 + if (scsi_status) { 1057 1054 pr_debug("PSCSI Status Byte exception at cmd: %p CDB:" 1058 1055 " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], 1059 - pt->pscsi_result); 1056 + result); 1060 1057 } 1061 1058 1062 - switch (host_byte(pt->pscsi_result)) { 1059 + pscsi_complete_cmd(cmd, scsi_status, scsi_req(req)->sense); 1060 + 1061 + switch (host_byte(result)) { 1063 1062 case DID_OK: 1064 - target_complete_cmd(cmd, cmd->scsi_status); 1063 + target_complete_cmd(cmd, scsi_status); 1065 1064 break; 1066 1065 default: 1067 1066 pr_debug("PSCSI Host Byte exception at cmd: %p CDB:" 1068 1067 " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], 1069 - pt->pscsi_result); 1068 + result); 1070 1069 target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); 1071 1070 break; 1072 1071 } 1073 1072 1074 - memcpy(pt->pscsi_sense, scsi_req(req)->sense, TRANSPORT_SENSE_BUFFER); 1075 1073 __blk_put_request(req->q, req); 1076 1074 kfree(pt); 1077 1075 } ··· 1086 1086 .pmode_enable_hba = pscsi_pmode_enable_hba, 1087 1087 .alloc_device = pscsi_alloc_device, 1088 1088 .configure_device = pscsi_configure_device, 1089 + .destroy_device = pscsi_destroy_device, 1089 1090 .free_device = pscsi_free_device, 1090 - .transport_complete = pscsi_transport_complete, 1091 1091 .parse_cdb = pscsi_parse_cdb, 1092 1092 .set_configfs_dev_params = pscsi_set_configfs_dev_params, 1093 1093 .show_configfs_dev_params = pscsi_show_configfs_dev_params,
-4
drivers/target/target_core_pscsi.h
··· 23 23 struct Scsi_Host; 24 24 25 25 struct pscsi_plugin_task { 26 - unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER]; 27 - int pscsi_direction; 28 - int pscsi_result; 29 - u32 pscsi_resid; 30 26 unsigned char pscsi_cdb[0]; 31 27 } ____cacheline_aligned; 32 28
+8 -3
drivers/target/target_core_rd.c
··· 339 339 340 340 static void rd_free_device(struct se_device *dev) 341 341 { 342 + call_rcu(&dev->rcu_head, rd_dev_call_rcu); 343 + } 344 + 345 + static void rd_destroy_device(struct se_device *dev) 346 + { 342 347 struct rd_dev *rd_dev = RD_DEV(dev); 343 348 344 349 rd_release_device_space(rd_dev); 345 - call_rcu(&dev->rcu_head, rd_dev_call_rcu); 346 350 } 347 351 348 352 static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) ··· 558 554 struct rd_dev *rd_dev = RD_DEV(dev); 559 555 char *orig, *ptr, *opts; 560 556 substring_t args[MAX_OPT_ARGS]; 561 - int ret = 0, arg, token; 557 + int arg, token; 562 558 563 559 opts = kstrdup(page, GFP_KERNEL); 564 560 if (!opts) ··· 593 589 } 594 590 595 591 kfree(orig); 596 - return (!ret) ? count : ret; 592 + return count; 597 593 } 598 594 599 595 static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b) ··· 655 651 .detach_hba = rd_detach_hba, 656 652 .alloc_device = rd_alloc_device, 657 653 .configure_device = rd_configure_device, 654 + .destroy_device = rd_destroy_device, 658 655 .free_device = rd_free_device, 659 656 .parse_cdb = rd_parse_cdb, 660 657 .set_configfs_dev_params = rd_set_configfs_dev_params,
+21 -44
drivers/target/target_core_sbc.c
··· 71 71 else 72 72 blocks = (u32)blocks_long; 73 73 74 - buf[0] = (blocks >> 24) & 0xff; 75 - buf[1] = (blocks >> 16) & 0xff; 76 - buf[2] = (blocks >> 8) & 0xff; 77 - buf[3] = blocks & 0xff; 78 - buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff; 79 - buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff; 80 - buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff; 81 - buf[7] = dev->dev_attrib.block_size & 0xff; 74 + put_unaligned_be32(blocks, &buf[0]); 75 + put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]); 82 76 83 77 rbuf = transport_kmap_data_sg(cmd); 84 78 if (rbuf) { ··· 96 102 unsigned long long blocks = dev->transport->get_blocks(dev); 97 103 98 104 memset(buf, 0, sizeof(buf)); 99 - buf[0] = (blocks >> 56) & 0xff; 100 - buf[1] = (blocks >> 48) & 0xff; 101 - buf[2] = (blocks >> 40) & 0xff; 102 - buf[3] = (blocks >> 32) & 0xff; 103 - buf[4] = (blocks >> 24) & 0xff; 104 - buf[5] = (blocks >> 16) & 0xff; 105 - buf[6] = (blocks >> 8) & 0xff; 106 - buf[7] = blocks & 0xff; 107 - buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff; 108 - buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; 109 - buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; 110 - buf[11] = dev->dev_attrib.block_size & 0xff; 105 + put_unaligned_be64(blocks, &buf[0]); 106 + put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]); 111 107 /* 112 108 * Set P_TYPE and PROT_EN bits for DIF support 113 109 */ ··· 118 134 119 135 if (dev->transport->get_alignment_offset_lbas) { 120 136 u16 lalba = dev->transport->get_alignment_offset_lbas(dev); 121 - buf[14] = (lalba >> 8) & 0x3f; 122 - buf[15] = lalba & 0xff; 137 + 138 + put_unaligned_be16(lalba, &buf[14]); 123 139 } 124 140 125 141 /* ··· 246 262 247 263 static inline u32 transport_get_sectors_10(unsigned char *cdb) 248 264 { 249 - return (u32)(cdb[7] << 8) + cdb[8]; 265 + return get_unaligned_be16(&cdb[7]); 250 266 } 251 267 252 268 static inline u32 transport_get_sectors_12(unsigned char *cdb) 253 269 { 254 - return (u32)(cdb[6] << 24) + (cdb[7] 
<< 16) + (cdb[8] << 8) + cdb[9]; 270 + return get_unaligned_be32(&cdb[6]); 255 271 } 256 272 257 273 static inline u32 transport_get_sectors_16(unsigned char *cdb) 258 274 { 259 - return (u32)(cdb[10] << 24) + (cdb[11] << 16) + 260 - (cdb[12] << 8) + cdb[13]; 275 + return get_unaligned_be32(&cdb[10]); 261 276 } 262 277 263 278 /* ··· 264 281 */ 265 282 static inline u32 transport_get_sectors_32(unsigned char *cdb) 266 283 { 267 - return (u32)(cdb[28] << 24) + (cdb[29] << 16) + 268 - (cdb[30] << 8) + cdb[31]; 284 + return get_unaligned_be32(&cdb[28]); 269 285 270 286 } 271 287 272 288 static inline u32 transport_lba_21(unsigned char *cdb) 273 289 { 274 - return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 290 + return get_unaligned_be24(&cdb[1]) & 0x1fffff; 275 291 } 276 292 277 293 static inline u32 transport_lba_32(unsigned char *cdb) 278 294 { 279 - return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 295 + return get_unaligned_be32(&cdb[2]); 280 296 } 281 297 282 298 static inline unsigned long long transport_lba_64(unsigned char *cdb) 283 299 { 284 - unsigned int __v1, __v2; 285 - 286 - __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 287 - __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 288 - 289 - return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 300 + return get_unaligned_be64(&cdb[2]); 290 301 } 291 302 292 303 /* ··· 288 311 */ 289 312 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) 290 313 { 291 - unsigned int __v1, __v2; 292 - 293 - __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; 294 - __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; 295 - 296 - return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 314 + return get_unaligned_be64(&cdb[12]); 297 315 } 298 316 299 317 static sense_reason_t ··· 977 1005 break; 978 1006 } 979 1007 case COMPARE_AND_WRITE: 1008 + if (!dev->dev_attrib.emulate_caw) { 1009 + 
pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject" 1010 + " COMPARE_AND_WRITE\n", dev->se_hba->backend->ops->name, 1011 + dev->dev_group.cg_item.ci_name, dev->t10_wwn.unit_serial); 1012 + return TCM_UNSUPPORTED_SCSI_OPCODE; 1013 + } 980 1014 sectors = cdb[13]; 981 1015 /* 982 1016 * Currently enforce COMPARE_AND_WRITE for a single sector ··· 1023 1045 cmd->t_task_cdb[1] & 0x1f); 1024 1046 return TCM_INVALID_CDB_FIELD; 1025 1047 } 1026 - size = (cdb[10] << 24) | (cdb[11] << 16) | 1027 - (cdb[12] << 8) | cdb[13]; 1048 + size = get_unaligned_be32(&cdb[10]); 1028 1049 break; 1029 1050 case SYNCHRONIZE_CACHE: 1030 1051 case SYNCHRONIZE_CACHE_16:
+20 -22
drivers/target/target_core_spc.c
··· 287 287 /* Skip over Obsolete field in RTPI payload 288 288 * in Table 472 */ 289 289 off += 2; 290 - buf[off++] = ((lun->lun_rtpi >> 8) & 0xff); 291 - buf[off++] = (lun->lun_rtpi & 0xff); 290 + put_unaligned_be16(lun->lun_rtpi, &buf[off]); 291 + off += 2; 292 292 len += 8; /* Header size + Designation descriptor */ 293 293 /* 294 294 * Target port group identifier, see spc4r17 ··· 316 316 off++; /* Skip over Reserved */ 317 317 buf[off++] = 4; /* DESIGNATOR LENGTH */ 318 318 off += 2; /* Skip over Reserved Field */ 319 - buf[off++] = ((tg_pt_gp_id >> 8) & 0xff); 320 - buf[off++] = (tg_pt_gp_id & 0xff); 319 + put_unaligned_be16(tg_pt_gp_id, &buf[off]); 320 + off += 2; 321 321 len += 8; /* Header size + Designation descriptor */ 322 322 /* 323 323 * Logical Unit Group identifier, see spc4r17 ··· 343 343 off++; /* Skip over Reserved */ 344 344 buf[off++] = 4; /* DESIGNATOR LENGTH */ 345 345 off += 2; /* Skip over Reserved Field */ 346 - buf[off++] = ((lu_gp_id >> 8) & 0xff); 347 - buf[off++] = (lu_gp_id & 0xff); 346 + put_unaligned_be16(lu_gp_id, &buf[off]); 347 + off += 2; 348 348 len += 8; /* Header size + Designation descriptor */ 349 349 /* 350 350 * SCSI name string designator, see spc4r17 ··· 431 431 /* Header size + Designation descriptor */ 432 432 len += (scsi_target_len + 4); 433 433 } 434 - buf[2] = ((len >> 8) & 0xff); 435 - buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ 434 + put_unaligned_be16(len, &buf[2]); /* Page Length for VPD 0x83 */ 436 435 return 0; 437 436 } 438 437 EXPORT_SYMBOL(spc_emulate_evpd_83); ··· 1287 1288 cmd->execute_cmd = spc_emulate_modeselect; 1288 1289 break; 1289 1290 case MODE_SELECT_10: 1290 - *size = (cdb[7] << 8) + cdb[8]; 1291 + *size = get_unaligned_be16(&cdb[7]); 1291 1292 cmd->execute_cmd = spc_emulate_modeselect; 1292 1293 break; 1293 1294 case MODE_SENSE: ··· 1295 1296 cmd->execute_cmd = spc_emulate_modesense; 1296 1297 break; 1297 1298 case MODE_SENSE_10: 1298 - *size = (cdb[7] << 8) + cdb[8]; 1299 + *size = 
get_unaligned_be16(&cdb[7]); 1299 1300 cmd->execute_cmd = spc_emulate_modesense; 1300 1301 break; 1301 1302 case LOG_SELECT: 1302 1303 case LOG_SENSE: 1303 - *size = (cdb[7] << 8) + cdb[8]; 1304 + *size = get_unaligned_be16(&cdb[7]); 1304 1305 break; 1305 1306 case PERSISTENT_RESERVE_IN: 1306 - *size = (cdb[7] << 8) + cdb[8]; 1307 + *size = get_unaligned_be16(&cdb[7]); 1307 1308 cmd->execute_cmd = target_scsi3_emulate_pr_in; 1308 1309 break; 1309 1310 case PERSISTENT_RESERVE_OUT: 1310 - *size = (cdb[7] << 8) + cdb[8]; 1311 + *size = get_unaligned_be32(&cdb[5]); 1311 1312 cmd->execute_cmd = target_scsi3_emulate_pr_out; 1312 1313 break; 1313 1314 case RELEASE: 1314 1315 case RELEASE_10: 1315 1316 if (cdb[0] == RELEASE_10) 1316 - *size = (cdb[7] << 8) | cdb[8]; 1317 + *size = get_unaligned_be16(&cdb[7]); 1317 1318 else 1318 1319 *size = cmd->data_length; 1319 1320 ··· 1326 1327 * Assume the passthrough or $FABRIC_MOD will tell us about it. 1327 1328 */ 1328 1329 if (cdb[0] == RESERVE_10) 1329 - *size = (cdb[7] << 8) | cdb[8]; 1330 + *size = get_unaligned_be16(&cdb[7]); 1330 1331 else 1331 1332 *size = cmd->data_length; 1332 1333 ··· 1337 1338 cmd->execute_cmd = spc_emulate_request_sense; 1338 1339 break; 1339 1340 case INQUIRY: 1340 - *size = (cdb[3] << 8) + cdb[4]; 1341 + *size = get_unaligned_be16(&cdb[3]); 1341 1342 1342 1343 /* 1343 1344 * Do implicit HEAD_OF_QUEUE processing for INQUIRY. 
··· 1348 1349 break; 1349 1350 case SECURITY_PROTOCOL_IN: 1350 1351 case SECURITY_PROTOCOL_OUT: 1351 - *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1352 + *size = get_unaligned_be32(&cdb[6]); 1352 1353 break; 1353 1354 case EXTENDED_COPY: 1354 1355 *size = get_unaligned_be32(&cdb[10]); ··· 1360 1361 break; 1361 1362 case READ_ATTRIBUTE: 1362 1363 case WRITE_ATTRIBUTE: 1363 - *size = (cdb[10] << 24) | (cdb[11] << 16) | 1364 - (cdb[12] << 8) | cdb[13]; 1364 + *size = get_unaligned_be32(&cdb[10]); 1365 1365 break; 1366 1366 case RECEIVE_DIAGNOSTIC: 1367 1367 case SEND_DIAGNOSTIC: 1368 - *size = (cdb[3] << 8) | cdb[4]; 1368 + *size = get_unaligned_be16(&cdb[3]); 1369 1369 break; 1370 1370 case WRITE_BUFFER: 1371 - *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 1371 + *size = get_unaligned_be24(&cdb[6]); 1372 1372 break; 1373 1373 case REPORT_LUNS: 1374 1374 cmd->execute_cmd = spc_emulate_report_luns; 1375 - *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1375 + *size = get_unaligned_be32(&cdb[6]); 1376 1376 /* 1377 1377 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS 1378 1378 * See spc4r17 section 5.3
+4 -14
drivers/target/target_core_tmr.c
··· 355 355 cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); 356 356 list_del_init(&cmd->state_list); 357 357 358 - pr_debug("LUN_RESET: %s cmd: %p" 359 - " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d" 360 - "cdb: 0x%02x\n", 361 - (preempt_and_abort_list) ? "Preempt" : "", cmd, 362 - cmd->tag, 0, 363 - cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, 364 - cmd->t_task_cdb[0]); 365 - pr_debug("LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx" 366 - " -- CMD_T_ACTIVE: %d" 367 - " CMD_T_STOP: %d CMD_T_SENT: %d\n", 368 - cmd->tag, cmd->pr_res_key, 369 - (cmd->transport_state & CMD_T_ACTIVE) != 0, 370 - (cmd->transport_state & CMD_T_STOP) != 0, 371 - (cmd->transport_state & CMD_T_SENT) != 0); 358 + target_show_cmd("LUN_RESET: ", cmd); 359 + pr_debug("LUN_RESET: ITT[0x%08llx] - %s pr_res_key: 0x%016Lx\n", 360 + cmd->tag, (preempt_and_abort_list) ? "preempt" : "", 361 + cmd->pr_res_key); 372 362 373 363 /* 374 364 * If the command may be queued onto a workqueue cancel it now.
-1
drivers/target/target_core_tpg.c
··· 576 576 return ERR_PTR(-ENOMEM); 577 577 } 578 578 lun->unpacked_lun = unpacked_lun; 579 - lun->lun_link_magic = SE_LUN_LINK_MAGIC; 580 579 atomic_set(&lun->lun_acl_count, 0); 581 580 init_completion(&lun->lun_ref_comp); 582 581 init_completion(&lun->lun_shutdown_comp);
+193 -29
drivers/target/target_core_transport.c
··· 704 704 return cmd->sense_buffer; 705 705 } 706 706 707 + void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense) 708 + { 709 + unsigned char *cmd_sense_buf; 710 + unsigned long flags; 711 + 712 + spin_lock_irqsave(&cmd->t_state_lock, flags); 713 + cmd_sense_buf = transport_get_sense_buffer(cmd); 714 + if (!cmd_sense_buf) { 715 + spin_unlock_irqrestore(&cmd->t_state_lock, flags); 716 + return; 717 + } 718 + 719 + cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; 720 + memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length); 721 + spin_unlock_irqrestore(&cmd->t_state_lock, flags); 722 + } 723 + EXPORT_SYMBOL(transport_copy_sense_to_cmd); 724 + 707 725 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 708 726 { 709 727 struct se_device *dev = cmd->se_dev; 710 - int success = scsi_status == GOOD; 728 + int success; 711 729 unsigned long flags; 712 730 713 731 cmd->scsi_status = scsi_status; 714 732 715 - 716 733 spin_lock_irqsave(&cmd->t_state_lock, flags); 717 - 718 - if (dev && dev->transport->transport_complete) { 719 - dev->transport->transport_complete(cmd, 720 - cmd->t_data_sg, 721 - transport_get_sense_buffer(cmd)); 734 + switch (cmd->scsi_status) { 735 + case SAM_STAT_CHECK_CONDITION: 722 736 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 723 737 success = 1; 738 + else 739 + success = 0; 740 + break; 741 + default: 742 + success = 1; 743 + break; 724 744 } 725 745 726 746 /* ··· 750 730 if (cmd->transport_state & CMD_T_ABORTED || 751 731 cmd->transport_state & CMD_T_STOP) { 752 732 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 733 + /* 734 + * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(), 735 + * release se_device->caw_sem obtained by sbc_compare_and_write() 736 + * since target_complete_ok_work() or target_complete_failure_work() 737 + * won't be called to invoke the normal CAW completion callbacks. 
738 + */ 739 + if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 740 + up(&dev->caw_sem); 741 + } 753 742 complete_all(&cmd->t_transport_stop_comp); 754 743 return; 755 744 } else if (!success) { ··· 1268 1239 init_completion(&cmd->t_transport_stop_comp); 1269 1240 init_completion(&cmd->cmd_wait_comp); 1270 1241 spin_lock_init(&cmd->t_state_lock); 1242 + INIT_WORK(&cmd->work, NULL); 1271 1243 kref_init(&cmd->cmd_kref); 1272 1244 1273 1245 cmd->se_tfo = tfo; ··· 1620 1590 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1621 1591 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1622 1592 1593 + transport_lun_remove_cmd(se_cmd); 1623 1594 transport_cmd_check_stop_to_fabric(se_cmd); 1595 + } 1596 + 1597 + static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag, 1598 + u64 *unpacked_lun) 1599 + { 1600 + struct se_cmd *se_cmd; 1601 + unsigned long flags; 1602 + bool ret = false; 1603 + 1604 + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 1605 + list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { 1606 + if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 1607 + continue; 1608 + 1609 + if (se_cmd->tag == tag) { 1610 + *unpacked_lun = se_cmd->orig_fe_lun; 1611 + ret = true; 1612 + break; 1613 + } 1614 + } 1615 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 1616 + 1617 + return ret; 1624 1618 } 1625 1619 1626 1620 /** ··· 1694 1640 core_tmr_release_req(se_cmd->se_tmr_req); 1695 1641 return ret; 1696 1642 } 1643 + /* 1644 + * If this is ABORT_TASK with no explicit fabric provided LUN, 1645 + * go ahead and search active session tags for a match to figure 1646 + * out unpacked_lun for the original se_cmd. 
1647 + */ 1648 + if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) { 1649 + if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun)) 1650 + goto failure; 1651 + } 1697 1652 1698 1653 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1699 - if (ret) { 1700 - /* 1701 - * For callback during failure handling, push this work off 1702 - * to process context with TMR_LUN_DOES_NOT_EXIST status. 1703 - */ 1704 - INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1705 - schedule_work(&se_cmd->work); 1706 - return 0; 1707 - } 1654 + if (ret) 1655 + goto failure; 1656 + 1708 1657 transport_generic_handle_tmr(se_cmd); 1658 + return 0; 1659 + 1660 + /* 1661 + * For callback during failure handling, push this work off 1662 + * to process context with TMR_LUN_DOES_NOT_EXIST status. 1663 + */ 1664 + failure: 1665 + INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1666 + schedule_work(&se_cmd->work); 1709 1667 return 0; 1710 1668 } 1711 1669 EXPORT_SYMBOL(target_submit_tmr); ··· 1733 1667 if (transport_check_aborted_status(cmd, 1)) 1734 1668 return; 1735 1669 1736 - pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" 1737 - " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); 1738 - pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", 1739 - cmd->se_tfo->get_cmd_state(cmd), 1740 - cmd->t_state, sense_reason); 1741 - pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", 1742 - (cmd->transport_state & CMD_T_ACTIVE) != 0, 1743 - (cmd->transport_state & CMD_T_STOP) != 0, 1744 - (cmd->transport_state & CMD_T_SENT) != 0); 1670 + pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", 1671 + sense_reason); 1672 + target_show_cmd("-----[ ", cmd); 1745 1673 1746 1674 /* 1747 1675 * For SAM Task Attribute emulation for failed struct se_cmd ··· 2728 2668 } 2729 2669 EXPORT_SYMBOL(target_put_sess_cmd); 2730 2670 2671 + static const char *data_dir_name(enum dma_data_direction d) 2672 + { 2673 + switch 
(d) { 2674 + case DMA_BIDIRECTIONAL: return "BIDI"; 2675 + case DMA_TO_DEVICE: return "WRITE"; 2676 + case DMA_FROM_DEVICE: return "READ"; 2677 + case DMA_NONE: return "NONE"; 2678 + } 2679 + 2680 + return "(?)"; 2681 + } 2682 + 2683 + static const char *cmd_state_name(enum transport_state_table t) 2684 + { 2685 + switch (t) { 2686 + case TRANSPORT_NO_STATE: return "NO_STATE"; 2687 + case TRANSPORT_NEW_CMD: return "NEW_CMD"; 2688 + case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; 2689 + case TRANSPORT_PROCESSING: return "PROCESSING"; 2690 + case TRANSPORT_COMPLETE: return "COMPLETE"; 2691 + case TRANSPORT_ISTATE_PROCESSING: 2692 + return "ISTATE_PROCESSING"; 2693 + case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; 2694 + case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; 2695 + case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; 2696 + } 2697 + 2698 + return "(?)"; 2699 + } 2700 + 2701 + static void target_append_str(char **str, const char *txt) 2702 + { 2703 + char *prev = *str; 2704 + 2705 + *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : 2706 + kstrdup(txt, GFP_ATOMIC); 2707 + kfree(prev); 2708 + } 2709 + 2710 + /* 2711 + * Convert a transport state bitmask into a string. The caller is 2712 + * responsible for freeing the returned pointer. 
2713 + */ 2714 + static char *target_ts_to_str(u32 ts) 2715 + { 2716 + char *str = NULL; 2717 + 2718 + if (ts & CMD_T_ABORTED) 2719 + target_append_str(&str, "aborted"); 2720 + if (ts & CMD_T_ACTIVE) 2721 + target_append_str(&str, "active"); 2722 + if (ts & CMD_T_COMPLETE) 2723 + target_append_str(&str, "complete"); 2724 + if (ts & CMD_T_SENT) 2725 + target_append_str(&str, "sent"); 2726 + if (ts & CMD_T_STOP) 2727 + target_append_str(&str, "stop"); 2728 + if (ts & CMD_T_FABRIC_STOP) 2729 + target_append_str(&str, "fabric_stop"); 2730 + 2731 + return str; 2732 + } 2733 + 2734 + static const char *target_tmf_name(enum tcm_tmreq_table tmf) 2735 + { 2736 + switch (tmf) { 2737 + case TMR_ABORT_TASK: return "ABORT_TASK"; 2738 + case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; 2739 + case TMR_CLEAR_ACA: return "CLEAR_ACA"; 2740 + case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; 2741 + case TMR_LUN_RESET: return "LUN_RESET"; 2742 + case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; 2743 + case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; 2744 + case TMR_UNKNOWN: break; 2745 + } 2746 + return "(?)"; 2747 + } 2748 + 2749 + void target_show_cmd(const char *pfx, struct se_cmd *cmd) 2750 + { 2751 + char *ts_str = target_ts_to_str(cmd->transport_state); 2752 + const u8 *cdb = cmd->t_task_cdb; 2753 + struct se_tmr_req *tmf = cmd->se_tmr_req; 2754 + 2755 + if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2756 + pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", 2757 + pfx, cdb[0], cdb[1], cmd->tag, 2758 + data_dir_name(cmd->data_direction), 2759 + cmd->se_tfo->get_cmd_state(cmd), 2760 + cmd_state_name(cmd->t_state), cmd->data_length, 2761 + kref_read(&cmd->cmd_kref), ts_str); 2762 + } else { 2763 + pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", 2764 + pfx, target_tmf_name(tmf->function), cmd->tag, 2765 + tmf->ref_task_tag, 
cmd->se_tfo->get_cmd_state(cmd), 2766 + cmd_state_name(cmd->t_state), 2767 + kref_read(&cmd->cmd_kref), ts_str); 2768 + } 2769 + kfree(ts_str); 2770 + } 2771 + EXPORT_SYMBOL(target_show_cmd); 2772 + 2731 2773 /* target_sess_cmd_list_set_waiting - Flag all commands in 2732 2774 * sess_cmd_list to complete cmd_wait_comp. Set 2733 2775 * sess_tearing_down so no more commands are queued. ··· 2974 2812 2975 2813 cmd->transport_state |= CMD_T_STOP; 2976 2814 2977 - pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d," 2978 - " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag, 2979 - cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 2815 + target_show_cmd("wait_for_tasks: Stopping ", cmd); 2980 2816 2981 2817 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 2982 2818 2983 - wait_for_completion(&cmd->t_transport_stop_comp); 2819 + while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, 2820 + 180 * HZ)) 2821 + target_show_cmd("wait for tasks: ", cmd); 2984 2822 2985 2823 spin_lock_irqsave(&cmd->t_state_lock, *flags); 2986 2824 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); ··· 3363 3201 cmd->se_tfo->queue_tm_rsp(cmd); 3364 3202 3365 3203 check_stop: 3204 + transport_lun_remove_cmd(cmd); 3366 3205 transport_cmd_check_stop_to_fabric(cmd); 3367 3206 } 3368 3207 ··· 3386 3223 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d" 3387 3224 "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function, 3388 3225 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3226 + transport_lun_remove_cmd(cmd); 3389 3227 transport_cmd_check_stop_to_fabric(cmd); 3390 3228 return 0; 3391 3229 }
+397 -50
drivers/target/target_core_user.c
··· 87 87 /* Default maximum of the global data blocks(512K * PAGE_SIZE) */ 88 88 #define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024) 89 89 90 + static u8 tcmu_kern_cmd_reply_supported; 91 + 90 92 static struct device *tcmu_root_device; 91 93 92 94 struct tcmu_hba { ··· 96 94 }; 97 95 98 96 #define TCMU_CONFIG_LEN 256 97 + 98 + struct tcmu_nl_cmd { 99 + /* wake up thread waiting for reply */ 100 + struct completion complete; 101 + int cmd; 102 + int status; 103 + }; 99 104 100 105 struct tcmu_dev { 101 106 struct list_head node; ··· 143 134 144 135 struct timer_list timeout; 145 136 unsigned int cmd_time_out; 137 + 138 + spinlock_t nl_cmd_lock; 139 + struct tcmu_nl_cmd curr_nl_cmd; 140 + /* wake up threads waiting on curr_nl_cmd */ 141 + wait_queue_head_t nl_cmd_wq; 146 142 147 143 char dev_config[TCMU_CONFIG_LEN]; 148 144 }; ··· 192 178 [TCMU_MCGRP_CONFIG] = { .name = "config", }, 193 179 }; 194 180 181 + static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = { 182 + [TCMU_ATTR_DEVICE] = { .type = NLA_STRING }, 183 + [TCMU_ATTR_MINOR] = { .type = NLA_U32 }, 184 + [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 }, 185 + [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 }, 186 + [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 }, 187 + }; 188 + 189 + static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd) 190 + { 191 + struct se_device *dev; 192 + struct tcmu_dev *udev; 193 + struct tcmu_nl_cmd *nl_cmd; 194 + int dev_id, rc, ret = 0; 195 + bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE); 196 + 197 + if (!info->attrs[TCMU_ATTR_CMD_STATUS] || 198 + !info->attrs[TCMU_ATTR_DEVICE_ID]) { 199 + printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n"); 200 + return -EINVAL; 201 + } 202 + 203 + dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]); 204 + rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]); 205 + 206 + dev = target_find_device(dev_id, !is_removed); 207 + if (!dev) { 208 + printk(KERN_ERR "tcmu nl cmd %u/%u 
completion could not find device with dev id %u.\n", 209 + completed_cmd, rc, dev_id); 210 + return -ENODEV; 211 + } 212 + udev = TCMU_DEV(dev); 213 + 214 + spin_lock(&udev->nl_cmd_lock); 215 + nl_cmd = &udev->curr_nl_cmd; 216 + 217 + pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id, 218 + nl_cmd->cmd, completed_cmd, rc); 219 + 220 + if (nl_cmd->cmd != completed_cmd) { 221 + printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n", 222 + completed_cmd, nl_cmd->cmd); 223 + ret = -EINVAL; 224 + } else { 225 + nl_cmd->status = rc; 226 + } 227 + 228 + spin_unlock(&udev->nl_cmd_lock); 229 + if (!is_removed) 230 + target_undepend_item(&dev->dev_group.cg_item); 231 + if (!ret) 232 + complete(&nl_cmd->complete); 233 + return ret; 234 + } 235 + 236 + static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info) 237 + { 238 + return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE); 239 + } 240 + 241 + static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info) 242 + { 243 + return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE); 244 + } 245 + 246 + static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb, 247 + struct genl_info *info) 248 + { 249 + return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE); 250 + } 251 + 252 + static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info) 253 + { 254 + if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) { 255 + tcmu_kern_cmd_reply_supported = 256 + nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]); 257 + printk(KERN_INFO "tcmu daemon: command reply support %u.\n", 258 + tcmu_kern_cmd_reply_supported); 259 + } 260 + 261 + return 0; 262 + } 263 + 264 + static const struct genl_ops tcmu_genl_ops[] = { 265 + { 266 + .cmd = TCMU_CMD_SET_FEATURES, 267 + .flags = GENL_ADMIN_PERM, 268 + .policy = tcmu_attr_policy, 269 + .doit = tcmu_genl_set_features, 270 + }, 271 + { 272 + .cmd = TCMU_CMD_ADDED_DEVICE_DONE, 273 + .flags = GENL_ADMIN_PERM, 
274 + .policy = tcmu_attr_policy, 275 + .doit = tcmu_genl_add_dev_done, 276 + }, 277 + { 278 + .cmd = TCMU_CMD_REMOVED_DEVICE_DONE, 279 + .flags = GENL_ADMIN_PERM, 280 + .policy = tcmu_attr_policy, 281 + .doit = tcmu_genl_rm_dev_done, 282 + }, 283 + { 284 + .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE, 285 + .flags = GENL_ADMIN_PERM, 286 + .policy = tcmu_attr_policy, 287 + .doit = tcmu_genl_reconfig_dev_done, 288 + }, 289 + }; 290 + 195 291 /* Our generic netlink family */ 196 292 static struct genl_family tcmu_genl_family __ro_after_init = { 197 293 .module = THIS_MODULE, 198 294 .hdrsize = 0, 199 295 .name = "TCM-USER", 200 - .version = 1, 296 + .version = 2, 201 297 .maxattr = TCMU_ATTR_MAX, 202 298 .mcgrps = tcmu_mcgrps, 203 299 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), 204 300 .netnsok = true, 301 + .ops = tcmu_genl_ops, 302 + .n_ops = ARRAY_SIZE(tcmu_genl_ops), 205 303 }; 206 304 207 305 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index)) ··· 342 216 343 217 page = radix_tree_lookup(&udev->data_blocks, dbi); 344 218 if (!page) { 345 - 346 219 if (atomic_add_return(1, &global_db_count) > 347 220 TCMU_GLOBAL_MAX_BLOCKS) { 348 221 atomic_dec(&global_db_count); ··· 351 226 /* try to get new page from the mm */ 352 227 page = alloc_page(GFP_KERNEL); 353 228 if (!page) 354 - return false; 229 + goto err_alloc; 355 230 356 231 ret = radix_tree_insert(&udev->data_blocks, dbi, page); 357 - if (ret) { 358 - __free_page(page); 359 - return false; 360 - } 361 - 232 + if (ret) 233 + goto err_insert; 362 234 } 363 235 364 236 if (dbi > udev->dbi_max) ··· 365 243 tcmu_cmd_set_dbi(tcmu_cmd, dbi); 366 244 367 245 return true; 246 + err_insert: 247 + __free_page(page); 248 + err_alloc: 249 + atomic_dec(&global_db_count); 250 + return false; 368 251 } 369 252 370 253 static bool tcmu_get_empty_blocks(struct tcmu_dev *udev, ··· 528 401 DATA_BLOCK_SIZE - remaining; 529 402 } 530 403 531 - static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov) 404 + static 
inline size_t iov_tail(struct iovec *iov) 532 405 { 533 406 return (size_t)iov->iov_base + iov->iov_len; 534 407 } ··· 564 437 to_offset = get_block_offset_user(udev, dbi, 565 438 block_remaining); 566 439 offset = DATA_BLOCK_SIZE - block_remaining; 567 - to = (void *)(unsigned long)to + offset; 440 + to += offset; 568 441 569 442 if (*iov_cnt != 0 && 570 - to_offset == iov_tail(udev, *iov)) { 443 + to_offset == iov_tail(*iov)) { 571 444 (*iov)->iov_len += copy_bytes; 572 445 } else { 573 446 new_iov(iov, iov_cnt, udev); ··· 637 510 copy_bytes = min_t(size_t, sg_remaining, 638 511 block_remaining); 639 512 offset = DATA_BLOCK_SIZE - block_remaining; 640 - from = (void *)(unsigned long)from + offset; 513 + from += offset; 641 514 tcmu_flush_dcache_range(from, copy_bytes); 642 515 memcpy(to + sg->length - sg_remaining, from, 643 516 copy_bytes); ··· 723 596 } 724 597 } 725 598 726 - if (!tcmu_get_empty_blocks(udev, cmd)) 727 - return false; 728 - 729 - return true; 599 + return tcmu_get_empty_blocks(udev, cmd); 730 600 } 731 601 732 602 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt) ··· 823 699 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); 824 700 825 701 entry = (void *) mb + CMDR_OFF + cmd_head; 826 - tcmu_flush_dcache_range(entry, sizeof(*entry)); 827 702 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); 828 703 tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); 829 704 entry->hdr.cmd_id = 0; /* not used for PAD */ 830 705 entry->hdr.kflags = 0; 831 706 entry->hdr.uflags = 0; 707 + tcmu_flush_dcache_range(entry, sizeof(*entry)); 832 708 833 709 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); 710 + tcmu_flush_dcache_range(mb, sizeof(*mb)); 834 711 835 712 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 836 713 WARN_ON(cmd_head != 0); 837 714 } 838 715 839 716 entry = (void *) mb + CMDR_OFF + cmd_head; 840 - tcmu_flush_dcache_range(entry, sizeof(*entry)); 717 + memset(entry, 0, command_size); 841 718 
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); 842 719 entry->hdr.cmd_id = tcmu_cmd->cmd_id; 843 - entry->hdr.kflags = 0; 844 - entry->hdr.uflags = 0; 845 720 846 721 /* Handle allocating space from the data area */ 847 722 tcmu_cmd_reset_dbi_cur(tcmu_cmd); ··· 859 736 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 860 737 } 861 738 entry->req.iov_cnt = iov_cnt; 862 - entry->req.iov_dif_cnt = 0; 863 739 864 740 /* Handle BIDI commands */ 741 + iov_cnt = 0; 865 742 if (se_cmd->se_cmd_flags & SCF_BIDI) { 866 - iov_cnt = 0; 867 743 iov++; 868 744 ret = scatter_data_area(udev, tcmu_cmd, 869 745 se_cmd->t_bidi_data_sg, ··· 875 753 pr_err("tcmu: alloc and scatter bidi data failed\n"); 876 754 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 877 755 } 878 - entry->req.iov_bidi_cnt = iov_cnt; 879 756 } 757 + entry->req.iov_bidi_cnt = iov_cnt; 880 758 881 759 /* 882 760 * Recalaulate the command's base size and size according ··· 952 830 cmd->se_cmd); 953 831 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; 954 832 } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { 955 - memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer, 956 - se_cmd->scsi_sense_length); 833 + transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer); 957 834 } else if (se_cmd->se_cmd_flags & SCF_BIDI) { 958 835 /* Get Data-In buffer before clean up */ 959 836 gather_data_area(udev, cmd, true); ··· 1110 989 setup_timer(&udev->timeout, tcmu_device_timedout, 1111 990 (unsigned long)udev); 1112 991 992 + init_waitqueue_head(&udev->nl_cmd_wq); 993 + spin_lock_init(&udev->nl_cmd_lock); 994 + 1113 995 return &udev->se_dev; 1114 996 } 1115 997 ··· 1264 1140 return -EBUSY; 1265 1141 1266 1142 udev->inode = inode; 1143 + kref_get(&udev->kref); 1267 1144 1268 1145 pr_debug("open\n"); 1269 1146 ··· 1296 1171 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 1297 1172 1298 1173 pr_debug("close\n"); 1299 - /* release ref from configure */ 1174 + /* release ref from open */ 1300 1175 
kref_put(&udev->kref, tcmu_dev_kref_release); 1301 1176 return 0; 1302 1177 } 1303 1178 1304 - static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor) 1179 + static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) 1180 + { 1181 + struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1182 + 1183 + if (!tcmu_kern_cmd_reply_supported) 1184 + return; 1185 + relock: 1186 + spin_lock(&udev->nl_cmd_lock); 1187 + 1188 + if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { 1189 + spin_unlock(&udev->nl_cmd_lock); 1190 + pr_debug("sleeping for open nl cmd\n"); 1191 + wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC)); 1192 + goto relock; 1193 + } 1194 + 1195 + memset(nl_cmd, 0, sizeof(*nl_cmd)); 1196 + nl_cmd->cmd = cmd; 1197 + init_completion(&nl_cmd->complete); 1198 + 1199 + spin_unlock(&udev->nl_cmd_lock); 1200 + } 1201 + 1202 + static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) 1203 + { 1204 + struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1205 + int ret; 1206 + DEFINE_WAIT(__wait); 1207 + 1208 + if (!tcmu_kern_cmd_reply_supported) 1209 + return 0; 1210 + 1211 + pr_debug("sleeping for nl reply\n"); 1212 + wait_for_completion(&nl_cmd->complete); 1213 + 1214 + spin_lock(&udev->nl_cmd_lock); 1215 + nl_cmd->cmd = TCMU_CMD_UNSPEC; 1216 + ret = nl_cmd->status; 1217 + nl_cmd->status = 0; 1218 + spin_unlock(&udev->nl_cmd_lock); 1219 + 1220 + wake_up_all(&udev->nl_cmd_wq); 1221 + 1222 + return ret;; 1223 + } 1224 + 1225 + static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd, 1226 + int reconfig_attr, const void *reconfig_data) 1305 1227 { 1306 1228 struct sk_buff *skb; 1307 1229 void *msg_header; ··· 1362 1190 if (!msg_header) 1363 1191 goto free_skb; 1364 1192 1365 - ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name); 1193 + ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); 1366 1194 if (ret < 0) 1367 1195 goto free_skb; 1368 1196 1369 - ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor); 1197 + ret = 
nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); 1370 1198 if (ret < 0) 1371 1199 goto free_skb; 1200 + 1201 + ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); 1202 + if (ret < 0) 1203 + goto free_skb; 1204 + 1205 + if (cmd == TCMU_CMD_RECONFIG_DEVICE) { 1206 + switch (reconfig_attr) { 1207 + case TCMU_ATTR_DEV_CFG: 1208 + ret = nla_put_string(skb, reconfig_attr, reconfig_data); 1209 + break; 1210 + case TCMU_ATTR_DEV_SIZE: 1211 + ret = nla_put_u64_64bit(skb, reconfig_attr, 1212 + *((u64 *)reconfig_data), 1213 + TCMU_ATTR_PAD); 1214 + break; 1215 + case TCMU_ATTR_WRITECACHE: 1216 + ret = nla_put_u8(skb, reconfig_attr, 1217 + *((u8 *)reconfig_data)); 1218 + break; 1219 + default: 1220 + BUG(); 1221 + } 1222 + 1223 + if (ret < 0) 1224 + goto free_skb; 1225 + } 1372 1226 1373 1227 genlmsg_end(skb, msg_header); 1374 1228 1229 + tcmu_init_genl_cmd_reply(udev, cmd); 1230 + 1375 1231 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, 1376 1232 TCMU_MCGRP_CONFIG, GFP_KERNEL); 1377 - 1378 1233 /* We don't care if no one is listening */ 1379 1234 if (ret == -ESRCH) 1380 1235 ret = 0; 1236 + if (!ret) 1237 + ret = tcmu_wait_genl_cmd_reply(udev); 1381 1238 1382 1239 return ret; 1383 1240 free_skb: ··· 1414 1213 return ret; 1415 1214 } 1416 1215 1417 - static int tcmu_configure_device(struct se_device *dev) 1216 + static int tcmu_update_uio_info(struct tcmu_dev *udev) 1418 1217 { 1419 - struct tcmu_dev *udev = TCMU_DEV(dev); 1420 1218 struct tcmu_hba *hba = udev->hba->hba_ptr; 1421 1219 struct uio_info *info; 1422 - struct tcmu_mailbox *mb; 1423 - size_t size; 1424 - size_t used; 1425 - int ret = 0; 1220 + size_t size, used; 1426 1221 char *str; 1427 1222 1428 1223 info = &udev->uio_info; 1429 - 1430 1224 size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name, 1431 1225 udev->dev_config); 1432 1226 size += 1; /* for \0 */ ··· 1430 1234 return -ENOMEM; 1431 1235 1432 1236 used = snprintf(str, size, "tcm-user/%u/%s", 
hba->host_id, udev->name); 1433 - 1434 1237 if (udev->dev_config[0]) 1435 1238 snprintf(str + used, size - used, "/%s", udev->dev_config); 1436 1239 1437 1240 info->name = str; 1241 + 1242 + return 0; 1243 + } 1244 + 1245 + static int tcmu_configure_device(struct se_device *dev) 1246 + { 1247 + struct tcmu_dev *udev = TCMU_DEV(dev); 1248 + struct uio_info *info; 1249 + struct tcmu_mailbox *mb; 1250 + int ret = 0; 1251 + 1252 + ret = tcmu_update_uio_info(udev); 1253 + if (ret) 1254 + return ret; 1255 + 1256 + info = &udev->uio_info; 1438 1257 1439 1258 udev->mb_addr = vzalloc(CMDR_SIZE); 1440 1259 if (!udev->mb_addr) { ··· 1501 1290 /* Other attributes can be configured in userspace */ 1502 1291 if (!dev->dev_attrib.hw_max_sectors) 1503 1292 dev->dev_attrib.hw_max_sectors = 128; 1293 + if (!dev->dev_attrib.emulate_write_cache) 1294 + dev->dev_attrib.emulate_write_cache = 0; 1504 1295 dev->dev_attrib.hw_queue_depth = 128; 1505 1296 1506 1297 /* ··· 1511 1298 */ 1512 1299 kref_get(&udev->kref); 1513 1300 1514 - ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, 1515 - udev->uio_info.uio_dev->minor); 1301 + ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL); 1516 1302 if (ret) 1517 1303 goto err_netlink; 1518 1304 ··· 1567 1355 static void tcmu_free_device(struct se_device *dev) 1568 1356 { 1569 1357 struct tcmu_dev *udev = TCMU_DEV(dev); 1358 + 1359 + /* release ref from init */ 1360 + kref_put(&udev->kref, tcmu_dev_kref_release); 1361 + } 1362 + 1363 + static void tcmu_destroy_device(struct se_device *dev) 1364 + { 1365 + struct tcmu_dev *udev = TCMU_DEV(dev); 1570 1366 struct tcmu_cmd *cmd; 1571 1367 bool all_expired = true; 1572 1368 int i; ··· 1599 1379 1600 1380 tcmu_blocks_release(udev); 1601 1381 1602 - if (tcmu_dev_configured(udev)) { 1603 - tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name, 1604 - udev->uio_info.uio_dev->minor); 1382 + tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL); 1605 1383 1606 - 
uio_unregister_device(&udev->uio_info); 1607 - } 1384 + uio_unregister_device(&udev->uio_info); 1608 1385 1609 - /* release ref from init */ 1386 + /* release ref from configure */ 1610 1387 kref_put(&udev->kref, tcmu_dev_kref_release); 1611 1388 } 1612 1389 ··· 1763 1546 } 1764 1547 CONFIGFS_ATTR(tcmu_, cmd_time_out); 1765 1548 1549 + static ssize_t tcmu_dev_config_show(struct config_item *item, char *page) 1550 + { 1551 + struct se_dev_attrib *da = container_of(to_config_group(item), 1552 + struct se_dev_attrib, da_group); 1553 + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1554 + 1555 + return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); 1556 + } 1557 + 1558 + static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page, 1559 + size_t count) 1560 + { 1561 + struct se_dev_attrib *da = container_of(to_config_group(item), 1562 + struct se_dev_attrib, da_group); 1563 + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1564 + int ret, len; 1565 + 1566 + len = strlen(page); 1567 + if (!len || len > TCMU_CONFIG_LEN - 1) 1568 + return -EINVAL; 1569 + 1570 + /* Check if device has been configured before */ 1571 + if (tcmu_dev_configured(udev)) { 1572 + ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, 1573 + TCMU_ATTR_DEV_CFG, page); 1574 + if (ret) { 1575 + pr_err("Unable to reconfigure device\n"); 1576 + return ret; 1577 + } 1578 + strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); 1579 + 1580 + ret = tcmu_update_uio_info(udev); 1581 + if (ret) 1582 + return ret; 1583 + return count; 1584 + } 1585 + strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); 1586 + 1587 + return count; 1588 + } 1589 + CONFIGFS_ATTR(tcmu_, dev_config); 1590 + 1591 + static ssize_t tcmu_dev_size_show(struct config_item *item, char *page) 1592 + { 1593 + struct se_dev_attrib *da = container_of(to_config_group(item), 1594 + struct se_dev_attrib, da_group); 1595 + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1596 + 1597 + return snprintf(page, PAGE_SIZE, "%zu\n", 
udev->dev_size); 1598 + } 1599 + 1600 + static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page, 1601 + size_t count) 1602 + { 1603 + struct se_dev_attrib *da = container_of(to_config_group(item), 1604 + struct se_dev_attrib, da_group); 1605 + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1606 + u64 val; 1607 + int ret; 1608 + 1609 + ret = kstrtou64(page, 0, &val); 1610 + if (ret < 0) 1611 + return ret; 1612 + 1613 + /* Check if device has been configured before */ 1614 + if (tcmu_dev_configured(udev)) { 1615 + ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, 1616 + TCMU_ATTR_DEV_SIZE, &val); 1617 + if (ret) { 1618 + pr_err("Unable to reconfigure device\n"); 1619 + return ret; 1620 + } 1621 + } 1622 + udev->dev_size = val; 1623 + return count; 1624 + } 1625 + CONFIGFS_ATTR(tcmu_, dev_size); 1626 + 1627 + static ssize_t tcmu_emulate_write_cache_show(struct config_item *item, 1628 + char *page) 1629 + { 1630 + struct se_dev_attrib *da = container_of(to_config_group(item), 1631 + struct se_dev_attrib, da_group); 1632 + 1633 + return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache); 1634 + } 1635 + 1636 + static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, 1637 + const char *page, size_t count) 1638 + { 1639 + struct se_dev_attrib *da = container_of(to_config_group(item), 1640 + struct se_dev_attrib, da_group); 1641 + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1642 + u8 val; 1643 + int ret; 1644 + 1645 + ret = kstrtou8(page, 0, &val); 1646 + if (ret < 0) 1647 + return ret; 1648 + 1649 + /* Check if device has been configured before */ 1650 + if (tcmu_dev_configured(udev)) { 1651 + ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, 1652 + TCMU_ATTR_WRITECACHE, &val); 1653 + if (ret) { 1654 + pr_err("Unable to reconfigure device\n"); 1655 + return ret; 1656 + } 1657 + } 1658 + 1659 + da->emulate_write_cache = val; 1660 + return count; 1661 + } 1662 + CONFIGFS_ATTR(tcmu_, emulate_write_cache); 1663 + 
1664 + static struct configfs_attribute *tcmu_attrib_attrs[] = { 1665 + &tcmu_attr_cmd_time_out, 1666 + &tcmu_attr_dev_config, 1667 + &tcmu_attr_dev_size, 1668 + &tcmu_attr_emulate_write_cache, 1669 + NULL, 1670 + }; 1671 + 1766 1672 static struct configfs_attribute **tcmu_attrs; 1767 1673 1768 1674 static struct target_backend_ops tcmu_ops = { ··· 1896 1556 .detach_hba = tcmu_detach_hba, 1897 1557 .alloc_device = tcmu_alloc_device, 1898 1558 .configure_device = tcmu_configure_device, 1559 + .destroy_device = tcmu_destroy_device, 1899 1560 .free_device = tcmu_free_device, 1900 1561 .parse_cdb = tcmu_parse_cdb, 1901 1562 .set_configfs_dev_params = tcmu_set_configfs_dev_params, ··· 1914 1573 struct page *page; 1915 1574 int i; 1916 1575 1917 - while (1) { 1576 + while (!kthread_should_stop()) { 1918 1577 DEFINE_WAIT(__wait); 1919 1578 1920 1579 prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE); ··· 1986 1645 1987 1646 static int __init tcmu_module_init(void) 1988 1647 { 1989 - int ret, i, len = 0; 1648 + int ret, i, k, len = 0; 1990 1649 1991 1650 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 1992 1651 ··· 2011 1670 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { 2012 1671 len += sizeof(struct configfs_attribute *); 2013 1672 } 2014 - len += sizeof(struct configfs_attribute *) * 2; 1673 + for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) { 1674 + len += sizeof(struct configfs_attribute *); 1675 + } 1676 + len += sizeof(struct configfs_attribute *); 2015 1677 2016 1678 tcmu_attrs = kzalloc(len, GFP_KERNEL); 2017 1679 if (!tcmu_attrs) { ··· 2025 1681 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { 2026 1682 tcmu_attrs[i] = passthrough_attrib_attrs[i]; 2027 1683 } 2028 - tcmu_attrs[i] = &tcmu_attr_cmd_time_out; 1684 + for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) { 1685 + tcmu_attrs[i] = tcmu_attrib_attrs[k]; 1686 + i++; 1687 + } 2029 1688 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; 2030 1689 2031 1690 ret = 
transport_backend_register(&tcmu_ops);
+118 -80
drivers/target/target_core_xcopy.c
··· 40 40 41 41 static struct workqueue_struct *xcopy_wq = NULL; 42 42 43 + static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop); 44 + 43 45 static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) 44 46 { 45 47 int off = 0; ··· 55 53 return 0; 56 54 } 57 55 58 - static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn, 59 - struct se_device **found_dev) 56 + struct xcopy_dev_search_info { 57 + const unsigned char *dev_wwn; 58 + struct se_device *found_dev; 59 + }; 60 + 61 + static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev, 62 + void *data) 60 63 { 61 - struct se_device *se_dev; 64 + struct xcopy_dev_search_info *info = data; 62 65 unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; 63 66 int rc; 64 67 65 - mutex_lock(&g_device_mutex); 66 - list_for_each_entry(se_dev, &g_device_list, g_dev_node) { 67 - 68 - if (!se_dev->dev_attrib.emulate_3pc) 69 - continue; 70 - 71 - memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); 72 - target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); 73 - 74 - rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); 75 - if (rc != 0) 76 - continue; 77 - 78 - *found_dev = se_dev; 79 - pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); 80 - 81 - rc = target_depend_item(&se_dev->dev_group.cg_item); 82 - if (rc != 0) { 83 - pr_err("configfs_depend_item attempt failed:" 84 - " %d for se_dev: %p\n", rc, se_dev); 85 - mutex_unlock(&g_device_mutex); 86 - return rc; 87 - } 88 - 89 - pr_debug("Called configfs_depend_item for se_dev: %p" 90 - " se_dev->se_dev_group: %p\n", se_dev, 91 - &se_dev->dev_group); 92 - 93 - mutex_unlock(&g_device_mutex); 68 + if (!se_dev->dev_attrib.emulate_3pc) 94 69 return 0; 95 - } 96 - mutex_unlock(&g_device_mutex); 97 70 98 - pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); 99 - return -EINVAL; 71 + memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); 72 + target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); 
73 + 74 + rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); 75 + if (rc != 0) 76 + return 0; 77 + 78 + info->found_dev = se_dev; 79 + pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); 80 + 81 + rc = target_depend_item(&se_dev->dev_group.cg_item); 82 + if (rc != 0) { 83 + pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n", 84 + rc, se_dev); 85 + return rc; 86 + } 87 + 88 + pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n", 89 + se_dev, &se_dev->dev_group); 90 + return 1; 91 + } 92 + 93 + static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn, 94 + struct se_device **found_dev) 95 + { 96 + struct xcopy_dev_search_info info; 97 + int ret; 98 + 99 + memset(&info, 0, sizeof(info)); 100 + info.dev_wwn = dev_wwn; 101 + 102 + ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info); 103 + if (ret == 1) { 104 + *found_dev = info.found_dev; 105 + return 0; 106 + } else { 107 + pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); 108 + return -EINVAL; 109 + } 100 110 } 101 111 102 112 static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, ··· 325 311 (unsigned long long)xop->dst_lba); 326 312 327 313 if (dc != 0) { 328 - xop->dbl = (desc[29] & 0xff) << 16; 329 - xop->dbl |= (desc[30] & 0xff) << 8; 330 - xop->dbl |= desc[31] & 0xff; 314 + xop->dbl = get_unaligned_be24(&desc[29]); 331 315 332 316 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); 333 317 } ··· 793 781 static void target_xcopy_do_work(struct work_struct *work) 794 782 { 795 783 struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work); 796 - struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev; 797 784 struct se_cmd *ec_cmd = xop->xop_se_cmd; 798 - sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba; 785 + struct se_device *src_dev, *dst_dev; 786 + sector_t src_lba, dst_lba, end_lba; 799 787 unsigned int 
max_sectors; 800 - int rc; 801 - unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0; 788 + int rc = 0; 789 + unsigned short nolb, cur_nolb, max_nolb, copied_nolb = 0; 802 790 791 + if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE) 792 + goto err_free; 793 + 794 + if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev)) 795 + goto err_free; 796 + 797 + src_dev = xop->src_dev; 798 + dst_dev = xop->dst_dev; 799 + src_lba = xop->src_lba; 800 + dst_lba = xop->dst_lba; 801 + nolb = xop->nolb; 803 802 end_lba = src_lba + nolb; 804 803 /* 805 804 * Break up XCOPY I/O into hw_max_sectors sized I/O based on the ··· 878 855 879 856 out: 880 857 xcopy_pt_undepend_remotedev(xop); 858 + 859 + err_free: 881 860 kfree(xop); 882 861 /* 883 862 * Don't override an error scsi status if it has already been set ··· 892 867 target_complete_cmd(ec_cmd, ec_cmd->scsi_status); 893 868 } 894 869 895 - sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) 870 + /* 871 + * Returns TCM_NO_SENSE upon success or a sense code != TCM_NO_SENSE if parsing 872 + * fails. 
873 + */ 874 + static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop) 896 875 { 897 - struct se_device *dev = se_cmd->se_dev; 898 - struct xcopy_op *xop = NULL; 876 + struct se_cmd *se_cmd = xop->xop_se_cmd; 899 877 unsigned char *p = NULL, *seg_desc; 900 - unsigned int list_id, list_id_usage, sdll, inline_dl, sa; 878 + unsigned int list_id, list_id_usage, sdll, inline_dl; 901 879 sense_reason_t ret = TCM_INVALID_PARAMETER_LIST; 902 880 int rc; 903 881 unsigned short tdll; 904 882 905 - if (!dev->dev_attrib.emulate_3pc) { 906 - pr_err("EXTENDED_COPY operation explicitly disabled\n"); 907 - return TCM_UNSUPPORTED_SCSI_OPCODE; 908 - } 909 - 910 - sa = se_cmd->t_task_cdb[1] & 0x1f; 911 - if (sa != 0x00) { 912 - pr_err("EXTENDED_COPY(LID4) not supported\n"); 913 - return TCM_UNSUPPORTED_SCSI_OPCODE; 914 - } 915 - 916 - if (se_cmd->data_length == 0) { 917 - target_complete_cmd(se_cmd, SAM_STAT_GOOD); 918 - return TCM_NO_SENSE; 919 - } 920 - if (se_cmd->data_length < XCOPY_HDR_LEN) { 921 - pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n", 922 - se_cmd->data_length, XCOPY_HDR_LEN); 923 - return TCM_PARAMETER_LIST_LENGTH_ERROR; 924 - } 925 - 926 - xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); 927 - if (!xop) { 928 - pr_err("Unable to allocate xcopy_op\n"); 929 - return TCM_OUT_OF_RESOURCES; 930 - } 931 - xop->xop_se_cmd = se_cmd; 932 - 933 883 p = transport_kmap_data_sg(se_cmd); 934 884 if (!p) { 935 885 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n"); 936 - kfree(xop); 937 886 return TCM_OUT_OF_RESOURCES; 938 887 } 939 888 ··· 976 977 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, 977 978 rc * XCOPY_TARGET_DESC_LEN); 978 979 transport_kunmap_data_sg(se_cmd); 979 - 980 - INIT_WORK(&xop->xop_work, target_xcopy_do_work); 981 - queue_work(xcopy_wq, &xop->xop_work); 982 980 return TCM_NO_SENSE; 983 981 984 982 out: 985 983 if (p) 986 984 transport_kunmap_data_sg(se_cmd); 987 - kfree(xop); 988 985 return ret; 
986 + } 987 + 988 + sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) 989 + { 990 + struct se_device *dev = se_cmd->se_dev; 991 + struct xcopy_op *xop; 992 + unsigned int sa; 993 + 994 + if (!dev->dev_attrib.emulate_3pc) { 995 + pr_err("EXTENDED_COPY operation explicitly disabled\n"); 996 + return TCM_UNSUPPORTED_SCSI_OPCODE; 997 + } 998 + 999 + sa = se_cmd->t_task_cdb[1] & 0x1f; 1000 + if (sa != 0x00) { 1001 + pr_err("EXTENDED_COPY(LID4) not supported\n"); 1002 + return TCM_UNSUPPORTED_SCSI_OPCODE; 1003 + } 1004 + 1005 + if (se_cmd->data_length == 0) { 1006 + target_complete_cmd(se_cmd, SAM_STAT_GOOD); 1007 + return TCM_NO_SENSE; 1008 + } 1009 + if (se_cmd->data_length < XCOPY_HDR_LEN) { 1010 + pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n", 1011 + se_cmd->data_length, XCOPY_HDR_LEN); 1012 + return TCM_PARAMETER_LIST_LENGTH_ERROR; 1013 + } 1014 + 1015 + xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); 1016 + if (!xop) 1017 + goto err; 1018 + xop->xop_se_cmd = se_cmd; 1019 + INIT_WORK(&xop->xop_work, target_xcopy_do_work); 1020 + if (WARN_ON_ONCE(!queue_work(xcopy_wq, &xop->xop_work))) 1021 + goto free; 1022 + return TCM_NO_SENSE; 1023 + 1024 + free: 1025 + kfree(xop); 1026 + 1027 + err: 1028 + return TCM_OUT_OF_RESOURCES; 989 1029 } 990 1030 991 1031 static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
+3 -8
drivers/vhost/scsi.c
··· 496 496 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, 497 497 vs_event_work); 498 498 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 499 - struct vhost_scsi_evt *evt; 499 + struct vhost_scsi_evt *evt, *t; 500 500 struct llist_node *llnode; 501 501 502 502 mutex_lock(&vq->mutex); 503 503 llnode = llist_del_all(&vs->vs_event_list); 504 - while (llnode) { 505 - evt = llist_entry(llnode, struct vhost_scsi_evt, list); 506 - llnode = llist_next(llnode); 504 + llist_for_each_entry_safe(evt, t, llnode, list) { 507 505 vhost_scsi_do_evt_work(vs, evt); 508 506 vhost_scsi_free_evt(vs, evt); 509 507 } ··· 527 529 528 530 bitmap_zero(signal, VHOST_SCSI_MAX_VQ); 529 531 llnode = llist_del_all(&vs->vs_completion_list); 530 - while (llnode) { 531 - cmd = llist_entry(llnode, struct vhost_scsi_cmd, 532 - tvc_completion_list); 533 - llnode = llist_next(llnode); 532 + llist_for_each_entry(cmd, llnode, tvc_completion_list) { 534 533 se_cmd = &cmd->tvc_se_cmd; 535 534 536 535 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
+9 -27
drivers/xen/xen-scsiback.c
··· 134 134 struct page *pages[VSCSI_MAX_GRANTS]; 135 135 136 136 struct se_cmd se_cmd; 137 - }; 138 137 139 - struct scsiback_tmr { 140 - atomic_t tmr_complete; 141 - wait_queue_head_t tmr_wait; 138 + struct completion tmr_done; 142 139 }; 143 140 144 141 #define VSCSI_DEFAULT_SESSION_TAGS 128 ··· 596 599 struct scsiback_tpg *tpg = pending_req->v2p->tpg; 597 600 struct scsiback_nexus *nexus = tpg->tpg_nexus; 598 601 struct se_cmd *se_cmd = &pending_req->se_cmd; 599 - struct scsiback_tmr *tmr; 600 602 u64 unpacked_lun = pending_req->v2p->lun; 601 603 int rc, err = FAILED; 602 604 603 - tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL); 604 - if (!tmr) { 605 - target_put_sess_cmd(se_cmd); 606 - goto err; 607 - } 608 - 609 - init_waitqueue_head(&tmr->tmr_wait); 605 + init_completion(&pending_req->tmr_done); 610 606 611 607 rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess, 612 608 &pending_req->sense_buffer[0], 613 - unpacked_lun, tmr, act, GFP_KERNEL, 609 + unpacked_lun, NULL, act, GFP_KERNEL, 614 610 tag, TARGET_SCF_ACK_KREF); 615 611 if (rc) 616 612 goto err; 617 613 618 - wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete)); 614 + wait_for_completion(&pending_req->tmr_done); 619 615 620 616 err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? 
621 617 SUCCESS : FAILED; 622 618 623 619 scsiback_do_resp_with_sense(NULL, err, 0, pending_req); 624 - transport_generic_free_cmd(&pending_req->se_cmd, 1); 620 + transport_generic_free_cmd(&pending_req->se_cmd, 0); 625 621 return; 622 + 626 623 err: 627 - if (tmr) 628 - kfree(tmr); 629 624 scsiback_do_resp_with_sense(NULL, err, 0, pending_req); 630 625 } 631 626 ··· 1378 1389 static void scsiback_release_cmd(struct se_cmd *se_cmd) 1379 1390 { 1380 1391 struct se_session *se_sess = se_cmd->se_sess; 1381 - struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; 1382 - 1383 - if (se_tmr && se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { 1384 - struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr; 1385 - kfree(tmr); 1386 - } 1387 1392 1388 1393 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 1389 1394 } ··· 1438 1455 1439 1456 static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd) 1440 1457 { 1441 - struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; 1442 - struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr; 1458 + struct vscsibk_pend *pending_req = container_of(se_cmd, 1459 + struct vscsibk_pend, se_cmd); 1443 1460 1444 - atomic_set(&tmr->tmr_complete, 1); 1445 - wake_up(&tmr->tmr_wait); 1461 + complete(&pending_req->tmr_done); 1446 1462 } 1447 1463 1448 1464 static void scsiback_aborted_task(struct se_cmd *se_cmd)
+1
include/scsi/scsi_proto.h
··· 158 158 #define READ_32 0x09 159 159 #define VERIFY_32 0x0a 160 160 #define WRITE_32 0x0b 161 + #define WRITE_VERIFY_32 0x0c 161 162 #define WRITE_SAME_32 0x0d 162 163 #define ATA_32 0x1ff0 163 164
+9 -1
include/target/iscsi/iscsi_target_core.h
··· 66 66 #define TA_DEFAULT_FABRIC_PROT_TYPE 0 67 67 /* TPG status needs to be enabled to return sendtargets discovery endpoint info */ 68 68 #define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1 69 + /* 70 + * Used to control the sending of keys with optional to respond state bit, 71 + * as a workaround for non RFC compliant initiators,that do not propose, 72 + * nor respond to specific keys required for login to complete. 73 + * 74 + * See iscsi_check_proposer_for_optional_reply() for more details. 75 + */ 76 + #define TA_DEFAULT_LOGIN_KEYS_WORKAROUND 1 69 77 70 78 #define ISCSI_IOV_DATA_BUFFER 5 71 79 ··· 568 560 #define LOGIN_FLAGS_INITIAL_PDU 8 569 561 unsigned long login_flags; 570 562 struct delayed_work login_work; 571 - struct delayed_work login_cleanup_work; 572 563 struct iscsi_login *login; 573 564 struct timer_list nopin_timer; 574 565 struct timer_list nopin_response_timer; ··· 776 769 u8 t10_pi; 777 770 u32 fabric_prot_type; 778 771 u32 tpg_enabled_sendtargets; 772 + u32 login_keys_workaround; 779 773 struct iscsi_portal_group *tpg; 780 774 }; 781 775
+13 -4
include/target/target_core_backend.h
··· 2 2 #define TARGET_CORE_BACKEND_H 3 3 4 4 #include <linux/types.h> 5 + #include <asm/unaligned.h> 5 6 #include <target/target_core_base.h> 6 7 7 8 #define TRANSPORT_FLAG_PASSTHROUGH 0x1 ··· 30 29 31 30 struct se_device *(*alloc_device)(struct se_hba *, const char *); 32 31 int (*configure_device)(struct se_device *); 32 + void (*destroy_device)(struct se_device *); 33 33 void (*free_device)(struct se_device *device); 34 34 35 35 ssize_t (*set_configfs_dev_params)(struct se_device *, 36 36 const char *, ssize_t); 37 37 ssize_t (*show_configfs_dev_params)(struct se_device *, char *); 38 - 39 - void (*transport_complete)(struct se_cmd *cmd, 40 - struct scatterlist *, 41 - unsigned char *); 42 38 43 39 sense_reason_t (*parse_cdb)(struct se_cmd *cmd); 44 40 u32 (*get_device_type)(struct se_device *); ··· 68 70 69 71 void target_complete_cmd(struct se_cmd *, u8); 70 72 void target_complete_cmd_with_length(struct se_cmd *, u8, int); 73 + 74 + void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *); 71 75 72 76 sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size); 73 77 sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd); ··· 104 104 sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, 105 105 sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); 106 106 107 + struct se_device *target_find_device(int id, bool do_depend); 108 + 107 109 bool target_sense_desc_format(struct se_device *dev); 108 110 sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); 109 111 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, 110 112 struct request_queue *q); 113 + 114 + 115 + /* Only use get_unaligned_be24() if reading p - 1 is allowed. */ 116 + static inline uint32_t get_unaligned_be24(const uint8_t *const p) 117 + { 118 + return get_unaligned_be32(p - 1) & 0xffffffU; 119 + } 111 120 112 121 #endif /* TARGET_CORE_BACKEND_H */
+2 -9
include/target/target_core_base.h
··· 188 188 TARGET_SCF_BIDI_OP = 0x01, 189 189 TARGET_SCF_ACK_KREF = 0x02, 190 190 TARGET_SCF_UNKNOWN_SIZE = 0x04, 191 - TARGET_SCF_USE_CPUID = 0x08, 191 + TARGET_SCF_USE_CPUID = 0x08, 192 + TARGET_SCF_LOOKUP_LUN_FROM_TAG = 0x10, 192 193 }; 193 194 194 195 /* fabric independent task management function values */ ··· 219 218 */ 220 219 typedef enum { 221 220 SCSI_INST_INDEX, 222 - SCSI_DEVICE_INDEX, 223 221 SCSI_AUTH_INTR_INDEX, 224 222 SCSI_INDEX_TYPE_MAX 225 223 } scsi_index_t; ··· 701 701 702 702 struct se_lun { 703 703 u64 unpacked_lun; 704 - #define SE_LUN_LINK_MAGIC 0xffff7771 705 - u32 lun_link_magic; 706 704 bool lun_shutdown; 707 705 bool lun_access_ro; 708 706 u32 lun_index; ··· 744 746 }; 745 747 746 748 struct se_device { 747 - #define SE_DEV_LINK_MAGIC 0xfeeddeef 748 - u32 dev_link_magic; 749 749 /* RELATIVE TARGET PORT IDENTIFER Counter */ 750 750 u16 dev_rpti_counter; 751 751 /* Used for SAM Task Attribute ordering */ ··· 796 800 struct list_head delayed_cmd_list; 797 801 struct list_head state_list; 798 802 struct list_head qf_cmd_list; 799 - struct list_head g_dev_node; 800 803 /* Pointer to associated SE HBA */ 801 804 struct se_hba *se_hba; 802 805 /* T10 Inquiry and VPD WWN Information */ ··· 814 819 unsigned char udev_path[SE_UDEV_PATH_LEN]; 815 820 /* Pointer to template of function pointers for transport */ 816 821 const struct target_backend_ops *transport; 817 - /* Linked list for struct se_hba struct se_device list */ 818 - struct list_head dev_list; 819 822 struct se_lun xcopy_lun; 820 823 /* Protection Information */ 821 824 int prot_length;
+1
include/target/target_core_fabric.h
··· 160 160 int target_put_sess_cmd(struct se_cmd *); 161 161 void target_sess_cmd_list_set_waiting(struct se_session *); 162 162 void target_wait_for_sess_cmds(struct se_session *); 163 + void target_show_cmd(const char *pfx, struct se_cmd *cmd); 163 164 164 165 int core_alua_check_nonop_delay(struct se_cmd *); 165 166
+12
include/uapi/linux/target_core_user.h
··· 130 130 TCMU_CMD_UNSPEC, 131 131 TCMU_CMD_ADDED_DEVICE, 132 132 TCMU_CMD_REMOVED_DEVICE, 133 + TCMU_CMD_RECONFIG_DEVICE, 134 + TCMU_CMD_ADDED_DEVICE_DONE, 135 + TCMU_CMD_REMOVED_DEVICE_DONE, 136 + TCMU_CMD_RECONFIG_DEVICE_DONE, 137 + TCMU_CMD_SET_FEATURES, 133 138 __TCMU_CMD_MAX, 134 139 }; 135 140 #define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1) ··· 143 138 TCMU_ATTR_UNSPEC, 144 139 TCMU_ATTR_DEVICE, 145 140 TCMU_ATTR_MINOR, 141 + TCMU_ATTR_PAD, 142 + TCMU_ATTR_DEV_CFG, 143 + TCMU_ATTR_DEV_SIZE, 144 + TCMU_ATTR_WRITECACHE, 145 + TCMU_ATTR_CMD_STATUS, 146 + TCMU_ATTR_DEVICE_ID, 147 + TCMU_ATTR_SUPP_KERN_CMD_REPLY, 146 148 __TCMU_ATTR_MAX, 147 149 }; 148 150 #define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1)