Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
"This is a batch of changes that didn't make it in the initial pull
request because the lpfc series had to be rebased to redo an incorrect
split.

It's basically driver updates to lpfc, target, bnx2fc and ufs with the
rest being minor updates, except the sr_block_release one, which fixes a
use-after-free introduced by the removal of the global mutex in the
first patch set"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (35 commits)
scsi: core: Add DID_ALLOC_FAILURE and DID_MEDIUM_ERROR to hostbyte_table
scsi: ufs: Use ufshcd_config_pwr_mode() when scaling gear
scsi: bnx2fc: fix boolreturn.cocci warnings
scsi: zfcp: use fallthrough;
scsi: aacraid: do not overwrite retval in aac_reset_adapter()
scsi: sr: Fix sr_block_release()
scsi: aic7xxx: Remove more FreeBSD-specific code
scsi: mpt3sas: Fix kernel panic observed on soft HBA unplug
scsi: ufs: set device as active power mode after resetting device
scsi: iscsi: Report unbind session event when the target has been removed
scsi: lpfc: Change default SCSI LUN QD to 64
scsi: libfc: rport state move to PLOGI if all PRLI retry exhausted
scsi: libfc: If PRLI rejected, move rport to PLOGI state
scsi: bnx2fc: Update the driver version to 2.12.13
scsi: bnx2fc: Fix SCSI command completion after cleanup is posted
scsi: bnx2fc: Process the RQE with CQE in interrupt context
scsi: target: use the stack for XCOPY passthrough cmds
scsi: target: increase XCOPY I/O size
scsi: target: avoid per-loop XCOPY buffer allocations
scsi: target: drop xcopy DISK BLOCK LENGTH debug
...

+725 -773
+5 -5
drivers/s390/scsi/zfcp_erp.c
··· 178 178 return 0; 179 179 if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED)) 180 180 need = ZFCP_ERP_ACTION_REOPEN_PORT; 181 - /* fall through */ 181 + fallthrough; 182 182 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 183 183 p_status = atomic_read(&port->status); 184 184 if (!(p_status & ZFCP_STATUS_COMMON_OPEN)) 185 185 need = ZFCP_ERP_ACTION_REOPEN_PORT; 186 - /* fall through */ 186 + fallthrough; 187 187 case ZFCP_ERP_ACTION_REOPEN_PORT: 188 188 p_status = atomic_read(&port->status); 189 189 if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE) ··· 196 196 return need; 197 197 if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED)) 198 198 need = ZFCP_ERP_ACTION_REOPEN_ADAPTER; 199 - /* fall through */ 199 + fallthrough; 200 200 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 201 201 a_status = atomic_read(&adapter->status); 202 202 if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE) ··· 1086 1086 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) 1087 1087 return zfcp_erp_lun_strategy_close(erp_action); 1088 1088 /* already closed */ 1089 - /* fall through */ 1089 + fallthrough; 1090 1090 case ZFCP_ERP_STEP_LUN_CLOSING: 1091 1091 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) 1092 1092 return ZFCP_ERP_FAILED; ··· 1415 1415 if (act->step != ZFCP_ERP_STEP_UNINITIALIZED) 1416 1416 if (result == ZFCP_ERP_SUCCEEDED) 1417 1417 zfcp_erp_try_rport_unblock(port); 1418 - /* fall through */ 1418 + fallthrough; 1419 1419 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1420 1420 put_device(&port->dev); 1421 1421 break;
+11 -12
drivers/s390/scsi/zfcp_fsf.c
··· 564 564 case FSF_TOPO_AL: 565 565 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 566 566 fc_host_fabric_name(shost) = 0; 567 - /* fall through */ 567 + fallthrough; 568 568 default: 569 569 fc_host_fabric_name(shost) = 0; 570 570 dev_err(&adapter->ccw_device->dev, ··· 1032 1032 switch (fsq->word[0]) { 1033 1033 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1034 1034 zfcp_fc_test_link(zfcp_sdev->port); 1035 - /* fall through */ 1035 + fallthrough; 1036 1036 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1037 1037 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1038 1038 break; ··· 1127 1127 break; 1128 1128 case FSF_PORT_HANDLE_NOT_VALID: 1129 1129 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1"); 1130 - /* fall through */ 1130 + fallthrough; 1131 1131 case FSF_GENERIC_COMMAND_REJECTED: 1132 1132 case FSF_PAYLOAD_SIZE_MISMATCH: 1133 1133 case FSF_REQUEST_SIZE_TOO_LARGE: ··· 1313 1313 break; 1314 1314 case FSF_SBAL_MISMATCH: 1315 1315 /* should never occur, avoided in zfcp_fsf_send_els */ 1316 - /* fall through */ 1316 + fallthrough; 1317 1317 default: 1318 1318 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1319 1319 break; ··· 1736 1736 switch (header->fsf_status_qual.word[0]) { 1737 1737 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1738 1738 /* no zfcp_fc_test_link() with failed open port */ 1739 - /* fall through */ 1739 + fallthrough; 1740 1740 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1741 1741 case FSF_SQ_NO_RETRY_POSSIBLE: 1742 1742 req->status |= ZFCP_STATUS_FSFREQ_ERROR; ··· 1909 1909 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1910 1910 dev_warn(&req->adapter->ccw_device->dev, 1911 1911 "Opening WKA port 0x%x failed\n", wka_port->d_id); 1912 - /* fall through */ 1912 + fallthrough; 1913 1913 case FSF_ADAPTER_STATUS_AVAILABLE: 1914 1914 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1915 1915 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; 1916 1916 break; 1917 1917 case FSF_GOOD: 1918 1918 wka_port->handle = header->port_handle; 1919 - /* fall through */ 1919 + fallthrough; 1920 1920 case 
FSF_PORT_ALREADY_OPEN: 1921 1921 wka_port->status = ZFCP_FC_WKA_PORT_ONLINE; 1922 1922 } ··· 2059 2059 case FSF_ADAPTER_STATUS_AVAILABLE: 2060 2060 switch (header->fsf_status_qual.word[0]) { 2061 2061 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 2062 - /* fall through */ 2063 2062 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 2064 2063 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2065 2064 break; ··· 2143 2144 2144 2145 case FSF_PORT_HANDLE_NOT_VALID: 2145 2146 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1"); 2146 - /* fall through */ 2147 + fallthrough; 2147 2148 case FSF_LUN_ALREADY_OPEN: 2148 2149 break; 2149 2150 case FSF_PORT_BOXED: ··· 2174 2175 (unsigned long long)zfcp_scsi_dev_lun(sdev), 2175 2176 (unsigned long long)zfcp_sdev->port->wwpn); 2176 2177 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); 2177 - /* fall through */ 2178 + fallthrough; 2178 2179 case FSF_INVALID_COMMAND_OPTION: 2179 2180 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2180 2181 break; ··· 2182 2183 switch (header->fsf_status_qual.word[0]) { 2183 2184 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 2184 2185 zfcp_fc_test_link(zfcp_sdev->port); 2185 - /* fall through */ 2186 + fallthrough; 2186 2187 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 2187 2188 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2188 2189 break; ··· 2276 2277 switch (req->qtcb->header.fsf_status_qual.word[0]) { 2277 2278 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 2278 2279 zfcp_fc_test_link(zfcp_sdev->port); 2279 - /* fall through */ 2280 + fallthrough; 2280 2281 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 2281 2282 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2282 2283 break;
+4 -3
drivers/scsi/aacraid/commsup.c
··· 1626 1626 int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) 1627 1627 { 1628 1628 unsigned long flagv = 0; 1629 - int retval; 1629 + int retval, unblock_retval; 1630 1630 struct Scsi_Host *host = aac->scsi_host_ptr; 1631 1631 int bled; 1632 1632 ··· 1656 1656 retval = _aac_reset_adapter(aac, bled, reset_type); 1657 1657 spin_unlock_irqrestore(host->host_lock, flagv); 1658 1658 1659 - retval = scsi_host_unblock(host, SDEV_RUNNING); 1660 - 1659 + unblock_retval = scsi_host_unblock(host, SDEV_RUNNING); 1660 + if (!retval) 1661 + retval = unblock_retval; 1661 1662 if ((forced < 2) && (retval == -ENODEV)) { 1662 1663 /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */ 1663 1664 struct fib * fibctx = aac_fib_alloc(aac);
-23
drivers/scsi/aic7xxx/aic7xxx_core.c
··· 1834 1834 printerror = 0; 1835 1835 } else if (ahc_sent_msg(ahc, AHCMSG_1B, 1836 1836 MSG_BUS_DEV_RESET, TRUE)) { 1837 - #ifdef __FreeBSD__ 1838 - /* 1839 - * Don't mark the user's request for this BDR 1840 - * as completing with CAM_BDR_SENT. CAM3 1841 - * specifies CAM_REQ_CMP. 1842 - */ 1843 - if (scb != NULL 1844 - && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV 1845 - && ahc_match_scb(ahc, scb, target, channel, 1846 - CAM_LUN_WILDCARD, 1847 - SCB_LIST_NULL, 1848 - ROLE_INITIATOR)) { 1849 - ahc_set_transaction_status(scb, CAM_REQ_CMP); 1850 - } 1851 - #endif 1852 1837 ahc_compile_devinfo(&devinfo, 1853 1838 initiator_role_id, 1854 1839 target, ··· 4384 4399 struct ahc_softc *ahc; 4385 4400 int i; 4386 4401 4387 - #ifndef __FreeBSD__ 4388 4402 ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC); 4389 4403 if (!ahc) { 4390 4404 printk("aic7xxx: cannot malloc softc!\n"); 4391 4405 kfree(name); 4392 4406 return NULL; 4393 4407 } 4394 - #else 4395 - ahc = device_get_softc((device_t)platform_arg); 4396 - #endif 4397 4408 memset(ahc, 0, sizeof(*ahc)); 4398 4409 ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC); 4399 4410 if (ahc->seep_config == NULL) { 4400 - #ifndef __FreeBSD__ 4401 4411 kfree(ahc); 4402 - #endif 4403 4412 kfree(name); 4404 4413 return (NULL); 4405 4414 } ··· 4519 4540 kfree(ahc->name); 4520 4541 if (ahc->seep_config != NULL) 4521 4542 kfree(ahc->seep_config); 4522 - #ifndef __FreeBSD__ 4523 4543 kfree(ahc); 4524 - #endif 4525 4544 return; 4526 4545 } 4527 4546
+9 -4
drivers/scsi/bnx2fc/bnx2fc.h
··· 66 66 #include "bnx2fc_constants.h" 67 67 68 68 #define BNX2FC_NAME "bnx2fc" 69 - #define BNX2FC_VERSION "2.12.10" 69 + #define BNX2FC_VERSION "2.12.13" 70 70 71 71 #define PFX "bnx2fc: " 72 72 ··· 482 482 struct bnx2fc_work { 483 483 struct list_head list; 484 484 struct bnx2fc_rport *tgt; 485 + struct fcoe_task_ctx_entry *task; 486 + unsigned char rq_data[BNX2FC_RQ_BUF_SZ]; 485 487 u16 wqe; 488 + u8 num_rq; 486 489 }; 487 490 struct bnx2fc_unsol_els { 488 491 struct fc_lport *lport; ··· 553 550 enum fc_rport_event event); 554 551 void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, 555 552 struct fcoe_task_ctx_entry *task, 556 - u8 num_rq); 553 + u8 num_rq, unsigned char *rq_data); 557 554 void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, 558 555 struct fcoe_task_ctx_entry *task, 559 556 u8 num_rq); ··· 562 559 u8 num_rq); 563 560 void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, 564 561 struct fcoe_task_ctx_entry *task, 565 - u8 num_rq); 562 + u8 num_rq, unsigned char *rq_data); 566 563 void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req, 567 564 struct fcoe_task_ctx_entry *task, 568 565 u8 num_rq); ··· 580 577 void *arg, u32 timeout); 581 578 void bnx2fc_arm_cq(struct bnx2fc_rport *tgt); 582 579 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt); 583 - void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe); 580 + void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe, 581 + unsigned char *rq_data, u8 num_rq, 582 + struct fcoe_task_ctx_entry *task); 584 583 struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, 585 584 u32 port_id); 586 585 void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
+6 -2
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 660 660 661 661 list_for_each_entry_safe(work, tmp, &work_list, list) { 662 662 list_del_init(&work->list); 663 - bnx2fc_process_cq_compl(work->tgt, work->wqe); 663 + bnx2fc_process_cq_compl(work->tgt, work->wqe, 664 + work->rq_data, 665 + work->num_rq, 666 + work->task); 664 667 kfree(work); 665 668 } 666 669 ··· 2658 2655 /* Free all work in the list */ 2659 2656 list_for_each_entry_safe(work, tmp, &p->work_list, list) { 2660 2657 list_del_init(&work->list); 2661 - bnx2fc_process_cq_compl(work->tgt, work->wqe); 2658 + bnx2fc_process_cq_compl(work->tgt, work->wqe, work->rq_data, 2659 + work->num_rq, work->task); 2662 2660 kfree(work); 2663 2661 } 2664 2662
+75 -28
drivers/scsi/bnx2fc/bnx2fc_hwi.c
··· 863 863 } 864 864 } 865 865 866 - void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) 866 + void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe, 867 + unsigned char *rq_data, u8 num_rq, 868 + struct fcoe_task_ctx_entry *task) 867 869 { 868 - struct fcoe_task_ctx_entry *task; 869 - struct fcoe_task_ctx_entry *task_page; 870 870 struct fcoe_port *port = tgt->port; 871 871 struct bnx2fc_interface *interface = port->priv; 872 872 struct bnx2fc_hba *hba = interface->hba; 873 873 struct bnx2fc_cmd *io_req; 874 - int task_idx, index; 874 + 875 875 u16 xid; 876 876 u8 cmd_type; 877 877 u8 rx_state = 0; 878 - u8 num_rq; 879 878 880 879 spin_lock_bh(&tgt->tgt_lock); 880 + 881 881 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; 882 - if (xid >= hba->max_tasks) { 883 - printk(KERN_ERR PFX "ERROR:xid out of range\n"); 884 - spin_unlock_bh(&tgt->tgt_lock); 885 - return; 886 - } 887 - task_idx = xid / BNX2FC_TASKS_PER_PAGE; 888 - index = xid % BNX2FC_TASKS_PER_PAGE; 889 - task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx]; 890 - task = &(task_page[index]); 891 - 892 - num_rq = ((task->rxwr_txrd.var_ctx.rx_flags & 893 - FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >> 894 - FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT); 895 - 896 882 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; 897 883 898 884 if (io_req == NULL) { ··· 898 912 switch (cmd_type) { 899 913 case BNX2FC_SCSI_CMD: 900 914 if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) { 901 - bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq); 915 + bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq, 916 + rq_data); 902 917 spin_unlock_bh(&tgt->tgt_lock); 903 918 return; 904 919 } ··· 916 929 917 930 case BNX2FC_TASK_MGMT_CMD: 918 931 BNX2FC_IO_DBG(io_req, "Processing TM complete\n"); 919 - bnx2fc_process_tm_compl(io_req, task, num_rq); 932 + bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data); 920 933 break; 921 934 922 935 case BNX2FC_ABTS: ··· 974 987 975 988 } 976 989 977 - static struct bnx2fc_work 
*bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) 990 + static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe, 991 + unsigned char *rq_data, u8 num_rq, 992 + struct fcoe_task_ctx_entry *task) 978 993 { 979 994 struct bnx2fc_work *work; 980 995 work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC); ··· 986 997 INIT_LIST_HEAD(&work->list); 987 998 work->tgt = tgt; 988 999 work->wqe = wqe; 1000 + work->num_rq = num_rq; 1001 + work->task = task; 1002 + if (rq_data) 1003 + memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ); 1004 + 989 1005 return work; 990 1006 } 991 1007 992 1008 /* Pending work request completion */ 993 - static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe) 1009 + static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe) 994 1010 { 995 1011 unsigned int cpu = wqe % num_possible_cpus(); 996 1012 struct bnx2fc_percpu_s *fps; 997 1013 struct bnx2fc_work *work; 1014 + struct fcoe_task_ctx_entry *task; 1015 + struct fcoe_task_ctx_entry *task_page; 1016 + struct fcoe_port *port = tgt->port; 1017 + struct bnx2fc_interface *interface = port->priv; 1018 + struct bnx2fc_hba *hba = interface->hba; 1019 + unsigned char *rq_data = NULL; 1020 + unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ]; 1021 + int task_idx, index; 1022 + unsigned char *dummy; 1023 + u16 xid; 1024 + u8 num_rq; 1025 + int i; 1026 + 1027 + xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; 1028 + if (xid >= hba->max_tasks) { 1029 + pr_err(PFX "ERROR:xid out of range\n"); 1030 + return false; 1031 + } 1032 + 1033 + task_idx = xid / BNX2FC_TASKS_PER_PAGE; 1034 + index = xid % BNX2FC_TASKS_PER_PAGE; 1035 + task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx]; 1036 + task = &task_page[index]; 1037 + 1038 + num_rq = ((task->rxwr_txrd.var_ctx.rx_flags & 1039 + FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >> 1040 + FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT); 1041 + 1042 + memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ); 1043 + 1044 + if (!num_rq) 1045 
+ goto num_rq_zero; 1046 + 1047 + rq_data = bnx2fc_get_next_rqe(tgt, 1); 1048 + 1049 + if (num_rq > 1) { 1050 + /* We do not need extra sense data */ 1051 + for (i = 1; i < num_rq; i++) 1052 + dummy = bnx2fc_get_next_rqe(tgt, 1); 1053 + } 1054 + 1055 + if (rq_data) 1056 + memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ); 1057 + 1058 + /* return RQ entries */ 1059 + for (i = 0; i < num_rq; i++) 1060 + bnx2fc_return_rqe(tgt, 1); 1061 + 1062 + num_rq_zero: 998 1063 999 1064 fps = &per_cpu(bnx2fc_percpu, cpu); 1000 1065 spin_lock_bh(&fps->fp_work_lock); 1001 1066 if (fps->iothread) { 1002 - work = bnx2fc_alloc_work(tgt, wqe); 1067 + work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff, 1068 + num_rq, task); 1003 1069 if (work) { 1004 1070 list_add_tail(&work->list, &fps->work_list); 1005 1071 wake_up_process(fps->iothread); 1006 1072 spin_unlock_bh(&fps->fp_work_lock); 1007 - return; 1073 + return true; 1008 1074 } 1009 1075 } 1010 1076 spin_unlock_bh(&fps->fp_work_lock); 1011 - bnx2fc_process_cq_compl(tgt, wqe); 1077 + bnx2fc_process_cq_compl(tgt, wqe, 1078 + rq_data_buff, num_rq, task); 1079 + 1080 + return true; 1012 1081 } 1013 1082 1014 1083 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) ··· 1103 1056 /* Unsolicited event notification */ 1104 1057 bnx2fc_process_unsol_compl(tgt, wqe); 1105 1058 } else { 1106 - bnx2fc_pending_work(tgt, wqe); 1107 - num_free_sqes++; 1059 + if (bnx2fc_pending_work(tgt, wqe)) 1060 + num_free_sqes++; 1108 1061 } 1109 1062 cqe++; 1110 1063 tgt->cq_cons_idx++;
+13 -21
drivers/scsi/bnx2fc/bnx2fc_io.c
··· 24 24 static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req); 25 25 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, 26 26 struct fcoe_fcp_rsp_payload *fcp_rsp, 27 - u8 num_rq); 27 + u8 num_rq, unsigned char *rq_data); 28 28 29 29 void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, 30 30 unsigned int timer_msec) ··· 1518 1518 } 1519 1519 1520 1520 void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, 1521 - struct fcoe_task_ctx_entry *task, u8 num_rq) 1521 + struct fcoe_task_ctx_entry *task, u8 num_rq, 1522 + unsigned char *rq_data) 1522 1523 { 1523 1524 struct bnx2fc_mp_req *tm_req; 1524 1525 struct fc_frame_header *fc_hdr; ··· 1558 1557 if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) { 1559 1558 bnx2fc_parse_fcp_rsp(io_req, 1560 1559 (struct fcoe_fcp_rsp_payload *) 1561 - rsp_buf, num_rq); 1560 + rsp_buf, num_rq, rq_data); 1562 1561 if (io_req->fcp_rsp_code == 0) { 1563 1562 /* TM successful */ 1564 1563 if (tm_req->tm_flags & FCP_TMF_LUN_RESET) ··· 1756 1755 1757 1756 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, 1758 1757 struct fcoe_fcp_rsp_payload *fcp_rsp, 1759 - u8 num_rq) 1758 + u8 num_rq, unsigned char *rq_data) 1760 1759 { 1761 1760 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1762 - struct bnx2fc_rport *tgt = io_req->tgt; 1763 1761 u8 rsp_flags = fcp_rsp->fcp_flags.flags; 1764 1762 u32 rq_buff_len = 0; 1765 - int i; 1766 - unsigned char *rq_data; 1767 - unsigned char *dummy; 1768 1763 int fcp_sns_len = 0; 1769 1764 int fcp_rsp_len = 0; 1770 1765 ··· 1806 1809 rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ; 1807 1810 } 1808 1811 1809 - rq_data = bnx2fc_get_next_rqe(tgt, 1); 1810 - 1811 - if (num_rq > 1) { 1812 - /* We do not need extra sense data */ 1813 - for (i = 1; i < num_rq; i++) 1814 - dummy = bnx2fc_get_next_rqe(tgt, 1); 1815 - } 1816 - 1817 1812 /* fetch fcp_rsp_code */ 1818 1813 if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { 1819 1814 /* Only for task management function */ ··· 1826 1837 if (fcp_sns_len) 1827 1838 
memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len); 1828 1839 1829 - /* return RQ entries */ 1830 - for (i = 0; i < num_rq; i++) 1831 - bnx2fc_return_rqe(tgt, 1); 1832 1840 } 1833 1841 } 1834 1842 ··· 1904 1918 1905 1919 void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, 1906 1920 struct fcoe_task_ctx_entry *task, 1907 - u8 num_rq) 1921 + u8 num_rq, unsigned char *rq_data) 1908 1922 { 1909 1923 struct fcoe_fcp_rsp_payload *fcp_rsp; 1910 1924 struct bnx2fc_rport *tgt = io_req->tgt; ··· 1917 1931 /* we will not receive ABTS response for this IO */ 1918 1932 BNX2FC_IO_DBG(io_req, "Timer context finished processing " 1919 1933 "this scsi cmd\n"); 1934 + if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, 1935 + &io_req->req_flags)) { 1936 + BNX2FC_IO_DBG(io_req, 1937 + "Actual completion after cleanup request cleaning up\n"); 1938 + bnx2fc_process_cleanup_compl(io_req, task, num_rq); 1939 + } 1920 1940 return; 1921 1941 } 1922 1942 ··· 1942 1950 &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload); 1943 1951 1944 1952 /* parse fcp_rsp and obtain sense data from RQ if available */ 1945 - bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq); 1953 + bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data); 1946 1954 1947 1955 if (!sc_cmd->SCp.ptr) { 1948 1956 printk(KERN_ERR PFX "SCp.ptr is NULL\n");
+1 -1
drivers/scsi/constants.c
··· 404 404 "DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR", 405 405 "DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE", 406 406 "DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE", 407 - "DID_NEXUS_FAILURE" }; 407 + "DID_NEXUS_FAILURE", "DID_ALLOC_FAILURE", "DID_MEDIUM_ERROR" }; 408 408 409 409 static const char * const driverbyte_table[]={ 410 410 "DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
+9 -1
drivers/scsi/libfc/fc_rport.c
··· 632 632 fc_rport_enter_ready(rdata); 633 633 break; 634 634 case RPORT_ST_PRLI: 635 + fc_rport_enter_plogi(rdata); 636 + break; 635 637 case RPORT_ST_ADISC: 636 638 fc_rport_enter_logo(rdata); 637 639 break; ··· 1210 1208 rjt = fc_frame_payload_get(fp, sizeof(*rjt)); 1211 1209 if (!rjt) 1212 1210 FC_RPORT_DBG(rdata, "PRLI bad response\n"); 1213 - else 1211 + else { 1214 1212 FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n", 1215 1213 rjt->er_reason, rjt->er_explan); 1214 + if (rjt->er_reason == ELS_RJT_UNAB && 1215 + rjt->er_explan == ELS_EXPL_PLOGI_REQD) { 1216 + fc_rport_enter_plogi(rdata); 1217 + goto out; 1218 + } 1219 + } 1216 1220 fc_rport_error_retry(rdata, FC_EX_ELS_RJT); 1217 1221 } 1218 1222
+12 -13
drivers/scsi/lpfc/lpfc.h
··· 207 207 } rev; 208 208 struct { 209 209 #ifdef __BIG_ENDIAN_BITFIELD 210 - uint32_t rsvd3 :19; /* Reserved */ 211 - uint32_t cdss : 1; /* Configure Data Security SLI */ 210 + uint32_t rsvd3 :20; /* Reserved */ 212 211 uint32_t rsvd2 : 3; /* Reserved */ 213 212 uint32_t cbg : 1; /* Configure BlockGuard */ 214 213 uint32_t cmv : 1; /* Configure Max VPIs */ ··· 229 230 uint32_t cmv : 1; /* Configure Max VPIs */ 230 231 uint32_t cbg : 1; /* Configure BlockGuard */ 231 232 uint32_t rsvd2 : 3; /* Reserved */ 232 - uint32_t cdss : 1; /* Configure Data Security SLI */ 233 - uint32_t rsvd3 :19; /* Reserved */ 233 + uint32_t rsvd3 :20; /* Reserved */ 234 234 #endif 235 235 } sli3Feat; 236 236 } lpfc_vpd_t; ··· 478 480 struct dentry *debug_nodelist; 479 481 struct dentry *debug_nvmestat; 480 482 struct dentry *debug_scsistat; 481 - struct dentry *debug_nvmektime; 482 - struct dentry *debug_cpucheck; 483 + struct dentry *debug_ioktime; 484 + struct dentry *debug_hdwqstat; 483 485 struct dentry *vport_debugfs_root; 484 486 struct lpfc_debugfs_trc *disc_trc; 485 487 atomic_t disc_trc_cnt; ··· 885 887 #define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ 886 888 #define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ 887 889 #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ 888 - uint32_t cfg_enable_dss; 889 890 uint32_t cfg_fdmi_on; 890 891 #define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */ 891 892 #define LPFC_FDMI_SUPPORT 1 /* FDMI supported? 
*/ ··· 1153 1156 uint32_t iocb_cnt; 1154 1157 uint32_t iocb_max; 1155 1158 atomic_t sdev_cnt; 1156 - uint8_t fips_spec_rev; 1157 - uint8_t fips_level; 1158 1159 spinlock_t devicelock; /* lock for luns list */ 1159 1160 mempool_t *device_data_mem_pool; 1160 1161 struct list_head luns; ··· 1170 1175 uint16_t sfp_warning; 1171 1176 1172 1177 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1173 - uint16_t cpucheck_on; 1178 + uint16_t hdwqstat_on; 1174 1179 #define LPFC_CHECK_OFF 0 1175 1180 #define LPFC_CHECK_NVME_IO 1 1176 - #define LPFC_CHECK_NVMET_RCV 2 1177 - #define LPFC_CHECK_NVMET_IO 4 1178 - #define LPFC_CHECK_SCSI_IO 8 1181 + #define LPFC_CHECK_NVMET_IO 2 1182 + #define LPFC_CHECK_SCSI_IO 4 1179 1183 uint16_t ktime_on; 1180 1184 uint64_t ktime_data_samples; 1181 1185 uint64_t ktime_status_samples; ··· 1219 1225 #define LPFC_POLL_SLOWPATH 1 /* called from slowpath */ 1220 1226 1221 1227 char os_host_name[MAXHOSTNAMELEN]; 1228 + 1229 + /* SCSI host template information - for physical port */ 1230 + struct scsi_host_template port_template; 1231 + /* SCSI host template information - for all vports */ 1232 + struct scsi_host_template vport_template; 1222 1233 }; 1223 1234 1224 1235 static inline struct Scsi_Host *
+2 -71
drivers/scsi/lpfc/lpfc_attr.c
··· 2231 2231 } 2232 2232 2233 2233 /** 2234 - * lpfc_fips_level_show - Return the current FIPS level for the HBA 2235 - * @dev: class unused variable. 2236 - * @attr: device attribute, not used. 2237 - * @buf: on return contains the module description text. 2238 - * 2239 - * Returns: size of formatted string. 2240 - **/ 2241 - static ssize_t 2242 - lpfc_fips_level_show(struct device *dev, struct device_attribute *attr, 2243 - char *buf) 2244 - { 2245 - struct Scsi_Host *shost = class_to_shost(dev); 2246 - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2247 - struct lpfc_hba *phba = vport->phba; 2248 - 2249 - return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level); 2250 - } 2251 - 2252 - /** 2253 - * lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA 2254 - * @dev: class unused variable. 2255 - * @attr: device attribute, not used. 2256 - * @buf: on return contains the module description text. 2257 - * 2258 - * Returns: size of formatted string. 2259 - **/ 2260 - static ssize_t 2261 - lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr, 2262 - char *buf) 2263 - { 2264 - struct Scsi_Host *shost = class_to_shost(dev); 2265 - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2266 - struct lpfc_hba *phba = vport->phba; 2267 - 2268 - return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev); 2269 - } 2270 - 2271 - /** 2272 - * lpfc_dss_show - Return the current state of dss and the configured state 2273 - * @dev: class converted to a Scsi_host structure. 2274 - * @attr: device attribute, not used. 2275 - * @buf: on return contains the formatted text. 2276 - * 2277 - * Returns: size of formatted string. 
2278 - **/ 2279 - static ssize_t 2280 - lpfc_dss_show(struct device *dev, struct device_attribute *attr, 2281 - char *buf) 2282 - { 2283 - struct Scsi_Host *shost = class_to_shost(dev); 2284 - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2285 - struct lpfc_hba *phba = vport->phba; 2286 - 2287 - return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n", 2288 - (phba->cfg_enable_dss) ? "Enabled" : "Disabled", 2289 - (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ? 2290 - "" : "Not "); 2291 - } 2292 - 2293 - /** 2294 2234 * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions 2295 2235 * @dev: class converted to a Scsi_host structure. 2296 2236 * @attr: device attribute, not used. ··· 2645 2705 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL); 2646 2706 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); 2647 2707 static DEVICE_ATTR_RO(lpfc_temp_sensor); 2648 - static DEVICE_ATTR_RO(lpfc_fips_level); 2649 - static DEVICE_ATTR_RO(lpfc_fips_rev); 2650 - static DEVICE_ATTR_RO(lpfc_dss); 2651 2708 static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn); 2652 2709 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); 2653 2710 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, ··· 3805 3868 3806 3869 /* 3807 3870 # lun_queue_depth: This parameter is used to limit the number of outstanding 3808 - # commands per FCP LUN. Value range is [1,512]. Default value is 30. 3871 + # commands per FCP LUN. 
3809 3872 */ 3810 - LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512, 3873 + LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512, 3811 3874 "Max number of FCP commands we can queue to a specific LUN"); 3812 3875 3813 3876 /* ··· 6188 6251 &dev_attr_pt, 6189 6252 &dev_attr_txq_hw, 6190 6253 &dev_attr_txcmplq_hw, 6191 - &dev_attr_lpfc_fips_level, 6192 - &dev_attr_lpfc_fips_rev, 6193 - &dev_attr_lpfc_dss, 6194 6254 &dev_attr_lpfc_sriov_hw_max_virtfn, 6195 6255 &dev_attr_protocol, 6196 6256 &dev_attr_lpfc_xlane_supported, ··· 6223 6289 &dev_attr_lpfc_max_scsicmpl_time, 6224 6290 &dev_attr_lpfc_stat_data_ctrl, 6225 6291 &dev_attr_lpfc_static_vport, 6226 - &dev_attr_lpfc_fips_level, 6227 - &dev_attr_lpfc_fips_rev, 6228 6292 NULL, 6229 6293 }; 6230 6294 ··· 7331 7399 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); 7332 7400 lpfc_delay_discovery_init(phba, lpfc_delay_discovery); 7333 7401 lpfc_sli_mode_init(phba, lpfc_sli_mode); 7334 - phba->cfg_enable_dss = 1; 7335 7402 lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags); 7336 7403 lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize); 7337 7404 lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
+1 -2
drivers/scsi/lpfc/lpfc_crtn.h
··· 404 404 extern struct device_attribute *lpfc_hba_attrs[]; 405 405 extern struct device_attribute *lpfc_vport_attrs[]; 406 406 extern struct scsi_host_template lpfc_template; 407 - extern struct scsi_host_template lpfc_template_no_hr; 408 407 extern struct scsi_host_template lpfc_template_nvme; 409 - extern struct scsi_host_template lpfc_vport_template; 410 408 extern struct fc_function_template lpfc_transport_functions; 411 409 extern struct fc_function_template lpfc_vport_transport_functions; 412 410 ··· 588 590 int); 589 591 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd, 590 592 struct lpfc_sli4_hdw_queue *qp); 593 + void lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd); 591 594 void lpfc_nvme_cmd_template(void); 592 595 void lpfc_nvmet_cmd_template(void); 593 596 void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
+229 -108
drivers/scsi/lpfc/lpfc_debugfs.c
··· 1300 1300 return len; 1301 1301 } 1302 1302 1303 + void 1304 + lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 1305 + { 1306 + uint64_t seg1, seg2, seg3, seg4; 1307 + uint64_t segsum; 1308 + 1309 + if (!lpfc_cmd->ts_last_cmd || 1310 + !lpfc_cmd->ts_cmd_start || 1311 + !lpfc_cmd->ts_cmd_wqput || 1312 + !lpfc_cmd->ts_isr_cmpl || 1313 + !lpfc_cmd->ts_data_io) 1314 + return; 1315 + 1316 + if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_cmd_start) 1317 + return; 1318 + if (lpfc_cmd->ts_cmd_start < lpfc_cmd->ts_last_cmd) 1319 + return; 1320 + if (lpfc_cmd->ts_cmd_wqput < lpfc_cmd->ts_cmd_start) 1321 + return; 1322 + if (lpfc_cmd->ts_isr_cmpl < lpfc_cmd->ts_cmd_wqput) 1323 + return; 1324 + if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_isr_cmpl) 1325 + return; 1326 + /* 1327 + * Segment 1 - Time from Last FCP command cmpl is handed 1328 + * off to NVME Layer to start of next command. 1329 + * Segment 2 - Time from Driver receives a IO cmd start 1330 + * from NVME Layer to WQ put is done on IO cmd. 1331 + * Segment 3 - Time from Driver WQ put is done on IO cmd 1332 + * to MSI-X ISR for IO cmpl. 1333 + * Segment 4 - Time from MSI-X ISR for IO cmpl to when 1334 + * cmpl is handled off to the NVME Layer. 
1335 + */ 1336 + seg1 = lpfc_cmd->ts_cmd_start - lpfc_cmd->ts_last_cmd; 1337 + if (seg1 > 5000000) /* 5 ms - for sequential IOs only */ 1338 + seg1 = 0; 1339 + 1340 + /* Calculate times relative to start of IO */ 1341 + seg2 = (lpfc_cmd->ts_cmd_wqput - lpfc_cmd->ts_cmd_start); 1342 + segsum = seg2; 1343 + seg3 = lpfc_cmd->ts_isr_cmpl - lpfc_cmd->ts_cmd_start; 1344 + if (segsum > seg3) 1345 + return; 1346 + seg3 -= segsum; 1347 + segsum += seg3; 1348 + 1349 + seg4 = lpfc_cmd->ts_data_io - lpfc_cmd->ts_cmd_start; 1350 + if (segsum > seg4) 1351 + return; 1352 + seg4 -= segsum; 1353 + 1354 + phba->ktime_data_samples++; 1355 + phba->ktime_seg1_total += seg1; 1356 + if (seg1 < phba->ktime_seg1_min) 1357 + phba->ktime_seg1_min = seg1; 1358 + else if (seg1 > phba->ktime_seg1_max) 1359 + phba->ktime_seg1_max = seg1; 1360 + phba->ktime_seg2_total += seg2; 1361 + if (seg2 < phba->ktime_seg2_min) 1362 + phba->ktime_seg2_min = seg2; 1363 + else if (seg2 > phba->ktime_seg2_max) 1364 + phba->ktime_seg2_max = seg2; 1365 + phba->ktime_seg3_total += seg3; 1366 + if (seg3 < phba->ktime_seg3_min) 1367 + phba->ktime_seg3_min = seg3; 1368 + else if (seg3 > phba->ktime_seg3_max) 1369 + phba->ktime_seg3_max = seg3; 1370 + phba->ktime_seg4_total += seg4; 1371 + if (seg4 < phba->ktime_seg4_min) 1372 + phba->ktime_seg4_min = seg4; 1373 + else if (seg4 > phba->ktime_seg4_max) 1374 + phba->ktime_seg4_max = seg4; 1375 + 1376 + lpfc_cmd->ts_last_cmd = 0; 1377 + lpfc_cmd->ts_cmd_start = 0; 1378 + lpfc_cmd->ts_cmd_wqput = 0; 1379 + lpfc_cmd->ts_isr_cmpl = 0; 1380 + lpfc_cmd->ts_data_io = 0; 1381 + } 1382 + 1303 1383 /** 1304 - * lpfc_debugfs_nvmektime_data - Dump target node list to a buffer 1384 + * lpfc_debugfs_ioktime_data - Dump target node list to a buffer 1305 1385 * @vport: The vport to gather target node info from. 1306 1386 * @buf: The buffer to dump log into. 1307 1387 * @size: The maximum amount of data to process. ··· 1394 1314 * not exceed @size. 
1395 1315 **/ 1396 1316 static int 1397 - lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) 1317 + lpfc_debugfs_ioktime_data(struct lpfc_vport *vport, char *buf, int size) 1398 1318 { 1399 1319 struct lpfc_hba *phba = vport->phba; 1400 1320 int len = 0; 1401 1321 1402 1322 if (phba->nvmet_support == 0) { 1403 - /* NVME Initiator */ 1323 + /* Initiator */ 1404 1324 len += scnprintf(buf + len, PAGE_SIZE - len, 1405 1325 "ktime %s: Total Samples: %lld\n", 1406 1326 (phba->ktime_on ? "Enabled" : "Disabled"), ··· 1410 1330 1411 1331 len += scnprintf( 1412 1332 buf + len, PAGE_SIZE - len, 1413 - "Segment 1: Last NVME Cmd cmpl " 1414 - "done -to- Start of next NVME cnd (in driver)\n"); 1333 + "Segment 1: Last Cmd cmpl " 1334 + "done -to- Start of next Cmd (in driver)\n"); 1415 1335 len += scnprintf( 1416 1336 buf + len, PAGE_SIZE - len, 1417 1337 "avg:%08lld min:%08lld max %08lld\n", ··· 1421 1341 phba->ktime_seg1_max); 1422 1342 len += scnprintf( 1423 1343 buf + len, PAGE_SIZE - len, 1424 - "Segment 2: Driver start of NVME cmd " 1344 + "Segment 2: Driver start of Cmd " 1425 1345 "-to- Firmware WQ doorbell\n"); 1426 1346 len += scnprintf( 1427 1347 buf + len, PAGE_SIZE - len, ··· 1444 1364 len += scnprintf( 1445 1365 buf + len, PAGE_SIZE - len, 1446 1366 "Segment 4: MSI-X ISR cmpl -to- " 1447 - "NVME cmpl done\n"); 1367 + "Cmd cmpl done\n"); 1448 1368 len += scnprintf( 1449 1369 buf + len, PAGE_SIZE - len, 1450 1370 "avg:%08lld min:%08lld max %08lld\n", ··· 1683 1603 } 1684 1604 1685 1605 /** 1686 - * lpfc_debugfs_cpucheck_data - Dump target node list to a buffer 1606 + * lpfc_debugfs_hdwqstat_data - Dump I/O stats to a buffer 1687 1607 * @vport: The vport to gather target node info from. 1688 1608 * @buf: The buffer to dump log into. 1689 1609 * @size: The maximum amount of data to process. 
1690 1610 * 1691 1611 * Description: 1692 - * This routine dumps the NVME statistics associated with @vport 1612 + * This routine dumps the NVME + SCSI statistics associated with @vport 1693 1613 * 1694 1614 * Return Value: 1695 1615 * This routine returns the amount of bytes that were dumped into @buf and will 1696 1616 * not exceed @size. 1697 1617 **/ 1698 1618 static int 1699 - lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size) 1619 + lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size) 1700 1620 { 1701 1621 struct lpfc_hba *phba = vport->phba; 1702 1622 struct lpfc_sli4_hdw_queue *qp; 1703 - int i, j, max_cnt; 1704 - int len = 0; 1623 + struct lpfc_hdwq_stat *c_stat; 1624 + int i, j, len; 1705 1625 uint32_t tot_xmt; 1706 1626 uint32_t tot_rcv; 1707 1627 uint32_t tot_cmpl; 1628 + char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0}; 1708 1629 1709 - len += scnprintf(buf + len, PAGE_SIZE - len, 1710 - "CPUcheck %s ", 1711 - (phba->cpucheck_on & LPFC_CHECK_NVME_IO ? 1712 - "Enabled" : "Disabled")); 1713 - if (phba->nvmet_support) { 1714 - len += scnprintf(buf + len, PAGE_SIZE - len, 1715 - "%s\n", 1716 - (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ? 1717 - "Rcv Enabled\n" : "Rcv Disabled\n")); 1718 - } else { 1719 - len += scnprintf(buf + len, PAGE_SIZE - len, "\n"); 1720 - } 1721 - max_cnt = size - LPFC_DEBUG_OUT_LINE_SZ; 1630 + scnprintf(tmp, sizeof(tmp), "HDWQ Stats:\n\n"); 1631 + if (strlcat(buf, tmp, size) >= size) 1632 + goto buffer_done; 1633 + 1634 + scnprintf(tmp, sizeof(tmp), "(NVME Accounting: %s) ", 1635 + (phba->hdwqstat_on & 1636 + (LPFC_CHECK_NVME_IO | LPFC_CHECK_NVMET_IO) ? 1637 + "Enabled" : "Disabled")); 1638 + if (strlcat(buf, tmp, size) >= size) 1639 + goto buffer_done; 1640 + 1641 + scnprintf(tmp, sizeof(tmp), "(SCSI Accounting: %s) ", 1642 + (phba->hdwqstat_on & LPFC_CHECK_SCSI_IO ? 
1643 + "Enabled" : "Disabled")); 1644 + if (strlcat(buf, tmp, size) >= size) 1645 + goto buffer_done; 1646 + 1647 + scnprintf(tmp, sizeof(tmp), "\n\n"); 1648 + if (strlcat(buf, tmp, size) >= size) 1649 + goto buffer_done; 1722 1650 1723 1651 for (i = 0; i < phba->cfg_hdw_queue; i++) { 1724 1652 qp = &phba->sli4_hba.hdwq[i]; ··· 1734 1646 tot_rcv = 0; 1735 1647 tot_xmt = 0; 1736 1648 tot_cmpl = 0; 1737 - for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) { 1738 - tot_xmt += qp->cpucheck_xmt_io[j]; 1739 - tot_cmpl += qp->cpucheck_cmpl_io[j]; 1649 + 1650 + for_each_present_cpu(j) { 1651 + c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, j); 1652 + 1653 + /* Only display for this HDWQ */ 1654 + if (i != c_stat->hdwq_no) 1655 + continue; 1656 + 1657 + /* Only display non-zero counters */ 1658 + if (!c_stat->xmt_io && !c_stat->cmpl_io && 1659 + !c_stat->rcv_io) 1660 + continue; 1661 + 1662 + if (!tot_xmt && !tot_cmpl && !tot_rcv) { 1663 + /* Print HDWQ string only the first time */ 1664 + scnprintf(tmp, sizeof(tmp), "[HDWQ %d]:\t", i); 1665 + if (strlcat(buf, tmp, size) >= size) 1666 + goto buffer_done; 1667 + } 1668 + 1669 + tot_xmt += c_stat->xmt_io; 1670 + tot_cmpl += c_stat->cmpl_io; 1740 1671 if (phba->nvmet_support) 1741 - tot_rcv += qp->cpucheck_rcv_io[j]; 1672 + tot_rcv += c_stat->rcv_io; 1673 + 1674 + scnprintf(tmp, sizeof(tmp), "| [CPU %d]: ", j); 1675 + if (strlcat(buf, tmp, size) >= size) 1676 + goto buffer_done; 1677 + 1678 + if (phba->nvmet_support) { 1679 + scnprintf(tmp, sizeof(tmp), 1680 + "XMT 0x%x CMPL 0x%x RCV 0x%x |", 1681 + c_stat->xmt_io, c_stat->cmpl_io, 1682 + c_stat->rcv_io); 1683 + if (strlcat(buf, tmp, size) >= size) 1684 + goto buffer_done; 1685 + } else { 1686 + scnprintf(tmp, sizeof(tmp), 1687 + "XMT 0x%x CMPL 0x%x |", 1688 + c_stat->xmt_io, c_stat->cmpl_io); 1689 + if (strlcat(buf, tmp, size) >= size) 1690 + goto buffer_done; 1691 + } 1742 1692 } 1743 1693 1744 - /* Only display Hardware Qs with something */ 1694 + /* Check if nothing to display */ 1745 
1695 if (!tot_xmt && !tot_cmpl && !tot_rcv) 1746 1696 continue; 1747 1697 1748 - len += scnprintf(buf + len, PAGE_SIZE - len, 1749 - "HDWQ %03d: ", i); 1750 - for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) { 1751 - /* Only display non-zero counters */ 1752 - if (!qp->cpucheck_xmt_io[j] && 1753 - !qp->cpucheck_cmpl_io[j] && 1754 - !qp->cpucheck_rcv_io[j]) 1755 - continue; 1756 - if (phba->nvmet_support) { 1757 - len += scnprintf(buf + len, PAGE_SIZE - len, 1758 - "CPU %03d: %x/%x/%x ", j, 1759 - qp->cpucheck_rcv_io[j], 1760 - qp->cpucheck_xmt_io[j], 1761 - qp->cpucheck_cmpl_io[j]); 1762 - } else { 1763 - len += scnprintf(buf + len, PAGE_SIZE - len, 1764 - "CPU %03d: %x/%x ", j, 1765 - qp->cpucheck_xmt_io[j], 1766 - qp->cpucheck_cmpl_io[j]); 1767 - } 1768 - } 1769 - len += scnprintf(buf + len, PAGE_SIZE - len, 1770 - "Total: %x\n", tot_xmt); 1771 - if (len >= max_cnt) { 1772 - len += scnprintf(buf + len, PAGE_SIZE - len, 1773 - "Truncated ...\n"); 1774 - return len; 1698 + scnprintf(tmp, sizeof(tmp), "\t->\t[HDWQ Total: "); 1699 + if (strlcat(buf, tmp, size) >= size) 1700 + goto buffer_done; 1701 + 1702 + if (phba->nvmet_support) { 1703 + scnprintf(tmp, sizeof(tmp), 1704 + "XMT 0x%x CMPL 0x%x RCV 0x%x]\n\n", 1705 + tot_xmt, tot_cmpl, tot_rcv); 1706 + if (strlcat(buf, tmp, size) >= size) 1707 + goto buffer_done; 1708 + } else { 1709 + scnprintf(tmp, sizeof(tmp), 1710 + "XMT 0x%x CMPL 0x%x]\n\n", 1711 + tot_xmt, tot_cmpl); 1712 + if (strlcat(buf, tmp, size) >= size) 1713 + goto buffer_done; 1775 1714 } 1776 1715 } 1716 + 1717 + buffer_done: 1718 + len = strnlen(buf, size); 1777 1719 return len; 1778 1720 } 1779 1721 ··· 2807 2689 } 2808 2690 2809 2691 static int 2810 - lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file) 2692 + lpfc_debugfs_ioktime_open(struct inode *inode, struct file *file) 2811 2693 { 2812 2694 struct lpfc_vport *vport = inode->i_private; 2813 2695 struct lpfc_debug *debug; ··· 2818 2700 goto out; 2819 2701 2820 2702 /* Round to page 
boundary */ 2821 - debug->buffer = kmalloc(LPFC_NVMEKTIME_SIZE, GFP_KERNEL); 2703 + debug->buffer = kmalloc(LPFC_IOKTIME_SIZE, GFP_KERNEL); 2822 2704 if (!debug->buffer) { 2823 2705 kfree(debug); 2824 2706 goto out; 2825 2707 } 2826 2708 2827 - debug->len = lpfc_debugfs_nvmektime_data(vport, debug->buffer, 2828 - LPFC_NVMEKTIME_SIZE); 2709 + debug->len = lpfc_debugfs_ioktime_data(vport, debug->buffer, 2710 + LPFC_IOKTIME_SIZE); 2829 2711 2830 2712 debug->i_private = inode->i_private; 2831 2713 file->private_data = debug; ··· 2836 2718 } 2837 2719 2838 2720 static ssize_t 2839 - lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf, 2840 - size_t nbytes, loff_t *ppos) 2721 + lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf, 2722 + size_t nbytes, loff_t *ppos) 2841 2723 { 2842 2724 struct lpfc_debug *debug = file->private_data; 2843 2725 struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; ··· 3039 2921 } 3040 2922 3041 2923 static int 3042 - lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file) 2924 + lpfc_debugfs_hdwqstat_open(struct inode *inode, struct file *file) 3043 2925 { 3044 2926 struct lpfc_vport *vport = inode->i_private; 3045 2927 struct lpfc_debug *debug; ··· 3050 2932 goto out; 3051 2933 3052 2934 /* Round to page boundary */ 3053 - debug->buffer = kmalloc(LPFC_CPUCHECK_SIZE, GFP_KERNEL); 2935 + debug->buffer = kcalloc(1, LPFC_SCSISTAT_SIZE, GFP_KERNEL); 3054 2936 if (!debug->buffer) { 3055 2937 kfree(debug); 3056 2938 goto out; 3057 2939 } 3058 2940 3059 - debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer, 3060 - LPFC_CPUCHECK_SIZE); 2941 + debug->len = lpfc_debugfs_hdwqstat_data(vport, debug->buffer, 2942 + LPFC_SCSISTAT_SIZE); 3061 2943 3062 2944 debug->i_private = inode->i_private; 3063 2945 file->private_data = debug; ··· 3068 2950 } 3069 2951 3070 2952 static ssize_t 3071 - lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf, 2953 + 
lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf, 3072 2954 size_t nbytes, loff_t *ppos) 3073 2955 { 3074 2956 struct lpfc_debug *debug = file->private_data; 3075 2957 struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; 3076 2958 struct lpfc_hba *phba = vport->phba; 3077 - struct lpfc_sli4_hdw_queue *qp; 2959 + struct lpfc_hdwq_stat *c_stat; 3078 2960 char mybuf[64]; 3079 2961 char *pbuf; 3080 - int i, j; 2962 + int i; 3081 2963 3082 2964 if (nbytes > 64) 3083 2965 nbytes = 64; ··· 3090 2972 3091 2973 if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { 3092 2974 if (phba->nvmet_support) 3093 - phba->cpucheck_on |= LPFC_CHECK_NVMET_IO; 2975 + phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO; 3094 2976 else 3095 - phba->cpucheck_on |= (LPFC_CHECK_NVME_IO | 2977 + phba->hdwqstat_on |= (LPFC_CHECK_NVME_IO | 3096 2978 LPFC_CHECK_SCSI_IO); 3097 2979 return strlen(pbuf); 3098 2980 } else if ((strncmp(pbuf, "nvme_on", sizeof("nvme_on") - 1) == 0)) { 3099 2981 if (phba->nvmet_support) 3100 - phba->cpucheck_on |= LPFC_CHECK_NVMET_IO; 2982 + phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO; 3101 2983 else 3102 - phba->cpucheck_on |= LPFC_CHECK_NVME_IO; 2984 + phba->hdwqstat_on |= LPFC_CHECK_NVME_IO; 3103 2985 return strlen(pbuf); 3104 2986 } else if ((strncmp(pbuf, "scsi_on", sizeof("scsi_on") - 1) == 0)) { 3105 - phba->cpucheck_on |= LPFC_CHECK_SCSI_IO; 2987 + if (!phba->nvmet_support) 2988 + phba->hdwqstat_on |= LPFC_CHECK_SCSI_IO; 3106 2989 return strlen(pbuf); 3107 - } else if ((strncmp(pbuf, "rcv", 3108 - sizeof("rcv") - 1) == 0)) { 3109 - if (phba->nvmet_support) 3110 - phba->cpucheck_on |= LPFC_CHECK_NVMET_RCV; 3111 - else 3112 - return -EINVAL; 2990 + } else if ((strncmp(pbuf, "nvme_off", sizeof("nvme_off") - 1) == 0)) { 2991 + phba->hdwqstat_on &= ~(LPFC_CHECK_NVME_IO | 2992 + LPFC_CHECK_NVMET_IO); 2993 + return strlen(pbuf); 2994 + } else if ((strncmp(pbuf, "scsi_off", sizeof("scsi_off") - 1) == 0)) { 2995 + phba->hdwqstat_on &= 
~LPFC_CHECK_SCSI_IO; 3113 2996 return strlen(pbuf); 3114 2997 } else if ((strncmp(pbuf, "off", 3115 2998 sizeof("off") - 1) == 0)) { 3116 - phba->cpucheck_on = LPFC_CHECK_OFF; 2999 + phba->hdwqstat_on = LPFC_CHECK_OFF; 3117 3000 return strlen(pbuf); 3118 3001 } else if ((strncmp(pbuf, "zero", 3119 3002 sizeof("zero") - 1) == 0)) { 3120 - for (i = 0; i < phba->cfg_hdw_queue; i++) { 3121 - qp = &phba->sli4_hba.hdwq[i]; 3122 - 3123 - for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) { 3124 - qp->cpucheck_rcv_io[j] = 0; 3125 - qp->cpucheck_xmt_io[j] = 0; 3126 - qp->cpucheck_cmpl_io[j] = 0; 3127 - } 3003 + for_each_present_cpu(i) { 3004 + c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, i); 3005 + c_stat->xmt_io = 0; 3006 + c_stat->cmpl_io = 0; 3007 + c_stat->rcv_io = 0; 3128 3008 } 3129 3009 return strlen(pbuf); 3130 3010 } ··· 5547 5431 .release = lpfc_debugfs_release, 5548 5432 }; 5549 5433 5550 - #undef lpfc_debugfs_op_nvmektime 5551 - static const struct file_operations lpfc_debugfs_op_nvmektime = { 5434 + #undef lpfc_debugfs_op_ioktime 5435 + static const struct file_operations lpfc_debugfs_op_ioktime = { 5552 5436 .owner = THIS_MODULE, 5553 - .open = lpfc_debugfs_nvmektime_open, 5437 + .open = lpfc_debugfs_ioktime_open, 5554 5438 .llseek = lpfc_debugfs_lseek, 5555 5439 .read = lpfc_debugfs_read, 5556 - .write = lpfc_debugfs_nvmektime_write, 5440 + .write = lpfc_debugfs_ioktime_write, 5557 5441 .release = lpfc_debugfs_release, 5558 5442 }; 5559 5443 ··· 5567 5451 .release = lpfc_debugfs_release, 5568 5452 }; 5569 5453 5570 - #undef lpfc_debugfs_op_cpucheck 5571 - static const struct file_operations lpfc_debugfs_op_cpucheck = { 5454 + #undef lpfc_debugfs_op_hdwqstat 5455 + static const struct file_operations lpfc_debugfs_op_hdwqstat = { 5572 5456 .owner = THIS_MODULE, 5573 - .open = lpfc_debugfs_cpucheck_open, 5457 + .open = lpfc_debugfs_hdwqstat_open, 5574 5458 .llseek = lpfc_debugfs_lseek, 5575 5459 .read = lpfc_debugfs_read, 5576 - .write = lpfc_debugfs_cpucheck_write, 5460 + 
.write = lpfc_debugfs_hdwqstat_write, 5577 5461 .release = lpfc_debugfs_release, 5578 5462 }; 5579 5463 ··· 6191 6075 goto debug_failed; 6192 6076 } 6193 6077 6194 - snprintf(name, sizeof(name), "nvmektime"); 6195 - vport->debug_nvmektime = 6078 + snprintf(name, sizeof(name), "ioktime"); 6079 + vport->debug_ioktime = 6196 6080 debugfs_create_file(name, 0644, 6197 6081 vport->vport_debugfs_root, 6198 - vport, &lpfc_debugfs_op_nvmektime); 6082 + vport, &lpfc_debugfs_op_ioktime); 6083 + if (!vport->debug_ioktime) { 6084 + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 6085 + "0815 Cannot create debugfs ioktime\n"); 6086 + goto debug_failed; 6087 + } 6199 6088 6200 - snprintf(name, sizeof(name), "cpucheck"); 6201 - vport->debug_cpucheck = 6089 + snprintf(name, sizeof(name), "hdwqstat"); 6090 + vport->debug_hdwqstat = 6202 6091 debugfs_create_file(name, 0644, 6203 6092 vport->vport_debugfs_root, 6204 - vport, &lpfc_debugfs_op_cpucheck); 6093 + vport, &lpfc_debugfs_op_hdwqstat); 6205 6094 6206 6095 /* 6207 6096 * The following section is for additional directories/files for the ··· 6337 6216 debugfs_remove(vport->debug_scsistat); /* scsistat */ 6338 6217 vport->debug_scsistat = NULL; 6339 6218 6340 - debugfs_remove(vport->debug_nvmektime); /* nvmektime */ 6341 - vport->debug_nvmektime = NULL; 6219 + debugfs_remove(vport->debug_ioktime); /* ioktime */ 6220 + vport->debug_ioktime = NULL; 6342 6221 6343 - debugfs_remove(vport->debug_cpucheck); /* cpucheck */ 6344 - vport->debug_cpucheck = NULL; 6222 + debugfs_remove(vport->debug_hdwqstat); /* hdwqstat */ 6223 + vport->debug_hdwqstat = NULL; 6345 6224 6346 6225 if (vport->vport_debugfs_root) { 6347 6226 debugfs_remove(vport->vport_debugfs_root); /* vportX */
+1 -2
drivers/scsi/lpfc/lpfc_debugfs.h
··· 46 46 47 47 /* nvmestat output buffer size */ 48 48 #define LPFC_NVMESTAT_SIZE 8192 49 - #define LPFC_NVMEKTIME_SIZE 8192 50 - #define LPFC_CPUCHECK_SIZE 8192 49 + #define LPFC_IOKTIME_SIZE 8192 51 50 #define LPFC_NVMEIO_TRC_SIZE 8192 52 51 53 52 /* scsistat output buffer size */
+6 -14
drivers/scsi/lpfc/lpfc_hw.h
··· 3262 3262 #endif 3263 3263 3264 3264 #ifdef __BIG_ENDIAN_BITFIELD 3265 - uint32_t rsvd1 : 19; /* Reserved */ 3266 - uint32_t cdss : 1; /* Configure Data Security SLI */ 3265 + uint32_t rsvd1 : 20; /* Reserved */ 3267 3266 uint32_t casabt : 1; /* Configure async abts status notice */ 3268 3267 uint32_t rsvd2 : 2; /* Reserved */ 3269 3268 uint32_t cbg : 1; /* Configure BlockGuard */ ··· 3286 3287 uint32_t cbg : 1; /* Configure BlockGuard */ 3287 3288 uint32_t rsvd2 : 2; /* Reserved */ 3288 3289 uint32_t casabt : 1; /* Configure async abts status notice */ 3289 - uint32_t cdss : 1; /* Configure Data Security SLI */ 3290 - uint32_t rsvd1 : 19; /* Reserved */ 3290 + uint32_t rsvd1 : 20; /* Reserved */ 3291 3291 #endif 3292 3292 #ifdef __BIG_ENDIAN_BITFIELD 3293 - uint32_t rsvd3 : 19; /* Reserved */ 3294 - uint32_t gdss : 1; /* Configure Data Security SLI */ 3293 + uint32_t rsvd3 : 20; /* Reserved */ 3295 3294 uint32_t gasabt : 1; /* Grant async abts status notice */ 3296 3295 uint32_t rsvd4 : 2; /* Reserved */ 3297 3296 uint32_t gbg : 1; /* Grant BlockGuard */ ··· 3313 3316 uint32_t gbg : 1; /* Grant BlockGuard */ 3314 3317 uint32_t rsvd4 : 2; /* Reserved */ 3315 3318 uint32_t gasabt : 1; /* Grant async abts status notice */ 3316 - uint32_t gdss : 1; /* Configure Data Security SLI */ 3317 - uint32_t rsvd3 : 19; /* Reserved */ 3319 + uint32_t rsvd3 : 20; /* Reserved */ 3318 3320 #endif 3319 3321 3320 3322 #ifdef __BIG_ENDIAN_BITFIELD ··· 3335 3339 uint32_t rsvd6; /* Reserved */ 3336 3340 3337 3341 #ifdef __BIG_ENDIAN_BITFIELD 3338 - uint32_t fips_rev : 3; /* FIPS Spec Revision */ 3339 - uint32_t fips_level : 4; /* FIPS Level */ 3340 - uint32_t sec_err : 9; /* security crypto error */ 3342 + uint32_t rsvd7 : 16; 3341 3343 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 3342 3344 #else /* __LITTLE_ENDIAN */ 3343 3345 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 3344 - uint32_t sec_err : 9; /* security crypto error */ 3345 - uint32_t fips_level : 4; 
/* FIPS Level */ 3346 - uint32_t fips_rev : 3; /* FIPS Spec Revision */ 3346 + uint32_t rsvd7 : 16; 3347 3347 #endif 3348 3348 3349 3349 } CONFIG_PORT_VAR;
+79 -27
drivers/scsi/lpfc/lpfc_init.c
··· 4231 4231 { 4232 4232 struct lpfc_vport *vport; 4233 4233 struct Scsi_Host *shost = NULL; 4234 + struct scsi_host_template *template; 4234 4235 int error = 0; 4235 4236 int i; 4236 4237 uint64_t wwn; ··· 4260 4259 } 4261 4260 } 4262 4261 4263 - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4264 - if (dev != &phba->pcidev->dev) { 4265 - shost = scsi_host_alloc(&lpfc_vport_template, 4266 - sizeof(struct lpfc_vport)); 4262 + /* Seed template for SCSI host registration */ 4263 + if (dev == &phba->pcidev->dev) { 4264 + template = &phba->port_template; 4265 + 4266 + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4267 + /* Seed physical port template */ 4268 + memcpy(template, &lpfc_template, sizeof(*template)); 4269 + 4270 + if (use_no_reset_hba) { 4271 + /* template is for a no reset SCSI Host */ 4272 + template->max_sectors = 0xffff; 4273 + template->eh_host_reset_handler = NULL; 4274 + } 4275 + 4276 + /* Template for all vports this physical port creates */ 4277 + memcpy(&phba->vport_template, &lpfc_template, 4278 + sizeof(*template)); 4279 + phba->vport_template.max_sectors = 0xffff; 4280 + phba->vport_template.shost_attrs = lpfc_vport_attrs; 4281 + phba->vport_template.eh_bus_reset_handler = NULL; 4282 + phba->vport_template.eh_host_reset_handler = NULL; 4283 + phba->vport_template.vendor_id = 0; 4284 + 4285 + /* Initialize the host templates with updated value */ 4286 + if (phba->sli_rev == LPFC_SLI_REV4) { 4287 + template->sg_tablesize = phba->cfg_scsi_seg_cnt; 4288 + phba->vport_template.sg_tablesize = 4289 + phba->cfg_scsi_seg_cnt; 4290 + } else { 4291 + template->sg_tablesize = phba->cfg_sg_seg_cnt; 4292 + phba->vport_template.sg_tablesize = 4293 + phba->cfg_sg_seg_cnt; 4294 + } 4295 + 4267 4296 } else { 4268 - if (!use_no_reset_hba) 4269 - shost = scsi_host_alloc(&lpfc_template, 4270 - sizeof(struct lpfc_vport)); 4271 - else 4272 - shost = scsi_host_alloc(&lpfc_template_no_hr, 4273 - sizeof(struct lpfc_vport)); 4297 + /* NVMET is for physical port 
only */ 4298 + memcpy(template, &lpfc_template_nvme, 4299 + sizeof(*template)); 4274 4300 } 4275 - } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 4276 - shost = scsi_host_alloc(&lpfc_template_nvme, 4277 - sizeof(struct lpfc_vport)); 4301 + } else { 4302 + template = &phba->vport_template; 4278 4303 } 4304 + 4305 + shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); 4279 4306 if (!shost) 4280 4307 goto out; 4281 4308 ··· 4357 4328 shost->transportt = lpfc_transport_template; 4358 4329 vport->port_type = LPFC_PHYSICAL_PORT; 4359 4330 } 4331 + 4332 + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 4333 + "9081 CreatePort TMPLATE type %x TBLsize %d " 4334 + "SEGcnt %d/%d\n", 4335 + vport->port_type, shost->sg_tablesize, 4336 + phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); 4360 4337 4361 4338 /* Initialize all internally managed lists. */ 4362 4339 INIT_LIST_HEAD(&vport->fc_nodes); ··· 6336 6301 * used to create the sg_dma_buf_pool must be dynamically calculated. 6337 6302 */ 6338 6303 6339 - /* Initialize the host templates the configured values. */ 6340 - lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6341 - lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 6342 - lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6343 - 6344 6304 if (phba->sli_rev == LPFC_SLI_REV4) 6345 6305 entry_sz = sizeof(struct sli4_sge); 6346 6306 else ··· 6376 6346 } 6377 6347 6378 6348 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6379 - "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 6349 + "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 6380 6350 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6381 6351 phba->cfg_total_seg_cnt); 6382 6352 ··· 6846 6816 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 6847 6817 } 6848 6818 6849 - /* Initialize the host templates with the updated values. 
*/ 6850 - lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6851 - lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6852 - lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt; 6853 - 6854 6819 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6855 6820 "9087 sg_seg_cnt:%d dmabuf_size:%d " 6856 6821 "total:%d scsi:%d nvme:%d\n", ··· 6951 6926 rc = -ENOMEM; 6952 6927 goto out_free_hba_cpu_map; 6953 6928 } 6929 + 6930 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 6931 + phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); 6932 + if (!phba->sli4_hba.c_stat) { 6933 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6934 + "3332 Failed allocating per cpu hdwq stats\n"); 6935 + rc = -ENOMEM; 6936 + goto out_free_hba_eq_info; 6937 + } 6938 + #endif 6939 + 6954 6940 /* 6955 6941 * Enable sr-iov virtual functions if supported and configured 6956 6942 * through the module parameter. ··· 6981 6945 6982 6946 return 0; 6983 6947 6948 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 6949 + out_free_hba_eq_info: 6950 + free_percpu(phba->sli4_hba.eq_info); 6951 + #endif 6984 6952 out_free_hba_cpu_map: 6985 6953 kfree(phba->sli4_hba.cpu_map); 6986 6954 out_free_hba_eq_hdl: ··· 7023 6983 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 7024 6984 7025 6985 free_percpu(phba->sli4_hba.eq_info); 6986 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 6987 + free_percpu(phba->sli4_hba.c_stat); 6988 + #endif 7026 6989 7027 6990 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 7028 6991 kfree(phba->sli4_hba.cpu_map); ··· 10866 10823 #ifdef CONFIG_X86 10867 10824 struct cpuinfo_x86 *cpuinfo; 10868 10825 #endif 10826 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 10827 + struct lpfc_hdwq_stat *c_stat; 10828 + #endif 10869 10829 10870 10830 max_phys_id = 0; 10871 10831 min_phys_id = LPFC_VECTOR_MAP_EMPTY; ··· 11120 11074 idx = 0; 11121 11075 for_each_possible_cpu(cpu) { 11122 11076 cpup = &phba->sli4_hba.cpu_map[cpu]; 11077 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 11078 + c_stat = 
per_cpu_ptr(phba->sli4_hba.c_stat, cpu); 11079 + c_stat->hdwq_no = cpup->hdwq; 11080 + #endif 11123 11081 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) 11124 11082 continue; 11125 11083 11126 11084 cpup->hdwq = idx++ % phba->cfg_hdw_queue; 11085 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 11086 + c_stat->hdwq_no = cpup->hdwq; 11087 + #endif 11127 11088 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11128 11089 "3340 Set Affinity: not present " 11129 11090 "CPU %d hdwq %d\n", ··· 11226 11173 11227 11174 rcu_read_lock(); 11228 11175 11229 - if (!list_empty(&phba->poll_list)) { 11230 - timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 11176 + if (!list_empty(&phba->poll_list)) 11231 11177 mod_timer(&phba->cpuhp_poll_timer, 11232 11178 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 11233 - } 11234 11179 11235 11180 rcu_read_unlock(); 11236 11181 ··· 13196 13145 lpfc_sli4_ras_setup(phba); 13197 13146 13198 13147 INIT_LIST_HEAD(&phba->poll_list); 13148 + timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 13199 13149 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 13200 13150 13201 13151 return 0;
-2
drivers/scsi/lpfc/lpfc_mbox.c
··· 1299 1299 if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) { 1300 1300 if (phba->cfg_enable_bg) 1301 1301 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ 1302 - if (phba->cfg_enable_dss) 1303 - mb->un.varCfgPort.cdss = 1; /* Configure Security */ 1304 1302 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1305 1303 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ 1306 1304 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
+32 -115
drivers/scsi/lpfc/lpfc_nvme.c
··· 382 382 if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) { 383 383 ndlp->nrport = NULL; 384 384 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG; 385 - } 386 - spin_unlock_irq(&vport->phba->hbalock); 385 + spin_unlock_irq(&vport->phba->hbalock); 387 386 388 - /* Remove original register reference. The host transport 389 - * won't reference this rport/remoteport any further. 390 - */ 391 - lpfc_nlp_put(ndlp); 387 + /* Remove original register reference. The host transport 388 + * won't reference this rport/remoteport any further. 389 + */ 390 + lpfc_nlp_put(ndlp); 391 + } else { 392 + spin_unlock_irq(&vport->phba->hbalock); 393 + } 392 394 393 395 rport_err: 394 396 return; ··· 899 897 sgl->sge_len = cpu_to_le32(nCmd->rsplen); 900 898 } 901 899 902 - #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 903 - static void 904 - lpfc_nvme_ktime(struct lpfc_hba *phba, 905 - struct lpfc_io_buf *lpfc_ncmd) 906 - { 907 - uint64_t seg1, seg2, seg3, seg4; 908 - uint64_t segsum; 909 - 910 - if (!lpfc_ncmd->ts_last_cmd || 911 - !lpfc_ncmd->ts_cmd_start || 912 - !lpfc_ncmd->ts_cmd_wqput || 913 - !lpfc_ncmd->ts_isr_cmpl || 914 - !lpfc_ncmd->ts_data_nvme) 915 - return; 916 - 917 - if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start) 918 - return; 919 - if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd) 920 - return; 921 - if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start) 922 - return; 923 - if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput) 924 - return; 925 - if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl) 926 - return; 927 - /* 928 - * Segment 1 - Time from Last FCP command cmpl is handed 929 - * off to NVME Layer to start of next command. 930 - * Segment 2 - Time from Driver receives a IO cmd start 931 - * from NVME Layer to WQ put is done on IO cmd. 932 - * Segment 3 - Time from Driver WQ put is done on IO cmd 933 - * to MSI-X ISR for IO cmpl. 934 - * Segment 4 - Time from MSI-X ISR for IO cmpl to when 935 - * cmpl is handled off to the NVME Layer. 
936 - */ 937 - seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd; 938 - if (seg1 > 5000000) /* 5 ms - for sequential IOs only */ 939 - seg1 = 0; 940 - 941 - /* Calculate times relative to start of IO */ 942 - seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start); 943 - segsum = seg2; 944 - seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start; 945 - if (segsum > seg3) 946 - return; 947 - seg3 -= segsum; 948 - segsum += seg3; 949 - 950 - seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start; 951 - if (segsum > seg4) 952 - return; 953 - seg4 -= segsum; 954 - 955 - phba->ktime_data_samples++; 956 - phba->ktime_seg1_total += seg1; 957 - if (seg1 < phba->ktime_seg1_min) 958 - phba->ktime_seg1_min = seg1; 959 - else if (seg1 > phba->ktime_seg1_max) 960 - phba->ktime_seg1_max = seg1; 961 - phba->ktime_seg2_total += seg2; 962 - if (seg2 < phba->ktime_seg2_min) 963 - phba->ktime_seg2_min = seg2; 964 - else if (seg2 > phba->ktime_seg2_max) 965 - phba->ktime_seg2_max = seg2; 966 - phba->ktime_seg3_total += seg3; 967 - if (seg3 < phba->ktime_seg3_min) 968 - phba->ktime_seg3_min = seg3; 969 - else if (seg3 > phba->ktime_seg3_max) 970 - phba->ktime_seg3_max = seg3; 971 - phba->ktime_seg4_total += seg4; 972 - if (seg4 < phba->ktime_seg4_min) 973 - phba->ktime_seg4_min = seg4; 974 - else if (seg4 > phba->ktime_seg4_max) 975 - phba->ktime_seg4_max = seg4; 976 - 977 - lpfc_ncmd->ts_last_cmd = 0; 978 - lpfc_ncmd->ts_cmd_start = 0; 979 - lpfc_ncmd->ts_cmd_wqput = 0; 980 - lpfc_ncmd->ts_isr_cmpl = 0; 981 - lpfc_ncmd->ts_data_nvme = 0; 982 - } 983 - #endif 984 900 985 901 /** 986 902 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO ··· 930 1010 uint32_t code, status, idx; 931 1011 uint16_t cid, sqhd, data; 932 1012 uint32_t *ptr; 1013 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1014 + int cpu; 1015 + #endif 933 1016 934 1017 /* Sanity check on return of outstanding command */ 935 1018 if (!lpfc_ncmd) { ··· 1101 1178 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1102 1179 if 
(lpfc_ncmd->ts_cmd_start) { 1103 1180 lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp; 1104 - lpfc_ncmd->ts_data_nvme = ktime_get_ns(); 1105 - phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme; 1106 - lpfc_nvme_ktime(phba, lpfc_ncmd); 1181 + lpfc_ncmd->ts_data_io = ktime_get_ns(); 1182 + phba->ktime_last_cmd = lpfc_ncmd->ts_data_io; 1183 + lpfc_io_ktime(phba, lpfc_ncmd); 1107 1184 } 1108 - if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) { 1109 - uint32_t cpu; 1110 - idx = lpfc_ncmd->cur_iocbq.hba_wqidx; 1185 + if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) { 1111 1186 cpu = raw_smp_processor_id(); 1112 - if (cpu < LPFC_CHECK_CPU_CNT) { 1113 - if (lpfc_ncmd->cpu != cpu) 1114 - lpfc_printf_vlog(vport, 1115 - KERN_INFO, LOG_NVME_IOERR, 1116 - "6701 CPU Check cmpl: " 1117 - "cpu %d expect %d\n", 1118 - cpu, lpfc_ncmd->cpu); 1119 - phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++; 1120 - } 1187 + this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 1188 + if (lpfc_ncmd->cpu != cpu) 1189 + lpfc_printf_vlog(vport, 1190 + KERN_INFO, LOG_NVME_IOERR, 1191 + "6701 CPU Check cmpl: " 1192 + "cpu %d expect %d\n", 1193 + cpu, lpfc_ncmd->cpu); 1121 1194 } 1122 1195 #endif 1123 1196 ··· 1662 1743 if (lpfc_ncmd->ts_cmd_start) 1663 1744 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); 1664 1745 1665 - if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { 1746 + if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) { 1666 1747 cpu = raw_smp_processor_id(); 1667 - if (cpu < LPFC_CHECK_CPU_CNT) { 1668 - lpfc_ncmd->cpu = cpu; 1669 - if (idx != cpu) 1670 - lpfc_printf_vlog(vport, 1671 - KERN_INFO, LOG_NVME_IOERR, 1672 - "6702 CPU Check cmd: " 1673 - "cpu %d wq %d\n", 1674 - lpfc_ncmd->cpu, 1675 - lpfc_queue_info->index); 1676 - phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++; 1677 - } 1748 + this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); 1749 + lpfc_ncmd->cpu = cpu; 1750 + if (idx != cpu) 1751 + lpfc_printf_vlog(vport, 1752 + KERN_INFO, LOG_NVME_IOERR, 1753 + "6702 CPU Check cmd: " 1754 + "cpu %d wq %d\n", 1755 + 
lpfc_ncmd->cpu, 1756 + lpfc_queue_info->index); 1678 1757 } 1679 1758 #endif 1680 1759 return 0;
+30 -32
drivers/scsi/lpfc/lpfc_nvmet.c
··· 707 707 struct lpfc_nvmet_rcv_ctx *ctxp; 708 708 uint32_t status, result, op, start_clean, logerr; 709 709 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 710 - uint32_t id; 710 + int id; 711 711 #endif 712 712 713 713 ctxp = cmdwqe->context2; ··· 814 814 rsp->done(rsp); 815 815 } 816 816 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 817 - if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 817 + if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { 818 818 id = raw_smp_processor_id(); 819 - if (id < LPFC_CHECK_CPU_CNT) { 820 - if (ctxp->cpu != id) 821 - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, 822 - "6704 CPU Check cmdcmpl: " 823 - "cpu %d expect %d\n", 824 - id, ctxp->cpu); 825 - phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++; 826 - } 819 + this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 820 + if (ctxp->cpu != id) 821 + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, 822 + "6704 CPU Check cmdcmpl: " 823 + "cpu %d expect %d\n", 824 + id, ctxp->cpu); 827 825 } 828 826 #endif 829 827 } ··· 929 931 struct lpfc_sli_ring *pring; 930 932 unsigned long iflags; 931 933 int rc; 934 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 935 + int id; 936 + #endif 932 937 933 938 if (phba->pport->load_flag & FC_UNLOADING) { 934 939 rc = -ENODEV; ··· 955 954 if (!ctxp->hdwq) 956 955 ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid]; 957 956 958 - if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 959 - int id = raw_smp_processor_id(); 960 - if (id < LPFC_CHECK_CPU_CNT) { 961 - if (rsp->hwqid != id) 962 - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, 963 - "6705 CPU Check OP: " 964 - "cpu %d expect %d\n", 965 - id, rsp->hwqid); 966 - phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++; 967 - } 957 + if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { 958 + id = raw_smp_processor_id(); 959 + this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); 960 + if (rsp->hwqid != id) 961 + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, 962 + "6705 CPU Check OP: " 963 + "cpu %d expect %d\n", 964 + id, rsp->hwqid); 968 965 ctxp->cpu = id; /* 
Setup cpu for cmpl check */ 969 966 } 970 967 #endif ··· 2269 2270 size = nvmebuf->bytes_recv; 2270 2271 2271 2272 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2272 - if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { 2273 - if (current_cpu < LPFC_CHECK_CPU_CNT) { 2274 - if (idx != current_cpu) 2275 - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, 2276 - "6703 CPU Check rcv: " 2277 - "cpu %d expect %d\n", 2278 - current_cpu, idx); 2279 - phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++; 2280 - } 2273 + if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { 2274 + this_cpu_inc(phba->sli4_hba.c_stat->rcv_io); 2275 + if (idx != current_cpu) 2276 + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, 2277 + "6703 CPU Check rcv: " 2278 + "cpu %d expect %d\n", 2279 + current_cpu, idx); 2281 2280 } 2282 2281 #endif 2283 2282 ··· 2595 2598 union lpfc_wqe128 *wqe; 2596 2599 struct ulp_bde64 *bde; 2597 2600 dma_addr_t physaddr; 2598 - int i, cnt; 2601 + int i, cnt, nsegs; 2599 2602 int do_pbde; 2600 2603 int xc = 1; 2601 2604 ··· 2626 2629 phba->cfg_nvme_seg_cnt); 2627 2630 return NULL; 2628 2631 } 2632 + nsegs = rsp->sg_cnt; 2629 2633 2630 2634 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2631 2635 nvmewqe = ctxp->wqeq; ··· 2866 2868 wqe->fcp_trsp.rsvd_12_15[0] = 0; 2867 2869 2868 2870 /* Use rspbuf, NOT sg list */ 2869 - rsp->sg_cnt = 0; 2871 + nsegs = 0; 2870 2872 sgl->word2 = 0; 2871 2873 atomic_inc(&tgtp->xmt_fcp_rsp); 2872 2874 break; ··· 2883 2885 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; 2884 2886 nvmewqe->context1 = ndlp; 2885 2887 2886 - for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) { 2888 + for_each_sg(rsp->sg, sgel, nsegs, i) { 2887 2889 physaddr = sg_dma_address(sgel); 2888 2890 cnt = sg_dma_len(sgel); 2889 2891 sgl->addr_hi = putPaddrHigh(physaddr);
+25 -65
drivers/scsi/lpfc/lpfc_scsi.c
··· 3805 3805 struct Scsi_Host *shost; 3806 3806 int idx; 3807 3807 uint32_t logit = LOG_FCP; 3808 - #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 3809 - int cpu; 3810 - #endif 3811 3808 3812 3809 /* Guard against abort handler being called at same time */ 3813 3810 spin_lock(&lpfc_cmd->buf_lock); ··· 3823 3826 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; 3824 3827 3825 3828 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 3826 - if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) { 3827 - cpu = raw_smp_processor_id(); 3828 - if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq) 3829 - phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++; 3830 - } 3829 + if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 3830 + this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 3831 3831 #endif 3832 3832 shost = cmd->device->host; 3833 3833 ··· 4025 4031 lpfc_cmd->pCmd = NULL; 4026 4032 spin_unlock(&lpfc_cmd->buf_lock); 4027 4033 4034 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4035 + if (lpfc_cmd->ts_cmd_start) { 4036 + lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp; 4037 + lpfc_cmd->ts_data_io = ktime_get_ns(); 4038 + phba->ktime_last_cmd = lpfc_cmd->ts_data_io; 4039 + lpfc_io_ktime(phba, lpfc_cmd); 4040 + } 4041 + #endif 4028 4042 /* The sdev is not guaranteed to be valid post scsi_done upcall. 
*/ 4029 4043 cmd->scsi_done(cmd); 4030 4044 ··· 4506 4504 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 4507 4505 int err, idx; 4508 4506 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4509 - int cpu; 4507 + uint64_t start = 0L; 4508 + 4509 + if (phba->ktime_on) 4510 + start = ktime_get_ns(); 4510 4511 #endif 4511 4512 4512 4513 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); ··· 4631 4626 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 4632 4627 4633 4628 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4634 - if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) { 4635 - cpu = raw_smp_processor_id(); 4636 - if (cpu < LPFC_CHECK_CPU_CNT) { 4637 - struct lpfc_sli4_hdw_queue *hdwq = 4638 - &phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no]; 4639 - hdwq->cpucheck_xmt_io[cpu]++; 4640 - } 4641 - } 4629 + if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 4630 + this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); 4642 4631 #endif 4643 4632 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, 4644 4633 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 4634 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4635 + if (start) { 4636 + lpfc_cmd->ts_cmd_start = start; 4637 + lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd; 4638 + lpfc_cmd->ts_cmd_wqput = ktime_get_ns(); 4639 + } else { 4640 + lpfc_cmd->ts_cmd_start = 0; 4641 + } 4642 + #endif 4645 4643 if (err) { 4646 4644 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4647 4645 "3376 FCP could not issue IOCB err %x" ··· 6031 6023 .track_queue_depth = 0, 6032 6024 }; 6033 6025 6034 - struct scsi_host_template lpfc_template_no_hr = { 6035 - .module = THIS_MODULE, 6036 - .name = LPFC_DRIVER_NAME, 6037 - .proc_name = LPFC_DRIVER_NAME, 6038 - .info = lpfc_info, 6039 - .queuecommand = lpfc_queuecommand, 6040 - .eh_timed_out = fc_eh_timed_out, 6041 - .eh_abort_handler = lpfc_abort_handler, 6042 - .eh_device_reset_handler = lpfc_device_reset_handler, 6043 - .eh_target_reset_handler = lpfc_target_reset_handler, 6044 - .eh_bus_reset_handler = lpfc_bus_reset_handler, 6045 - .slave_alloc 
= lpfc_slave_alloc, 6046 - .slave_configure = lpfc_slave_configure, 6047 - .slave_destroy = lpfc_slave_destroy, 6048 - .scan_finished = lpfc_scan_finished, 6049 - .this_id = -1, 6050 - .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 6051 - .cmd_per_lun = LPFC_CMD_PER_LUN, 6052 - .shost_attrs = lpfc_hba_attrs, 6053 - .max_sectors = 0xFFFFFFFF, 6054 - .vendor_id = LPFC_NL_VENDOR_ID, 6055 - .change_queue_depth = scsi_change_queue_depth, 6056 - .track_queue_depth = 1, 6057 - }; 6058 - 6059 6026 struct scsi_host_template lpfc_template = { 6060 6027 .module = THIS_MODULE, 6061 6028 .name = LPFC_DRIVER_NAME, ··· 6053 6070 .shost_attrs = lpfc_hba_attrs, 6054 6071 .max_sectors = 0xFFFF, 6055 6072 .vendor_id = LPFC_NL_VENDOR_ID, 6056 - .change_queue_depth = scsi_change_queue_depth, 6057 - .track_queue_depth = 1, 6058 - }; 6059 - 6060 - struct scsi_host_template lpfc_vport_template = { 6061 - .module = THIS_MODULE, 6062 - .name = LPFC_DRIVER_NAME, 6063 - .proc_name = LPFC_DRIVER_NAME, 6064 - .info = lpfc_info, 6065 - .queuecommand = lpfc_queuecommand, 6066 - .eh_timed_out = fc_eh_timed_out, 6067 - .eh_abort_handler = lpfc_abort_handler, 6068 - .eh_device_reset_handler = lpfc_device_reset_handler, 6069 - .eh_target_reset_handler = lpfc_target_reset_handler, 6070 - .slave_alloc = lpfc_slave_alloc, 6071 - .slave_configure = lpfc_slave_configure, 6072 - .slave_destroy = lpfc_slave_destroy, 6073 - .scan_finished = lpfc_scan_finished, 6074 - .this_id = -1, 6075 - .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 6076 - .cmd_per_lun = LPFC_CMD_PER_LUN, 6077 - .shost_attrs = lpfc_vport_attrs, 6078 - .max_sectors = 0xFFFF, 6079 6073 .change_queue_depth = scsi_change_queue_depth, 6080 6074 .track_queue_depth = 1, 6081 6075 };
+13 -34
drivers/scsi/lpfc/lpfc_sli.c
··· 230 230 * This routine will update the HBA index of a queue to reflect consumption of 231 231 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed 232 232 * an entry the host calls this function to update the queue's internal 233 - * pointers. This routine returns the number of entries that were consumed by 234 - * the HBA. 233 + * pointers. 235 234 **/ 236 - static uint32_t 235 + static void 237 236 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index) 238 237 { 239 - uint32_t released = 0; 240 - 241 238 /* sanity check on queue memory */ 242 239 if (unlikely(!q)) 243 - return 0; 240 + return; 244 241 245 - if (q->hba_index == index) 246 - return 0; 247 - do { 248 - q->hba_index = ((q->hba_index + 1) % q->entry_count); 249 - released++; 250 - } while (q->hba_index != index); 251 - return released; 242 + q->hba_index = index; 252 243 } 253 244 254 245 /** ··· 2502 2511 !pmb->u.mb.mbxStatus) { 2503 2512 rpi = pmb->u.mb.un.varWords[0]; 2504 2513 vpi = pmb->u.mb.un.varRegLogin.vpi; 2514 + if (phba->sli_rev == LPFC_SLI_REV4) 2515 + vpi -= phba->sli4_hba.max_cfg_param.vpi_base; 2505 2516 lpfc_unreg_login(phba, vpi, rpi, pmb); 2506 2517 pmb->vport = vport; 2507 2518 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; ··· 4037 4044 struct lpfc_iocbq *piocb, *next_iocb; 4038 4045 4039 4046 spin_lock_irq(&phba->hbalock); 4047 + if (phba->hba_flag & HBA_IOQ_FLUSH || 4048 + !phba->sli4_hba.hdwq) { 4049 + spin_unlock_irq(&phba->hbalock); 4050 + return; 4051 + } 4040 4052 /* Indicate the I/O queues are flushed */ 4041 4053 phba->hba_flag |= HBA_IOQ_FLUSH; 4042 4054 spin_unlock_irq(&phba->hbalock); ··· 5032 5034 5033 5035 } else 5034 5036 phba->max_vpi = 0; 5035 - phba->fips_level = 0; 5036 - phba->fips_spec_rev = 0; 5037 - if (pmb->u.mb.un.varCfgPort.gdss) { 5038 - phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 5039 - phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 5040 - phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 5041 - 
lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5042 - "2850 Security Crypto Active. FIPS x%d " 5043 - "(Spec Rev: x%d)", 5044 - phba->fips_level, phba->fips_spec_rev); 5045 - } 5046 - if (pmb->u.mb.un.varCfgPort.sec_err) { 5047 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5048 - "2856 Config Port Security Crypto " 5049 - "Error: x%x ", 5050 - pmb->u.mb.un.varCfgPort.sec_err); 5051 - } 5052 5037 if (pmb->u.mb.un.varCfgPort.gerbm) 5053 5038 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 5054 5039 if (pmb->u.mb.un.varCfgPort.gcrp) ··· 14423 14442 { 14424 14443 struct lpfc_hba *phba = eq->phba; 14425 14444 14426 - if (list_empty(&phba->poll_list)) { 14427 - timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 14428 - /* kickstart slowpath processing for this eq */ 14445 + /* kickstart slowpath processing if needed */ 14446 + if (list_empty(&phba->poll_list)) 14429 14447 mod_timer(&phba->cpuhp_poll_timer, 14430 14448 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 14431 - } 14432 14449 14433 14450 list_add_rcu(&eq->_poll_list, &phba->poll_list); 14434 14451 synchronize_rcu();
+1 -1
drivers/scsi/lpfc/lpfc_sli.h
··· 446 446 uint64_t ts_last_cmd; 447 447 uint64_t ts_cmd_wqput; 448 448 uint64_t ts_isr_cmpl; 449 - uint64_t ts_data_nvme; 449 + uint64_t ts_data_io; 450 450 #endif 451 451 };
+12 -7
drivers/scsi/lpfc/lpfc_sli4.h
··· 697 697 struct lpfc_lock_stat lock_conflict; 698 698 #endif 699 699 700 - #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 701 - #define LPFC_CHECK_CPU_CNT 128 702 - uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT]; 703 - uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT]; 704 - uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT]; 705 - #endif 706 - 707 700 /* Per HDWQ pool resources */ 708 701 struct list_head sgl_list; 709 702 struct list_head cmd_rsp_buf_list; ··· 731 738 #define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \ 732 739 spin_lock_irqsave(lock, flag) 733 740 #define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock) 741 + #endif 742 + 743 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 744 + struct lpfc_hdwq_stat { 745 + u32 hdwq_no; 746 + u32 rcv_io; 747 + u32 xmt_io; 748 + u32 cmpl_io; 749 + }; 734 750 #endif 735 751 736 752 struct lpfc_sli4_hba { ··· 923 921 struct cpumask numa_mask; 924 922 uint16_t curr_disp_cpu; 925 923 struct lpfc_eq_intr_info __percpu *eq_info; 924 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 925 + struct lpfc_hdwq_stat __percpu *c_stat; 926 + #endif 926 927 uint32_t conf_trunk; 927 928 #define lpfc_conf_trunk_port0_WORD conf_trunk 928 929 #define lpfc_conf_trunk_port0_SHIFT 0
+1 -1
drivers/scsi/lpfc/lpfc_version.h
··· 20 20 * included with this package. * 21 21 *******************************************************************/ 22 22 23 - #define LPFC_DRIVER_VERSION "12.6.0.4" 23 + #define LPFC_DRIVER_VERSION "12.8.0.0" 24 24 #define LPFC_DRIVER_NAME "lpfc" 25 25 26 26 /* Used for SLI 2/3 */
+4 -4
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 9908 9908 9909 9909 ioc->remove_host = 1; 9910 9910 9911 - mpt3sas_wait_for_commands_to_complete(ioc); 9912 - _scsih_flush_running_cmds(ioc); 9911 + if (!pci_device_is_present(pdev)) 9912 + _scsih_flush_running_cmds(ioc); 9913 9913 9914 9914 _scsih_fw_event_cleanup_queue(ioc); 9915 9915 ··· 9992 9992 9993 9993 ioc->remove_host = 1; 9994 9994 9995 - mpt3sas_wait_for_commands_to_complete(ioc); 9996 - _scsih_flush_running_cmds(ioc); 9995 + if (!pci_device_is_present(pdev)) 9996 + _scsih_flush_running_cmds(ioc); 9997 9997 9998 9998 _scsih_fw_event_cleanup_queue(ioc); 9999 9999
+3 -1
drivers/scsi/scsi_transport_iscsi.c
··· 2022 2022 if (session->target_id == ISCSI_MAX_TARGET) { 2023 2023 spin_unlock_irqrestore(&session->lock, flags); 2024 2024 mutex_unlock(&ihost->mutex); 2025 - return; 2025 + goto unbind_session_exit; 2026 2026 } 2027 2027 2028 2028 target_id = session->target_id; ··· 2034 2034 ida_simple_remove(&iscsi_sess_ida, target_id); 2035 2035 2036 2036 scsi_remove_target(&session->dev); 2037 + 2038 + unbind_session_exit: 2037 2039 iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); 2038 2040 ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); 2039 2041 }
+3 -1
drivers/scsi/sr.c
··· 550 550 static void sr_block_release(struct gendisk *disk, fmode_t mode) 551 551 { 552 552 struct scsi_cd *cd = scsi_cd(disk); 553 + 553 554 mutex_lock(&cd->lock); 554 555 cdrom_release(&cd->cdi, mode); 555 - scsi_cd_put(cd); 556 556 mutex_unlock(&cd->lock); 557 + 558 + scsi_cd_put(cd); 557 559 } 558 560 559 561 static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+11 -2
drivers/scsi/ufs/ufs-mediatek.c
··· 499 499 500 500 if (ufshcd_is_link_hibern8(hba)) { 501 501 err = ufs_mtk_link_set_lpm(hba); 502 - if (err) 502 + if (err) { 503 + /* 504 + * Set link as off state enforcedly to trigger 505 + * ufshcd_host_reset_and_restore() in ufshcd_suspend() 506 + * for completed host reset. 507 + */ 508 + ufshcd_set_link_off(hba); 503 509 return -EAGAIN; 510 + } 504 511 } 505 512 506 513 if (!ufshcd_is_link_active(hba)) ··· 526 519 527 520 if (ufshcd_is_link_hibern8(hba)) { 528 521 err = ufs_mtk_link_set_hpm(hba); 529 - if (err) 522 + if (err) { 523 + err = ufshcd_link_recovery(hba); 530 524 return err; 525 + } 531 526 } 532 527 533 528 return 0;
+48 -39
drivers/scsi/ufs/ufshcd.c
··· 172 172 #define ufshcd_clear_eh_in_progress(h) \ 173 173 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) 174 174 175 - #define ufshcd_set_ufs_dev_active(h) \ 176 - ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE) 177 - #define ufshcd_set_ufs_dev_sleep(h) \ 178 - ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE) 179 - #define ufshcd_set_ufs_dev_poweroff(h) \ 180 - ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE) 181 - #define ufshcd_is_ufs_dev_active(h) \ 182 - ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE) 183 - #define ufshcd_is_ufs_dev_sleep(h) \ 184 - ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE) 185 - #define ufshcd_is_ufs_dev_poweroff(h) \ 186 - ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE) 187 - 188 175 struct ufs_pm_lvl_states ufs_pm_lvl_states[] = { 189 176 {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE}, 190 177 {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE}, ··· 855 868 return false; 856 869 } 857 870 858 - static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) 871 + /** 872 + * ufshcd_set_clk_freq - set UFS controller clock frequencies 873 + * @hba: per adapter instance 874 + * @scale_up: If True, set max possible frequency othewise set low frequency 875 + * 876 + * Returns 0 if successful 877 + * Returns < 0 for any other errors 878 + */ 879 + static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up) 859 880 { 860 881 int ret = 0; 861 882 struct ufs_clk_info *clki; 862 883 struct list_head *head = &hba->clk_list_head; 863 - ktime_t start = ktime_get(); 864 - bool clk_state_changed = false; 865 884 866 885 if (list_empty(head)) 867 886 goto out; 868 - 869 - ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); 870 - if (ret) 871 - return ret; 872 887 873 888 list_for_each_entry(clki, head, list) { 874 889 if (!IS_ERR_OR_NULL(clki->clk)) { ··· 878 889 if (clki->curr_freq == clki->max_freq) 879 890 continue; 880 891 881 - clk_state_changed = true; 882 892 ret = clk_set_rate(clki->clk, clki->max_freq); 883 893 if (ret) { 884 894 
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", ··· 896 908 if (clki->curr_freq == clki->min_freq) 897 909 continue; 898 910 899 - clk_state_changed = true; 900 911 ret = clk_set_rate(clki->clk, clki->min_freq); 901 912 if (ret) { 902 913 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", ··· 914 927 clki->name, clk_get_rate(clki->clk)); 915 928 } 916 929 930 + out: 931 + return ret; 932 + } 933 + 934 + /** 935 + * ufshcd_scale_clks - scale up or scale down UFS controller clocks 936 + * @hba: per adapter instance 937 + * @scale_up: True if scaling up and false if scaling down 938 + * 939 + * Returns 0 if successful 940 + * Returns < 0 for any other errors 941 + */ 942 + static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) 943 + { 944 + int ret = 0; 945 + ktime_t start = ktime_get(); 946 + 947 + ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); 948 + if (ret) 949 + goto out; 950 + 951 + ret = ufshcd_set_clk_freq(hba, scale_up); 952 + if (ret) 953 + goto out; 954 + 917 955 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); 956 + if (ret) 957 + ufshcd_set_clk_freq(hba, !scale_up); 918 958 919 959 out: 920 - if (clk_state_changed) 921 - trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), 960 + trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), 922 961 (scale_up ? "up" : "down"), 923 962 ktime_to_us(ktime_sub(ktime_get(), start)), ret); 924 963 return ret; ··· 1078 1065 } 1079 1066 1080 1067 /* check if the power mode needs to be changed or not? 
*/ 1081 - ret = ufshcd_change_power_mode(hba, &new_pwr_info); 1082 - 1068 + ret = ufshcd_config_pwr_mode(hba, &new_pwr_info); 1083 1069 if (ret) 1084 1070 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)", 1085 1071 __func__, ret, ··· 1131 1119 1132 1120 ret = ufshcd_clock_scaling_prepare(hba); 1133 1121 if (ret) 1134 - return ret; 1122 + goto out; 1135 1123 1136 1124 /* scale down the gear before scaling down clocks */ 1137 1125 if (!scale_up) { 1138 1126 ret = ufshcd_scale_gear(hba, false); 1139 1127 if (ret) 1140 - goto out; 1128 + goto out_unprepare; 1141 1129 } 1142 1130 1143 1131 ret = ufshcd_scale_clks(hba, scale_up); 1144 1132 if (ret) { 1145 1133 if (!scale_up) 1146 1134 ufshcd_scale_gear(hba, true); 1147 - goto out; 1135 + goto out_unprepare; 1148 1136 } 1149 1137 1150 1138 /* scale up the gear after scaling up clocks */ 1151 1139 if (scale_up) { 1152 1140 ret = ufshcd_scale_gear(hba, true); 1153 - if (ret) { 1141 + if (ret) 1154 1142 ufshcd_scale_clks(hba, false); 1155 - goto out; 1156 - } 1157 1143 } 1158 1144 1159 - ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); 1160 - 1161 - out: 1145 + out_unprepare: 1162 1146 ufshcd_clock_scaling_unprepare(hba); 1147 + out: 1163 1148 ufshcd_release(hba); 1164 1149 return ret; 1165 1150 } ··· 3794 3785 return ret; 3795 3786 } 3796 3787 3797 - static int ufshcd_link_recovery(struct ufs_hba *hba) 3788 + int ufshcd_link_recovery(struct ufs_hba *hba) 3798 3789 { 3799 3790 int ret; 3800 3791 unsigned long flags; ··· 3821 3812 3822 3813 return ret; 3823 3814 } 3815 + EXPORT_SYMBOL_GPL(ufshcd_link_recovery); 3824 3816 3825 3817 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) 3826 3818 { ··· 4122 4112 memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); 4123 4113 4124 4114 ret = ufshcd_change_power_mode(hba, &final_params); 4125 - if (!ret) 4126 - ufshcd_print_pwr_info(hba); 4127 4115 4128 4116 return ret; 4129 4117 } ··· 6307 6299 
spin_unlock_irqrestore(hba->host->host_lock, flags); 6308 6300 6309 6301 /* scale up clocks to max frequency before full reinitialization */ 6310 - ufshcd_scale_clks(hba, true); 6302 + ufshcd_set_clk_freq(hba, true); 6311 6303 6312 6304 err = ufshcd_hba_enable(hba); 6313 6305 if (err) ··· 7135 7127 __func__, ret); 7136 7128 goto out; 7137 7129 } 7130 + ufshcd_print_pwr_info(hba); 7138 7131 } 7139 7132 7140 7133 /*
+15
drivers/scsi/ufs/ufshcd.h
··· 130 130 #define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \ 131 131 UIC_LINK_HIBERN8_STATE) 132 132 133 + #define ufshcd_set_ufs_dev_active(h) \ 134 + ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE) 135 + #define ufshcd_set_ufs_dev_sleep(h) \ 136 + ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE) 137 + #define ufshcd_set_ufs_dev_poweroff(h) \ 138 + ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE) 139 + #define ufshcd_is_ufs_dev_active(h) \ 140 + ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE) 141 + #define ufshcd_is_ufs_dev_sleep(h) \ 142 + ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE) 143 + #define ufshcd_is_ufs_dev_poweroff(h) \ 144 + ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE) 145 + 133 146 /* 134 147 * UFS Power management levels. 135 148 * Each level is in increasing order of power savings. ··· 801 788 void ufshcd_dealloc_host(struct ufs_hba *); 802 789 int ufshcd_hba_enable(struct ufs_hba *hba); 803 790 int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int); 791 + int ufshcd_link_recovery(struct ufs_hba *hba); 804 792 int ufshcd_make_hba_operational(struct ufs_hba *hba); 805 793 void ufshcd_remove(struct ufs_hba *); 806 794 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); ··· 1097 1083 { 1098 1084 if (hba->vops && hba->vops->device_reset) { 1099 1085 hba->vops->device_reset(hba); 1086 + ufshcd_set_ufs_dev_active(hba); 1100 1087 ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0); 1101 1088 } 1102 1089 }
+62 -125
drivers/target/target_core_xcopy.c
··· 134 134 * Assigned designator 135 135 */ 136 136 desig_len = desc[7]; 137 - if (desig_len != 16) { 137 + if (desig_len != XCOPY_NAA_IEEE_REGEX_LEN) { 138 138 pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len); 139 139 return -EINVAL; 140 140 } ··· 315 315 xop->nolb, (unsigned long long)xop->src_lba, 316 316 (unsigned long long)xop->dst_lba); 317 317 318 - if (dc != 0) { 319 - xop->dbl = get_unaligned_be24(&desc[29]); 320 - 321 - pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); 322 - } 323 318 return 0; 324 319 } 325 320 ··· 410 415 struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd, 411 416 struct xcopy_pt_cmd, se_cmd); 412 417 413 - kfree(xpt_cmd); 418 + /* xpt_cmd is on the stack, nothing to free here */ 419 + pr_debug("xpt_cmd done: %p\n", xpt_cmd); 414 420 } 415 421 416 422 static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd) ··· 500 504 * @cdb: SCSI CDB to be copied into @xpt_cmd. 501 505 * @remote_port: If false, use the LUN through which the XCOPY command has 502 506 * been received. If true, use @se_dev->xcopy_lun. 503 - * @alloc_mem: Whether or not to allocate an SGL list. 504 507 * 505 508 * Set up a SCSI command (READ or WRITE) that will be used to execute an 506 509 * XCOPY command. 
··· 509 514 struct xcopy_op *xop, 510 515 struct se_device *se_dev, 511 516 unsigned char *cdb, 512 - bool remote_port, 513 - bool alloc_mem) 517 + bool remote_port) 514 518 { 515 519 struct se_cmd *cmd = &xpt_cmd->se_cmd; 516 - sense_reason_t sense_rc; 517 - int ret = 0, rc; 518 520 519 521 /* 520 522 * Setup LUN+port to honor reservations based upon xop->op_origin for ··· 527 535 cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 528 536 529 537 cmd->tag = 0; 530 - sense_rc = target_setup_cmd_from_cdb(cmd, cdb); 531 - if (sense_rc) { 532 - ret = -EINVAL; 533 - goto out; 534 - } 538 + if (target_setup_cmd_from_cdb(cmd, cdb)) 539 + return -EINVAL; 535 540 536 - if (alloc_mem) { 537 - rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 538 - cmd->data_length, false, false); 539 - if (rc < 0) { 540 - ret = rc; 541 - goto out; 542 - } 543 - /* 544 - * Set this bit so that transport_free_pages() allows the 545 - * caller to release SGLs + physical memory allocated by 546 - * transport_generic_get_mem().. 547 - */ 548 - cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 549 - } else { 550 - /* 551 - * Here the previously allocated SGLs for the internal READ 552 - * are mapped zero-copy to the internal WRITE. 
553 - */ 554 - sense_rc = transport_generic_map_mem_to_cmd(cmd, 555 - xop->xop_data_sg, xop->xop_data_nents, 556 - NULL, 0); 557 - if (sense_rc) { 558 - ret = -EINVAL; 559 - goto out; 560 - } 541 + if (transport_generic_map_mem_to_cmd(cmd, xop->xop_data_sg, 542 + xop->xop_data_nents, NULL, 0)) 543 + return -EINVAL; 561 544 562 - pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:" 563 - " %u\n", cmd->t_data_sg, cmd->t_data_nents); 564 - } 545 + pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:" 546 + " %u\n", cmd->t_data_sg, cmd->t_data_nents); 565 547 566 548 return 0; 567 - 568 - out: 569 - return ret; 570 549 } 571 550 572 551 static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd) ··· 567 604 sector_t src_lba, 568 605 u32 src_sectors) 569 606 { 570 - struct xcopy_pt_cmd *xpt_cmd; 571 - struct se_cmd *se_cmd; 607 + struct xcopy_pt_cmd xpt_cmd; 608 + struct se_cmd *se_cmd = &xpt_cmd.se_cmd; 572 609 u32 length = (src_sectors * src_dev->dev_attrib.block_size); 573 610 int rc; 574 611 unsigned char cdb[16]; 575 612 bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP); 576 613 577 - xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL); 578 - if (!xpt_cmd) { 579 - pr_err("Unable to allocate xcopy_pt_cmd\n"); 580 - return -ENOMEM; 581 - } 582 - init_completion(&xpt_cmd->xpt_passthrough_sem); 583 - se_cmd = &xpt_cmd->se_cmd; 614 + memset(&xpt_cmd, 0, sizeof(xpt_cmd)); 615 + init_completion(&xpt_cmd.xpt_passthrough_sem); 584 616 585 617 memset(&cdb[0], 0, 16); 586 618 cdb[0] = READ_16; ··· 585 627 (unsigned long long)src_lba, src_sectors, length); 586 628 587 629 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length, 588 - DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]); 589 - xop->src_pt_cmd = xpt_cmd; 630 + DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0]); 590 631 591 - rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0], 592 - remote_port, true); 632 + rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, 
src_dev, &cdb[0], 633 + remote_port); 593 634 if (rc < 0) { 594 - ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; 595 - transport_generic_free_cmd(se_cmd, 0); 596 - return rc; 635 + ec_cmd->scsi_status = se_cmd->scsi_status; 636 + goto out; 597 637 } 598 638 599 - xop->xop_data_sg = se_cmd->t_data_sg; 600 - xop->xop_data_nents = se_cmd->t_data_nents; 601 639 pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ" 602 640 " memory\n", xop->xop_data_sg, xop->xop_data_nents); 603 641 604 - rc = target_xcopy_issue_pt_cmd(xpt_cmd); 605 - if (rc < 0) { 606 - ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; 607 - transport_generic_free_cmd(se_cmd, 0); 608 - return rc; 609 - } 610 - /* 611 - * Clear off the allocated t_data_sg, that has been saved for 612 - * zero-copy WRITE submission reuse in struct xcopy_op.. 613 - */ 614 - se_cmd->t_data_sg = NULL; 615 - se_cmd->t_data_nents = 0; 616 - 617 - return 0; 642 + rc = target_xcopy_issue_pt_cmd(&xpt_cmd); 643 + if (rc < 0) 644 + ec_cmd->scsi_status = se_cmd->scsi_status; 645 + out: 646 + transport_generic_free_cmd(se_cmd, 0); 647 + return rc; 618 648 } 619 649 620 650 static int target_xcopy_write_destination( ··· 612 666 sector_t dst_lba, 613 667 u32 dst_sectors) 614 668 { 615 - struct xcopy_pt_cmd *xpt_cmd; 616 - struct se_cmd *se_cmd; 669 + struct xcopy_pt_cmd xpt_cmd; 670 + struct se_cmd *se_cmd = &xpt_cmd.se_cmd; 617 671 u32 length = (dst_sectors * dst_dev->dev_attrib.block_size); 618 672 int rc; 619 673 unsigned char cdb[16]; 620 674 bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP); 621 675 622 - xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL); 623 - if (!xpt_cmd) { 624 - pr_err("Unable to allocate xcopy_pt_cmd\n"); 625 - return -ENOMEM; 626 - } 627 - init_completion(&xpt_cmd->xpt_passthrough_sem); 628 - se_cmd = &xpt_cmd->se_cmd; 676 + memset(&xpt_cmd, 0, sizeof(xpt_cmd)); 677 + init_completion(&xpt_cmd.xpt_passthrough_sem); 629 678 630 679 memset(&cdb[0], 0, 16); 631 680 cdb[0] = 
WRITE_16; ··· 630 689 (unsigned long long)dst_lba, dst_sectors, length); 631 690 632 691 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length, 633 - DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]); 634 - xop->dst_pt_cmd = xpt_cmd; 692 + DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0]); 635 693 636 - rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0], 637 - remote_port, false); 694 + rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0], 695 + remote_port); 638 696 if (rc < 0) { 639 - struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd; 640 - ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; 641 - /* 642 - * If the failure happened before the t_mem_list hand-off in 643 - * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that 644 - * core releases this memory on error during X-COPY WRITE I/O. 645 - */ 646 - src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 647 - src_cmd->t_data_sg = xop->xop_data_sg; 648 - src_cmd->t_data_nents = xop->xop_data_nents; 649 - 650 - transport_generic_free_cmd(se_cmd, 0); 651 - return rc; 697 + ec_cmd->scsi_status = se_cmd->scsi_status; 698 + goto out; 652 699 } 653 700 654 - rc = target_xcopy_issue_pt_cmd(xpt_cmd); 655 - if (rc < 0) { 656 - ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; 657 - se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 658 - transport_generic_free_cmd(se_cmd, 0); 659 - return rc; 660 - } 661 - 662 - return 0; 701 + rc = target_xcopy_issue_pt_cmd(&xpt_cmd); 702 + if (rc < 0) 703 + ec_cmd->scsi_status = se_cmd->scsi_status; 704 + out: 705 + transport_generic_free_cmd(se_cmd, 0); 706 + return rc; 663 707 } 664 708 665 709 static void target_xcopy_do_work(struct work_struct *work) ··· 655 729 sector_t src_lba, dst_lba, end_lba; 656 730 unsigned int max_sectors; 657 731 int rc = 0; 658 - unsigned short nolb, cur_nolb, max_nolb, copied_nolb = 0; 732 + unsigned short nolb, max_nolb, copied_nolb = 0; 659 733 660 734 if (target_parse_xcopy_cmd(xop) != 
TCM_NO_SENSE) 661 735 goto err_free; ··· 685 759 (unsigned long long)src_lba, (unsigned long long)dst_lba); 686 760 687 761 while (src_lba < end_lba) { 688 - cur_nolb = min(nolb, max_nolb); 762 + unsigned short cur_nolb = min(nolb, max_nolb); 763 + u32 cur_bytes = cur_nolb * src_dev->dev_attrib.block_size; 764 + 765 + if (cur_bytes != xop->xop_data_bytes) { 766 + /* 767 + * (Re)allocate a buffer large enough to hold the XCOPY 768 + * I/O size, which can be reused each read / write loop. 769 + */ 770 + target_free_sgl(xop->xop_data_sg, xop->xop_data_nents); 771 + rc = target_alloc_sgl(&xop->xop_data_sg, 772 + &xop->xop_data_nents, 773 + cur_bytes, 774 + false, false); 775 + if (rc < 0) 776 + goto out; 777 + xop->xop_data_bytes = cur_bytes; 778 + } 689 779 690 780 pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu," 691 781 " cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb); ··· 719 777 720 778 rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev, 721 779 dst_lba, cur_nolb); 722 - if (rc < 0) { 723 - transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0); 780 + if (rc < 0) 724 781 goto out; 725 - } 726 782 727 783 dst_lba += cur_nolb; 728 784 pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n", ··· 728 788 729 789 copied_nolb += cur_nolb; 730 790 nolb -= cur_nolb; 731 - 732 - transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0); 733 - xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 734 - 735 - transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0); 736 791 } 737 792 738 793 xcopy_pt_undepend_remotedev(xop); 794 + target_free_sgl(xop->xop_data_sg, xop->xop_data_nents); 739 795 kfree(xop); 740 796 741 797 pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n", ··· 745 809 746 810 out: 747 811 xcopy_pt_undepend_remotedev(xop); 812 + target_free_sgl(xop->xop_data_sg, xop->xop_data_nents); 748 813 749 814 err_free: 750 815 kfree(xop);
+2 -7
drivers/target/target_core_xcopy.h
··· 5 5 #define XCOPY_TARGET_DESC_LEN 32 6 6 #define XCOPY_SEGMENT_DESC_LEN 28 7 7 #define XCOPY_NAA_IEEE_REGEX_LEN 16 8 - #define XCOPY_MAX_SECTORS 1024 8 + #define XCOPY_MAX_SECTORS 4096 9 9 10 10 /* 11 11 * SPC4r37 6.4.6.1 ··· 17 17 XCOL_SOURCE_RECV_OP = 0x01, 18 18 XCOL_DEST_RECV_OP = 0x02, 19 19 }; 20 - 21 - struct xcopy_pt_cmd; 22 20 23 21 struct xcopy_op { 24 22 int op_origin; ··· 33 35 unsigned short stdi; 34 36 unsigned short dtdi; 35 37 unsigned short nolb; 36 - unsigned int dbl; 37 38 38 - struct xcopy_pt_cmd *src_pt_cmd; 39 - struct xcopy_pt_cmd *dst_pt_cmd; 40 - 39 + u32 xop_data_bytes; 41 40 u32 xop_data_nents; 42 41 struct scatterlist *xop_data_sg; 43 42 struct work_struct xop_work;