Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
"These are mostly minor updates.

There are two drivers (lpfc and mpi3mr) which missed the initial
pull and a core change to retry a start/stop unit which affects
suspend/resume"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (32 commits)
scsi: lpfc: Update lpfc version to 14.4.0.5
scsi: lpfc: Support loopback tests with VMID enabled
scsi: lpfc: Revise TRACE_EVENT log flag severities from KERN_ERR to KERN_WARNING
scsi: lpfc: Ensure DA_ID handling completion before deleting an NPIV instance
scsi: lpfc: Fix kref imbalance on fabric ndlps from dev_loss_tmo handler
scsi: lpfc: Restrict support for 32 byte CDBs to specific HBAs
scsi: lpfc: Update phba link state conditional before sending CMF_SYNC_WQE
scsi: lpfc: Add ELS_RSP cmd to the list of WQEs to flush in lpfc_els_flush_cmd()
scsi: mpi3mr: Update driver version to 8.12.0.0.50
scsi: mpi3mr: Improve wait logic while controller transitions to READY state
scsi: mpi3mr: Update MPI Headers to revision 34
scsi: mpi3mr: Use firmware-provided timestamp update interval
scsi: mpi3mr: Enhance the Enable Controller retry logic
scsi: sd: Fix off-by-one error in sd_read_block_characteristics()
scsi: pm8001: Do not overwrite PCI queue mapping
scsi: scsi_debug: Remove a useless memset()
scsi: pmcraid: Convert comma to semicolon
scsi: sd: Retry START STOP UNIT commands
scsi: mpi3mr: A performance fix
scsi: ufs: qcom: Update MODE_MAX cfg_bw value
...

+415 -154
+1 -1
drivers/message/fusion/mptctl.c
··· 1609 1609 maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); 1610 1610 1611 1611 1612 - max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents; 1612 + max = min(maxEvents, MPTCTL_EVENT_LOG_SIZE); 1613 1613 1614 1614 /* If fewer than 1 event is requested, there must have 1615 1615 * been some type of error.
-3
drivers/scsi/cxgbi/libcxgbi.h
··· 485 485 unsigned char nmtus; 486 486 unsigned char nports; 487 487 struct pci_dev *pdev; 488 - struct dentry *debugfs_root; 489 488 struct iscsi_transport *itp; 490 489 struct module *owner; 491 490 ··· 498 499 unsigned int rxq_idx_cntr; 499 500 struct cxgbi_ports_map pmap; 500 501 501 - void (*dev_ddp_cleanup)(struct cxgbi_device *); 502 502 struct cxgbi_ppm* (*cdev2ppm)(struct cxgbi_device *); 503 503 int (*csk_ddp_set_map)(struct cxgbi_ppm *, struct cxgbi_sock *, 504 504 struct cxgbi_task_tag_info *); ··· 510 512 unsigned int, int); 511 513 512 514 void (*csk_release_offload_resources)(struct cxgbi_sock *); 513 - int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); 514 515 u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32); 515 516 int (*csk_push_tx_frames)(struct cxgbi_sock *, int); 516 517 void (*csk_send_abort_req)(struct cxgbi_sock *);
+1 -1
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
··· 2421 2421 spin_lock_irqsave(&device->done_lock, flags); 2422 2422 if (test_bit(SAS_HA_FROZEN, &ha->state)) { 2423 2423 spin_unlock_irqrestore(&device->done_lock, flags); 2424 - dev_info(dev, "slot complete: task(%pK) ignored\n ", 2424 + dev_info(dev, "slot complete: task(%pK) ignored\n", 2425 2425 task); 2426 2426 return; 2427 2427 }
+18 -3
drivers/scsi/ibmvscsi/ibmvfc.c
··· 37 37 static u64 max_lun = IBMVFC_MAX_LUN; 38 38 static unsigned int max_targets = IBMVFC_MAX_TARGETS; 39 39 static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT; 40 + static u16 max_sectors = IBMVFC_MAX_SECTORS; 40 41 static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH; 41 42 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS; 42 43 static unsigned int ibmvfc_debug = IBMVFC_DEBUG; ··· 84 83 module_param_named(max_requests, max_requests, uint, S_IRUGO); 85 84 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. " 86 85 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]"); 86 + module_param_named(max_sectors, max_sectors, ushort, S_IRUGO); 87 + MODULE_PARM_DESC(max_sectors, "Maximum sectors for this adapter. " 88 + "[Default=" __stringify(IBMVFC_MAX_SECTORS) "]"); 87 89 module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO); 88 90 MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. " 89 91 "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]"); ··· 1498 1494 memset(login_info, 0, sizeof(*login_info)); 1499 1495 1500 1496 login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX); 1501 - login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9); 1497 + login_info->max_dma_len = cpu_to_be64(max_sectors << 9); 1502 1498 login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu)); 1503 1499 login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp)); 1504 1500 login_info->partition_num = cpu_to_be32(vhost->partition_number); ··· 5234 5230 } 5235 5231 5236 5232 vhost->logged_in = 1; 5237 - npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS); 5233 + npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), max_sectors); 5238 5234 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", 5239 5235 rsp->partition_name, rsp->device_name, rsp->port_loc_code, 5240 5236 rsp->drc_name, npiv_max_sectors); ··· 6333 6329 shost->can_queue = 
scsi_qdepth; 6334 6330 shost->max_lun = max_lun; 6335 6331 shost->max_id = max_targets; 6336 - shost->max_sectors = IBMVFC_MAX_SECTORS; 6332 + shost->max_sectors = max_sectors; 6337 6333 shost->max_cmd_len = IBMVFC_MAX_CDB_LEN; 6338 6334 shost->unique_id = shost->host_no; 6339 6335 shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1; ··· 6560 6556 **/ 6561 6557 static int __init ibmvfc_module_init(void) 6562 6558 { 6559 + int min_max_sectors = PAGE_SIZE >> 9; 6563 6560 int rc; 6564 6561 6565 6562 if (!firmware_has_feature(FW_FEATURE_VIO)) ··· 6568 6563 6569 6564 printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n", 6570 6565 IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE); 6566 + 6567 + /* 6568 + * Range check the max_sectors module parameter. The upper bounds is 6569 + * implicity checked since the parameter is a ushort. 6570 + */ 6571 + if (max_sectors < min_max_sectors) { 6572 + printk(KERN_ERR IBMVFC_NAME ": max_sectors must be at least %d.\n", 6573 + min_max_sectors); 6574 + max_sectors = min_max_sectors; 6575 + } 6571 6576 6572 6577 ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions); 6573 6578 if (!ibmvfc_transport_template)
+1 -1
drivers/scsi/ibmvscsi/ibmvfc.h
··· 32 32 #define IBMVFC_DEBUG 0 33 33 #define IBMVFC_MAX_TARGETS 1024 34 34 #define IBMVFC_MAX_LUN 0xffffffff 35 - #define IBMVFC_MAX_SECTORS 0xffffu 35 + #define IBMVFC_MAX_SECTORS 2048 36 36 #define IBMVFC_MAX_DISC_THREADS 4 37 37 #define IBMVFC_TGT_MEMPOOL_SZ 64 38 38 #define IBMVFC_MAX_CMDS_PER_LUN 64
+3
drivers/scsi/lpfc/lpfc_bsg.c
··· 3208 3208 cmdiocbq->num_bdes = num_bde; 3209 3209 cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; 3210 3210 cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK; 3211 + if (phba->cfg_vmid_app_header) 3212 + cmdiocbq->cmd_flag |= LPFC_IO_VMID; 3213 + 3211 3214 cmdiocbq->vport = phba->pport; 3212 3215 cmdiocbq->cmd_cmpl = NULL; 3213 3216 cmdiocbq->bpl_dmabuf = txbmp;
+17 -5
drivers/scsi/lpfc/lpfc_ct.c
··· 1572 1572 } 1573 1573 } 1574 1574 } else 1575 - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1576 - "3065 GFT_ID failed x%08x\n", ulp_status); 1575 + lpfc_vlog_msg(vport, KERN_WARNING, LOG_DISCOVERY, 1576 + "3065 GFT_ID status x%08x\n", ulp_status); 1577 1577 1578 1578 out: 1579 1579 lpfc_ct_free_iocb(phba, cmdiocb); ··· 1647 1647 } 1648 1648 1649 1649 out: 1650 + /* If the caller wanted a synchronous DA_ID completion, signal the 1651 + * wait obj and clear flag to reset the vport. 1652 + */ 1653 + if (ndlp->save_flags & NLP_WAIT_FOR_DA_ID) { 1654 + if (ndlp->da_id_waitq) 1655 + wake_up(ndlp->da_id_waitq); 1656 + } 1657 + 1658 + spin_lock_irq(&ndlp->lock); 1659 + ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID; 1660 + spin_unlock_irq(&ndlp->lock); 1661 + 1650 1662 lpfc_ct_free_iocb(phba, cmdiocb); 1651 1663 lpfc_nlp_put(ndlp); 1652 1664 return; ··· 2258 2246 } 2259 2247 2260 2248 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2261 - "0229 FDMI cmd %04x failed, latt = %d " 2249 + "0229 FDMI cmd %04x latt = %d " 2262 2250 "ulp_status: x%x, rid x%x\n", 2263 2251 be16_to_cpu(fdmi_cmd), latt, ulp_status, 2264 2252 ulp_word4); ··· 2275 2263 /* Check for a CT LS_RJT response */ 2276 2264 cmd = be16_to_cpu(fdmi_cmd); 2277 2265 if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) { 2278 - /* FDMI rsp failed */ 2266 + /* Log FDMI reject */ 2279 2267 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS, 2280 - "0220 FDMI cmd failed FS_RJT Data: x%x", cmd); 2268 + "0220 FDMI cmd FS_RJT Data: x%x", cmd); 2281 2269 2282 2270 /* Should we fallback to FDMI-2 / FDMI-1 ? */ 2283 2271 switch (cmd) {
+7
drivers/scsi/lpfc/lpfc_disc.h
··· 90 90 NLP_IN_RECOV_POST_DEV_LOSS = 0x1, 91 91 /* wait for outstanding LOGO to cmpl */ 92 92 NLP_WAIT_FOR_LOGO = 0x2, 93 + /* wait for outstanding DA_ID to finish */ 94 + NLP_WAIT_FOR_DA_ID = 0x4 93 95 }; 94 96 95 97 struct lpfc_nodelist { ··· 161 159 uint32_t nvme_fb_size; /* NVME target's supported byte cnt */ 162 160 #define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */ 163 161 uint32_t nlp_defer_did; 162 + 163 + /* These wait objects are NPIV specific. These IOs must complete 164 + * synchronously. 165 + */ 164 166 wait_queue_head_t *logo_waitq; 167 + wait_queue_head_t *da_id_waitq; 165 168 }; 166 169 167 170 struct lpfc_node_rrq {
+63 -69
drivers/scsi/lpfc/lpfc_els.c
··· 979 979 phba->fcoe_cvl_eventtag_attn = 980 980 phba->fcoe_cvl_eventtag; 981 981 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 982 - "2611 FLOGI failed on FCF (x%x), " 982 + "2611 FLOGI FCF (x%x), " 983 983 "status:x%x/x%x, tmo:x%x, perform " 984 984 "roundrobin FCF failover\n", 985 985 phba->fcf.current_rec.fcf_indx, ··· 997 997 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 998 998 ((ulp_word4 & IOERR_PARAM_MASK) == 999 999 IOERR_LOOP_OPEN_FAILURE))) 1000 - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1001 - "2858 FLOGI failure Status:x%x/x%x TMO" 1002 - ":x%x Data x%lx x%x\n", 1003 - ulp_status, ulp_word4, tmo, 1004 - phba->hba_flag, phba->fcf.fcf_flag); 1000 + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 1001 + "2858 FLOGI Status:x%x/x%x TMO" 1002 + ":x%x Data x%lx x%x\n", 1003 + ulp_status, ulp_word4, tmo, 1004 + phba->hba_flag, phba->fcf.fcf_flag); 1005 1005 1006 1006 /* Check for retry */ 1007 1007 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { ··· 1023 1023 lpfc_nlp_put(ndlp); 1024 1024 1025 1025 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 1026 - "0150 FLOGI failure Status:x%x/x%x " 1026 + "0150 FLOGI Status:x%x/x%x " 1027 1027 "xri x%x TMO:x%x refcnt %d\n", 1028 1028 ulp_status, ulp_word4, cmdiocb->sli4_xritag, 1029 1029 tmo, kref_read(&ndlp->kref)); ··· 1032 1032 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 1033 1033 ((ulp_word4 & IOERR_PARAM_MASK) == 1034 1034 IOERR_LOOP_OPEN_FAILURE))) { 1035 - /* FLOGI failure */ 1036 - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1037 - "0100 FLOGI failure Status:x%x/x%x " 1038 - "TMO:x%x\n", 1039 - ulp_status, ulp_word4, tmo); 1035 + /* Warn FLOGI status */ 1036 + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 1037 + "0100 FLOGI Status:x%x/x%x " 1038 + "TMO:x%x\n", 1039 + ulp_status, ulp_word4, tmo); 1040 1040 goto flogifail; 1041 1041 } 1042 1042 ··· 1964 1964 1965 1965 if (ulp_status) { 1966 1966 /* Check for retry */ 1967 - /* RRQ failed Don't print the vport to vport rjts */ 1967 + /* Warn RRQ 
status Don't print the vport to vport rjts */ 1968 1968 if (ulp_status != IOSTAT_LS_RJT || 1969 1969 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 1970 1970 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 1971 1971 (phba)->pport->cfg_log_verbose & LOG_ELS) 1972 - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1973 - "2881 RRQ failure DID:%06X Status:" 1974 - "x%x/x%x\n", 1975 - ndlp->nlp_DID, ulp_status, 1976 - ulp_word4); 1972 + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 1973 + "2881 RRQ DID:%06X Status:" 1974 + "x%x/x%x\n", 1975 + ndlp->nlp_DID, ulp_status, 1976 + ulp_word4); 1977 1977 } 1978 1978 1979 1979 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); ··· 2077 2077 } 2078 2078 goto out; 2079 2079 } 2080 - /* PLOGI failed Don't print the vport to vport rjts */ 2080 + /* Warn PLOGI status Don't print the vport to vport rjts */ 2081 2081 if (ulp_status != IOSTAT_LS_RJT || 2082 2082 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 2083 2083 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 2084 2084 (phba)->pport->cfg_log_verbose & LOG_ELS) 2085 - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2086 - "2753 PLOGI failure DID:%06X " 2087 - "Status:x%x/x%x\n", 2088 - ndlp->nlp_DID, ulp_status, 2089 - ulp_word4); 2085 + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 2086 + "2753 PLOGI DID:%06X " 2087 + "Status:x%x/x%x\n", 2088 + ndlp->nlp_DID, ulp_status, 2089 + ulp_word4); 2090 2090 2091 2091 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2092 2092 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) ··· 2323 2323 struct lpfc_vport *vport = cmdiocb->vport; 2324 2324 struct lpfc_nodelist *ndlp; 2325 2325 char *mode; 2326 - u32 loglevel; 2327 2326 u32 ulp_status; 2328 2327 u32 ulp_word4; 2329 2328 bool release_node = false; ··· 2371 2372 * could be expected. 
2372 2373 */ 2373 2374 if (test_bit(FC_FABRIC, &vport->fc_flag) || 2374 - vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) { 2375 - mode = KERN_ERR; 2376 - loglevel = LOG_TRACE_EVENT; 2377 - } else { 2375 + vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) 2376 + mode = KERN_WARNING; 2377 + else 2378 2378 mode = KERN_INFO; 2379 - loglevel = LOG_ELS; 2380 - } 2381 2379 2382 - /* PRLI failed */ 2383 - lpfc_printf_vlog(vport, mode, loglevel, 2384 - "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2380 + /* Warn PRLI status */ 2381 + lpfc_printf_vlog(vport, mode, LOG_ELS, 2382 + "2754 PRLI DID:%06X Status:x%x/x%x, " 2385 2383 "data: x%x x%x x%x\n", 2386 2384 ndlp->nlp_DID, ulp_status, 2387 2385 ulp_word4, ndlp->nlp_state, ··· 2850 2854 } 2851 2855 goto out; 2852 2856 } 2853 - /* ADISC failed */ 2854 - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2855 - "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2856 - ndlp->nlp_DID, ulp_status, 2857 - ulp_word4); 2857 + /* Warn ADISC status */ 2858 + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 2859 + "2755 ADISC DID:%06X Status:x%x/x%x\n", 2860 + ndlp->nlp_DID, ulp_status, 2861 + ulp_word4); 2858 2862 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2859 2863 NLP_EVT_CMPL_ADISC); 2860 2864 ··· 3041 3045 * discovery. The PLOGI will retry. 
3042 3046 */ 3043 3047 if (ulp_status) { 3044 - /* LOGO failed */ 3045 - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3046 - "2756 LOGO failure, No Retry DID:%06X " 3047 - "Status:x%x/x%x\n", 3048 - ndlp->nlp_DID, ulp_status, 3049 - ulp_word4); 3048 + /* Warn LOGO status */ 3049 + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 3050 + "2756 LOGO, No Retry DID:%06X " 3051 + "Status:x%x/x%x\n", 3052 + ndlp->nlp_DID, ulp_status, 3053 + ulp_word4); 3050 3054 3051 3055 if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 3052 3056 skip_recovery = 1; ··· 4833 4837 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4834 4838 (cmd == ELS_CMD_FDISC) && 4835 4839 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4836 - lpfc_printf_vlog(vport, KERN_ERR, 4837 - LOG_TRACE_EVENT, 4838 - "0125 FDISC Failed (x%x). " 4839 - "Fabric out of resources\n", 4840 - stat.un.lsRjtError); 4840 + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 4841 + "0125 FDISC (x%x). " 4842 + "Fabric out of resources\n", 4843 + stat.un.lsRjtError); 4841 4844 lpfc_vport_set_state(vport, 4842 4845 FC_VPORT_NO_FABRIC_RSCS); 4843 4846 } ··· 4872 4877 LSEXP_NOTHING_MORE) { 4873 4878 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4874 4879 retry = 1; 4875 - lpfc_printf_vlog(vport, KERN_ERR, 4876 - LOG_TRACE_EVENT, 4877 - "0820 FLOGI Failed (x%x). " 4878 - "BBCredit Not Supported\n", 4879 - stat.un.lsRjtError); 4880 + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 4881 + "0820 FLOGI (x%x). " 4882 + "BBCredit Not Supported\n", 4883 + stat.un.lsRjtError); 4880 4884 } 4881 4885 break; 4882 4886 ··· 4885 4891 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4886 4892 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4887 4893 ) { 4888 - lpfc_printf_vlog(vport, KERN_ERR, 4889 - LOG_TRACE_EVENT, 4890 - "0122 FDISC Failed (x%x). " 4891 - "Fabric Detected Bad WWN\n", 4892 - stat.un.lsRjtError); 4894 + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 4895 + "0122 FDISC (x%x). 
" 4896 + "Fabric Detected Bad WWN\n", 4897 + stat.un.lsRjtError); 4893 4898 lpfc_vport_set_state(vport, 4894 4899 FC_VPORT_FABRIC_REJ_WWN); 4895 4900 } ··· 5348 5355 u32 ulp_status, ulp_word4, tmo, did, iotag; 5349 5356 5350 5357 if (!vport) { 5351 - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5352 - "3177 ELS response failed\n"); 5358 + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 5359 + "3177 null vport in ELS rsp\n"); 5353 5360 goto out; 5354 5361 } 5355 5362 if (cmdiocb->context_un.mbox) ··· 9651 9658 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err) 9652 9659 continue; 9653 9660 9654 - /* On the ELS ring we can have ELS_REQUESTs or 9655 - * GEN_REQUESTs waiting for a response. 9661 + /* On the ELS ring we can have ELS_REQUESTs, ELS_RSPs, 9662 + * or GEN_REQUESTs waiting for a CQE response. 9656 9663 */ 9657 9664 ulp_command = get_job_cmnd(phba, piocb); 9658 - if (ulp_command == CMD_ELS_REQUEST64_CR) { 9665 + if (ulp_command == CMD_ELS_REQUEST64_WQE || 9666 + ulp_command == CMD_XMIT_ELS_RSP64_WQE) { 9659 9667 list_add_tail(&piocb->dlist, &abort_list); 9660 9668 9661 9669 /* If the link is down when flushing ELS commands ··· 11321 11327 /* Check for retry */ 11322 11328 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 11323 11329 goto out; 11324 - /* FDISC failed */ 11325 - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11326 - "0126 FDISC failed. (x%x/x%x)\n", 11327 - ulp_status, ulp_word4); 11330 + /* Warn FDISC status */ 11331 + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 11332 + "0126 FDISC cmpl status: x%x/x%x)\n", 11333 + ulp_status, ulp_word4); 11328 11334 goto fdisc_failed; 11329 11335 } 11330 11336
+4 -6
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 527 527 * the following lpfc_nlp_put is necessary after fabric node is 528 528 * recovered. 529 529 */ 530 + spin_lock_irqsave(&ndlp->lock, iflags); 531 + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; 532 + spin_unlock_irqrestore(&ndlp->lock, iflags); 530 533 if (recovering) { 531 534 lpfc_printf_vlog(vport, KERN_INFO, 532 535 LOG_DISCOVERY | LOG_NODE, ··· 542 539 spin_lock_irqsave(&ndlp->lock, iflags); 543 540 ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS; 544 541 spin_unlock_irqrestore(&ndlp->lock, iflags); 542 + return fcf_inuse; 545 543 } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 546 544 /* Fabric node fully recovered before this dev_loss_tmo 547 545 * queue work is processed. Thus, ignore the ··· 556 552 ndlp->nlp_DID, kref_read(&ndlp->kref), 557 553 ndlp, ndlp->nlp_flag, 558 554 vport->port_state); 559 - spin_lock_irqsave(&ndlp->lock, iflags); 560 - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; 561 - spin_unlock_irqrestore(&ndlp->lock, iflags); 562 555 return fcf_inuse; 563 556 } 564 557 565 - spin_lock_irqsave(&ndlp->lock, iflags); 566 - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; 567 - spin_unlock_irqrestore(&ndlp->lock, iflags); 568 558 lpfc_nlp_put(ndlp); 569 559 return fcf_inuse; 570 560 }
+21
drivers/scsi/lpfc/lpfc_hw.h
··· 562 562 #include <uapi/scsi/fc/fc_els.h> 563 563 564 564 /* 565 + * Application Header 566 + */ 567 + struct fc_app_header { 568 + uint32_t dst_app_id; 569 + uint32_t src_app_id; 570 + #define LOOPBACK_SRC_APPID 0x4321 571 + uint32_t word2; 572 + uint32_t word3; 573 + }; 574 + 575 + /* 576 + * dfctl optional header definition 577 + */ 578 + enum lpfc_fc_dfctl { 579 + LPFC_FC_NO_DEVICE_HEADER, 580 + LPFC_FC_16B_DEVICE_HEADER, 581 + LPFC_FC_32B_DEVICE_HEADER, 582 + LPFC_FC_64B_DEVICE_HEADER, 583 + }; 584 + 585 + /* 565 586 * Extended Link Service LS_COMMAND codes (Payload Word 0) 566 587 */ 567 588 #ifdef __BIG_ENDIAN_BITFIELD
+3
drivers/scsi/lpfc/lpfc_hw4.h
··· 4847 4847 #define cmd_buff_len_SHIFT 16 4848 4848 #define cmd_buff_len_MASK 0x00000ffff 4849 4849 #define cmd_buff_len_WORD word3 4850 + /* Note: payload_offset_len field depends on ASIC support */ 4850 4851 #define payload_offset_len_SHIFT 0 4851 4852 #define payload_offset_len_MASK 0x0000ffff 4852 4853 #define payload_offset_len_WORD word3 ··· 4864 4863 #define cmd_buff_len_SHIFT 16 4865 4864 #define cmd_buff_len_MASK 0x00000ffff 4866 4865 #define cmd_buff_len_WORD word3 4866 + /* Note: payload_offset_len field depends on ASIC support */ 4867 4867 #define payload_offset_len_SHIFT 0 4868 4868 #define payload_offset_len_MASK 0x0000ffff 4869 4869 #define payload_offset_len_WORD word3 ··· 4881 4879 #define cmd_buff_len_SHIFT 16 4882 4880 #define cmd_buff_len_MASK 0x00000ffff 4883 4881 #define cmd_buff_len_WORD word3 4882 + /* Note: payload_offset_len field depends on ASIC support */ 4884 4883 #define payload_offset_len_SHIFT 0 4885 4884 #define payload_offset_len_MASK 0x0000ffff 4886 4885 #define payload_offset_len_WORD word3
+27 -5
drivers/scsi/lpfc/lpfc_init.c
··· 4699 4699 uint64_t wwn; 4700 4700 bool use_no_reset_hba = false; 4701 4701 int rc; 4702 + u8 if_type; 4702 4703 4703 4704 if (lpfc_no_hba_reset_cnt) { 4704 4705 if (phba->sli_rev < LPFC_SLI_REV4 && ··· 4774 4773 shost->max_id = LPFC_MAX_TARGET; 4775 4774 shost->max_lun = vport->cfg_max_luns; 4776 4775 shost->this_id = -1; 4777 - if (phba->sli_rev == LPFC_SLI_REV4) 4778 - shost->max_cmd_len = LPFC_FCP_CDB_LEN_32; 4779 - else 4776 + 4777 + /* Set max_cmd_len applicable to ASIC support */ 4778 + if (phba->sli_rev == LPFC_SLI_REV4) { 4779 + if_type = bf_get(lpfc_sli_intf_if_type, 4780 + &phba->sli4_hba.sli_intf); 4781 + switch (if_type) { 4782 + case LPFC_SLI_INTF_IF_TYPE_2: 4783 + fallthrough; 4784 + case LPFC_SLI_INTF_IF_TYPE_6: 4785 + shost->max_cmd_len = LPFC_FCP_CDB_LEN_32; 4786 + break; 4787 + default: 4788 + shost->max_cmd_len = LPFC_FCP_CDB_LEN; 4789 + break; 4790 + } 4791 + } else { 4780 4792 shost->max_cmd_len = LPFC_FCP_CDB_LEN; 4793 + } 4781 4794 4782 4795 if (phba->sli_rev == LPFC_SLI_REV4) { 4783 4796 if (!phba->cfg_fcp_mq_threshold || ··· 10451 10436 struct lpfc_vector_map_info *cpup; 10452 10437 struct lpfc_vector_map_info *eqcpup; 10453 10438 struct lpfc_eq_intr_info *eqi; 10439 + u32 wqesize; 10454 10440 10455 10441 /* 10456 10442 * Create HBA Record arrays. ··· 10671 10655 * Create ELS Work Queues 10672 10656 */ 10673 10657 10674 - /* Create slow-path ELS Work Queue */ 10658 + /* 10659 + * Create slow-path ELS Work Queue. 10660 + * Increase the ELS WQ size when WQEs contain an embedded cdb 10661 + */ 10662 + wqesize = (phba->fcp_embed_io) ? 10663 + LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 10664 + 10675 10665 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10676 - phba->sli4_hba.wq_esize, 10666 + wqesize, 10677 10667 phba->sli4_hba.wq_ecount, cpu); 10678 10668 if (!qdesc) { 10679 10669 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+1 -1
drivers/scsi/lpfc/lpfc_scsi.c
··· 4760 4760 4761 4761 /* Word 3 */ 4762 4762 bf_set(payload_offset_len, &wqe->fcp_icmd, 4763 - sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp)); 4763 + sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 4764 4764 4765 4765 /* Word 6 */ 4766 4766 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+45 -7
drivers/scsi/lpfc/lpfc_sli.c
··· 1940 1940 atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0); 1941 1941 wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0); 1942 1942 1943 + spin_lock_irqsave(&phba->hbalock, iflags); 1944 + 1943 1945 /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */ 1944 1946 if (phba->cmf_active_mode != LPFC_CFG_MANAGED || 1945 - phba->link_state == LPFC_LINK_DOWN) 1946 - return 0; 1947 + phba->link_state < LPFC_LINK_UP) { 1948 + ret_val = 0; 1949 + goto out_unlock; 1950 + } 1947 1951 1948 - spin_lock_irqsave(&phba->hbalock, iflags); 1949 1952 sync_buf = __lpfc_sli_get_iocbq(phba); 1950 1953 if (!sync_buf) { 1951 1954 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, ··· 8821 8818 rc = lpfc_sli4_queue_setup(phba); 8822 8819 if (unlikely(rc)) { 8823 8820 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8824 - "0381 Error %d during queue setup.\n ", rc); 8821 + "0381 Error %d during queue setup.\n", rc); 8825 8822 goto out_stop_timers; 8826 8823 } 8827 8824 /* Initialize the driver internal SLI layer lists. 
*/ ··· 11093 11090 /* Word 9 */ 11094 11091 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id); 11095 11092 11096 - /* Word 12 */ 11097 - if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) 11093 + if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) { 11094 + /* Word 10 */ 11095 + if (cmdiocbq->cmd_flag & LPFC_IO_VMID) { 11096 + bf_set(wqe_appid, &wqe->xmit_sequence.wqe_com, 1); 11097 + bf_set(wqe_wqes, &wqe->xmit_sequence.wqe_com, 1); 11098 + wqe->words[31] = LOOPBACK_SRC_APPID; 11099 + } 11100 + 11101 + /* Word 12 */ 11098 11102 wqe->xmit_sequence.xmit_len = full_size; 11103 + } 11099 11104 else 11100 11105 wqe->xmit_sequence.xmit_len = 11101 11106 wqe->xmit_sequence.bde.tus.f.bdeSize; ··· 18442 18431 { 18443 18432 /* make rctl_names static to save stack space */ 18444 18433 struct fc_vft_header *fc_vft_hdr; 18434 + struct fc_app_header *fc_app_hdr; 18445 18435 uint32_t *header = (uint32_t *) fc_hdr; 18446 18436 18447 18437 #define FC_RCTL_MDS_DIAGS 0xF4 ··· 18496 18484 case FC_TYPE_ILS: 18497 18485 default: 18498 18486 goto drop; 18487 + } 18488 + 18489 + if (unlikely(phba->link_flag == LS_LOOPBACK_MODE && 18490 + phba->cfg_vmid_app_header)) { 18491 + /* Application header is 16B device header */ 18492 + if (fc_hdr->fh_df_ctl & LPFC_FC_16B_DEVICE_HEADER) { 18493 + fc_app_hdr = (struct fc_app_header *) (fc_hdr + 1); 18494 + if (be32_to_cpu(fc_app_hdr->src_app_id) != 18495 + LOOPBACK_SRC_APPID) { 18496 + lpfc_printf_log(phba, KERN_WARNING, 18497 + LOG_ELS | LOG_LIBDFC, 18498 + "1932 Loopback src app id " 18499 + "not matched, app_id:x%x\n", 18500 + be32_to_cpu(fc_app_hdr->src_app_id)); 18501 + 18502 + goto drop; 18503 + } 18504 + } else { 18505 + lpfc_printf_log(phba, KERN_WARNING, 18506 + LOG_ELS | LOG_LIBDFC, 18507 + "1933 Loopback df_ctl bit not set, " 18508 + "df_ctl:x%x\n", 18509 + fc_hdr->fh_df_ctl); 18510 + 18511 + goto drop; 18512 + } 18499 18513 } 18500 18514 18501 18515 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, ··· 21187 21149 if 
(!piocbq) { 21188 21150 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21189 21151 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 21190 - "2823 txq empty and txq_cnt is %d\n ", 21152 + "2823 txq empty and txq_cnt is %d\n", 21191 21153 txq_cnt); 21192 21154 break; 21193 21155 }
+1 -1
drivers/scsi/lpfc/lpfc_version.h
··· 20 20 * included with this package. * 21 21 *******************************************************************/ 22 22 23 - #define LPFC_DRIVER_VERSION "14.4.0.4" 23 + #define LPFC_DRIVER_VERSION "14.4.0.5" 24 24 #define LPFC_DRIVER_NAME "lpfc" 25 25 26 26 /* Used for SLI 2/3 */
+36 -7
drivers/scsi/lpfc/lpfc_vport.c
··· 626 626 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 627 627 struct lpfc_hba *phba = vport->phba; 628 628 int rc; 629 + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 629 630 630 631 if (vport->port_type == LPFC_PHYSICAL_PORT) { 631 632 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, ··· 680 679 if (!ndlp) 681 680 goto skip_logo; 682 681 682 + /* Send the DA_ID and Fabric LOGO to cleanup the NPIV fabric entries. */ 683 683 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && 684 684 phba->link_state >= LPFC_LINK_UP && 685 685 phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 686 686 if (vport->cfg_enable_da_id) { 687 - /* Send DA_ID and wait for a completion. */ 687 + /* Send DA_ID and wait for a completion. This is best 688 + * effort. If the DA_ID fails, likely the fabric will 689 + * "leak" NportIDs but at least the driver issued the 690 + * command. 691 + */ 692 + ndlp = lpfc_findnode_did(vport, NameServer_DID); 693 + if (!ndlp) 694 + goto issue_logo; 695 + 696 + spin_lock_irq(&ndlp->lock); 697 + ndlp->da_id_waitq = &waitq; 698 + ndlp->save_flags |= NLP_WAIT_FOR_DA_ID; 699 + spin_unlock_irq(&ndlp->lock); 700 + 688 701 rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0); 689 - if (rc) { 690 - lpfc_printf_log(vport->phba, KERN_WARNING, 691 - LOG_VPORT, 692 - "1829 CT command failed to " 693 - "delete objects on fabric, " 694 - "rc %d\n", rc); 702 + if (!rc) { 703 + wait_event_timeout(waitq, 704 + !(ndlp->save_flags & NLP_WAIT_FOR_DA_ID), 705 + msecs_to_jiffies(phba->fc_ratov * 2000)); 695 706 } 707 + 708 + lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS, 709 + "1829 DA_ID issue status %d. " 710 + "SFlag x%x NState x%x, NFlag x%x " 711 + "Rpi x%x\n", 712 + rc, ndlp->save_flags, ndlp->nlp_state, 713 + ndlp->nlp_flag, ndlp->nlp_rpi); 714 + 715 + /* Remove the waitq and save_flags. It no 716 + * longer matters if the wake happened. 
717 + */ 718 + spin_lock_irq(&ndlp->lock); 719 + ndlp->da_id_waitq = NULL; 720 + ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID; 721 + spin_unlock_irq(&ndlp->lock); 696 722 } 697 723 724 + issue_logo: 698 725 /* 699 726 * If the vpi is not registered, then a valid FDISC doesn't 700 727 * exist and there is no need for a ELS LOGO. Just cleanup
+1 -1
drivers/scsi/megaraid/megaraid_sas_base.c
··· 6380 6380 GFP_KERNEL); 6381 6381 if (!fusion->stream_detect_by_ld[i]) { 6382 6382 dev_err(&instance->pdev->dev, 6383 - "unable to allocate stream detect by LD\n "); 6383 + "unable to allocate stream detect by LD\n"); 6384 6384 for (j = 0; j < i; ++j) 6385 6385 kfree(fusion->stream_detect_by_ld[j]); 6386 6386 kfree(fusion->stream_detect_by_ld);
+32 -3
drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
··· 67 67 #define MPI3_SECURITY_PGAD_SLOT_GROUP_MASK (0x0000ff00) 68 68 #define MPI3_SECURITY_PGAD_SLOT_GROUP_SHIFT (8) 69 69 #define MPI3_SECURITY_PGAD_SLOT_MASK (0x000000ff) 70 + #define MPI3_INSTANCE_PGAD_INSTANCE_MASK (0x0000ffff) 70 71 struct mpi3_config_request { 71 72 __le16 host_tag; 72 73 u8 ioc_use_only02; ··· 76 75 u8 ioc_use_only06; 77 76 u8 msg_flags; 78 77 __le16 change_count; 79 - __le16 reserved0a; 78 + u8 proxy_ioc_number; 79 + u8 reserved0b; 80 80 u8 page_version; 81 81 u8 page_number; 82 82 u8 page_type; ··· 208 206 #define MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT (0x00b5) 209 207 #define MPI3_MFGPAGE_DEVID_SAS5116_NVME_MGMT (0x00b6) 210 208 #define MPI3_MFGPAGE_DEVID_SAS5116_PCIE_SWITCH (0x00b8) 209 + #define MPI3_MFGPAGE_DEVID_SAS5248_MPI (0x00f0) 210 + #define MPI3_MFGPAGE_DEVID_SAS5248_MPI_NS (0x00f1) 211 + #define MPI3_MFGPAGE_DEVID_SAS5248_PCIE_SWITCH (0x00f2) 211 212 struct mpi3_man_page0 { 212 213 struct mpi3_config_page_header header; 213 214 u8 chip_revision[8]; ··· 1079 1074 #define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04) 1080 1075 #define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02) 1081 1076 #define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01) 1077 + #define MPI3_IOUNIT8_SBMODE_CURRENT_KEY_IOUNIT17 (0x10) 1078 + #define MPI3_IOUNIT8_SBMODE_HARD_SECURE_RECERTIFIED (0x08) 1082 1079 struct mpi3_io_unit_page9 { 1083 1080 struct mpi3_config_page_header header; 1084 1081 __le32 flags; ··· 1096 1089 #define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_BACKPLANE_TYPE (0x00000004) 1097 1090 #define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x00000001) 1098 1091 #define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff) 1092 + #define MPI3_IOUNIT9_FIRSTDEVICE_IN_DRIVER_PAGE_0 (0xfffe) 1093 + 1099 1094 struct mpi3_io_unit_page10 { 1100 1095 struct mpi3_config_page_header header; 1101 1096 u8 flags; ··· 1233 1224 #define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITHOUT_POWER_BRAKE_GPIO (0x01) 1234 1225 #define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITH_POWER_BRAKE_GPIO (0x02) 
1235 1226 #define MPI3_IOUNIT15_NUMPOWERBUDGETDATA_POWER_BUDGETING_DISABLED (0x00) 1227 + 1228 + struct mpi3_io_unit_page17 { 1229 + struct mpi3_config_page_header header; 1230 + u8 num_instances; 1231 + u8 instance; 1232 + __le16 reserved0a; 1233 + __le32 reserved0c[4]; 1234 + __le16 key_length; 1235 + u8 encryption_algorithm; 1236 + u8 reserved1f; 1237 + __le32 current_key[]; 1238 + }; 1239 + #define MPI3_IOUNIT17_PAGEVERSION (0x00) 1236 1240 struct mpi3_ioc_page0 { 1237 1241 struct mpi3_config_page_header header; 1238 1242 __le32 reserved08; ··· 1333 1311 u8 tur_interval; 1334 1312 u8 reserved10; 1335 1313 u8 security_key_timeout; 1336 - __le16 reserved12; 1314 + __le16 first_device; 1337 1315 __le32 reserved14; 1338 1316 __le32 reserved18; 1339 1317 }; ··· 1346 1324 #define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000) 1347 1325 #define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001) 1348 1326 #define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_INTERNAL_DEVS (0x00000002) 1327 + #define MPI3_DRIVER0_FIRSTDEVICE_IGNORE1 (0x0000) 1328 + #define MPI3_DRIVER0_FIRSTDEVICE_IGNORE2 (0xffff) 1349 1329 struct mpi3_driver_page1 { 1350 1330 struct mpi3_config_page_header header; 1351 1331 __le32 flags; 1352 - __le32 reserved0c; 1332 + u8 time_stamp_update; 1333 + u8 reserved0d[3]; 1353 1334 __le16 host_diag_trace_max_size; 1354 1335 __le16 host_diag_trace_min_size; 1355 1336 __le16 host_diag_trace_decrement_size; ··· 2372 2347 #define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001) 2373 2348 #define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xf000) 2374 2349 #define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_SHIFT (12) 2350 + #define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_MASK (0x0003) 2351 + #define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_HDD (0x0000) 2352 + #define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_SSD (0x0001) 2353 + #define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_NO_GUIDANCE (0x0002) 2375 2354 union mpi3_device0_dev_spec_format { 2376 2355 struct 
mpi3_device0_sas_sata_format sas_sata_format; 2377 2356 struct mpi3_device0_pcie_format pcie_format;
+10 -3
drivers/scsi/mpi3mr/mpi/mpi30_image.h
··· 205 205 u8 hash_image_type; 206 206 u8 hash_algorithm; 207 207 u8 encryption_algorithm; 208 - u8 reserved03; 208 + u8 flags; 209 209 __le16 public_key_size; 210 210 __le16 signature_size; 211 211 __le32 public_key[MPI3_PUBLIC_KEY_MAX]; 212 212 }; 213 - 214 - #define MPI3_HASH_IMAGE_TYPE_KEY_WITH_SIGNATURE (0x03) 213 + #define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH (0x03) 214 + #define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_1_OF_2 (0x04) 215 + #define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_2_OF_2 (0x05) 215 216 #define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0) 216 217 #define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00) 217 218 #define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20) ··· 230 229 #define MPI3_ENCRYPTION_ALGORITHM_RSA2048 (0x04) 231 230 #define MPI3_ENCRYPTION_ALGORITHM_RSA4096 (0x05) 232 231 #define MPI3_ENCRYPTION_ALGORITHM_RSA3072 (0x06) 232 + 233 + /* hierarchical signature system (hss) */ 234 + #define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_87 (0x0b) 235 + #define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_65 (0x0c) 236 + #define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_44 (0x0d) 237 + #define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_MASK (0x0f) 233 238 234 239 #ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX 235 240 #define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
+8
drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
··· 39 39 #define MPI3_WHOINIT_HOST_DRIVER (0x03) 40 40 #define MPI3_WHOINIT_MANUFACTURER (0x04) 41 41 42 + #define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_MASK (0x00000003) 43 + #define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_GUIDANCE (0x00000000) 44 + #define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_SPECIAL (0x00000001) 45 + #define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_HDD (0x00000002) 46 + #define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_SSD (0x00000003) 47 + 42 48 struct mpi3_ioc_facts_request { 43 49 __le16 host_tag; 44 50 u8 ioc_use_only02; ··· 146 140 #define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020) 147 141 #define MPI3_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0010) 148 142 #define MPI3_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0008) 143 + #define MPI3_IOCFACTS_EXCEPT_BLOCKING_BOOT_EVENT (0x0004) 144 + #define MPI3_IOCFACTS_EXCEPT_SECURITY_SELFTEST_FAILURE (0x0002) 149 145 #define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001) 150 146 #define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000) 151 147 #define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001)
+3 -1
drivers/scsi/mpi3mr/mpi/mpi30_transport.h
··· 18 18 19 19 #define MPI3_VERSION_MAJOR (3) 20 20 #define MPI3_VERSION_MINOR (0) 21 - #define MPI3_VERSION_UNIT (31) 21 + #define MPI3_VERSION_UNIT (34) 22 22 #define MPI3_VERSION_DEV (0) 23 23 #define MPI3_DEVHANDLE_INVALID (0xffff) 24 24 struct mpi3_sysif_oper_queue_indexes { ··· 158 158 #define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004) 159 159 #define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005) 160 160 #define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006) 161 + #define MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER (0x0000f007) 161 162 #define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14) 162 163 #define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18) 163 164 #define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c) ··· 411 410 #define MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006) 412 411 #define MPI3_IOCSTATUS_INVALID_FIELD (0x0007) 413 412 #define MPI3_IOCSTATUS_INVALID_STATE (0x0008) 413 + #define MPI3_IOCSTATUS_SHUTDOWN_ACTIVE (0x0009) 414 414 #define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a) 415 415 #define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b) 416 416 #define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c)
+6 -4
drivers/scsi/mpi3mr/mpi3mr.h
··· 57 57 extern int prot_mask; 58 58 extern atomic64_t event_counter; 59 59 60 - #define MPI3MR_DRIVER_VERSION "8.10.0.5.50" 61 - #define MPI3MR_DRIVER_RELDATE "08-Aug-2024" 60 + #define MPI3MR_DRIVER_VERSION "8.12.0.0.50" 61 + #define MPI3MR_DRIVER_RELDATE "05-Sept-2024" 62 62 63 63 #define MPI3MR_DRIVER_NAME "mpi3mr" 64 64 #define MPI3MR_DRIVER_LICENSE "GPL" ··· 178 178 #define MPI3MR_DEFAULT_SDEV_QD 32 179 179 180 180 /* Definitions for Threaded IRQ poll*/ 181 - #define MPI3MR_IRQ_POLL_SLEEP 2 181 + #define MPI3MR_IRQ_POLL_SLEEP 20 182 182 #define MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT 8 183 183 184 184 /* Definitions for the controller security status*/ ··· 1090 1090 * @evtack_cmds_bitmap: Event Ack bitmap 1091 1091 * @delayed_evtack_cmds_list: Delayed event acknowledgment list 1092 1092 * @ts_update_counter: Timestamp update counter 1093 + * @ts_update_interval: Timestamp update interval 1093 1094 * @reset_in_progress: Reset in progress flag 1094 1095 * @unrecoverable: Controller unrecoverable flag 1095 1096 * @prev_reset_result: Result of previous reset ··· 1278 1277 unsigned long *evtack_cmds_bitmap; 1279 1278 struct list_head delayed_evtack_cmds_list; 1280 1279 1281 - u32 ts_update_counter; 1280 + u16 ts_update_counter; 1281 + u16 ts_update_interval; 1282 1282 u8 reset_in_progress; 1283 1283 u8 unrecoverable; 1284 1284 int prev_reset_result;
+62 -17
drivers/scsi/mpi3mr/mpi3mr_fw.c
··· 728 728 mpi3mr_process_op_reply_q(mrioc, 729 729 intr_info->op_reply_q); 730 730 731 - usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP); 731 + usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1); 732 732 733 733 } while (atomic_read(&intr_info->op_reply_q->pend_ios) && 734 734 (num_op_reply < mrioc->max_host_ios)); ··· 1362 1362 int retval = 0; 1363 1363 enum mpi3mr_iocstate ioc_state; 1364 1364 u64 base_info; 1365 + u8 retry = 0; 1366 + u64 start_time, elapsed_time_sec; 1367 + 1368 + retry_bring_ioc_ready: 1365 1369 1366 1370 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1367 1371 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); ··· 1384 1380 ioc_info(mrioc, "controller is in %s state during detection\n", 1385 1381 mpi3mr_iocstate_name(ioc_state)); 1386 1382 1387 - if (ioc_state == MRIOC_STATE_BECOMING_READY || 1388 - ioc_state == MRIOC_STATE_RESET_REQUESTED) { 1389 - timeout = mrioc->ready_timeout * 10; 1390 - do { 1391 - msleep(100); 1392 - } while (--timeout); 1383 + timeout = mrioc->ready_timeout * 10; 1384 + 1385 + do { 1386 + ioc_state = mpi3mr_get_iocstate(mrioc); 1387 + 1388 + if (ioc_state != MRIOC_STATE_BECOMING_READY && 1389 + ioc_state != MRIOC_STATE_RESET_REQUESTED) 1390 + break; 1393 1391 1394 1392 if (!pci_device_is_present(mrioc->pdev)) { 1395 1393 mrioc->unrecoverable = 1; 1396 - ioc_err(mrioc, 1397 - "controller is not present while waiting to reset\n"); 1398 - retval = -1; 1394 + ioc_err(mrioc, "controller is not present while waiting to reset\n"); 1399 1395 goto out_device_not_present; 1400 1396 } 1401 1397 1402 - ioc_state = mpi3mr_get_iocstate(mrioc); 1403 - ioc_info(mrioc, 1404 - "controller is in %s state after waiting to reset\n", 1405 - mpi3mr_iocstate_name(ioc_state)); 1406 - } 1398 + msleep(100); 1399 + } while (--timeout); 1407 1400 1408 1401 if (ioc_state == MRIOC_STATE_READY) { 1409 1402 ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n"); ··· 1461 1460 
ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; 1462 1461 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1463 1462 1463 + if (retry == 0) 1464 + start_time = jiffies; 1465 + 1464 1466 timeout = mrioc->ready_timeout * 10; 1465 1467 do { 1466 1468 ioc_state = mpi3mr_get_iocstate(mrioc); ··· 1473 1469 mpi3mr_iocstate_name(ioc_state)); 1474 1470 return 0; 1475 1471 } 1472 + ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1473 + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 1474 + (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 1475 + mpi3mr_print_fault_info(mrioc); 1476 + goto out_failed; 1477 + } 1476 1478 if (!pci_device_is_present(mrioc->pdev)) { 1477 1479 mrioc->unrecoverable = 1; 1478 1480 ioc_err(mrioc, ··· 1487 1477 goto out_device_not_present; 1488 1478 } 1489 1479 msleep(100); 1490 - } while (--timeout); 1480 + elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000; 1481 + } while (elapsed_time_sec < mrioc->ready_timeout); 1491 1482 1492 1483 out_failed: 1484 + elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000; 1485 + if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) { 1486 + retry++; 1487 + 1488 + ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n" 1489 + " elapsed time =%llu\n", retry, elapsed_time_sec); 1490 + 1491 + goto retry_bring_ioc_ready; 1492 + } 1493 1493 ioc_state = mpi3mr_get_iocstate(mrioc); 1494 1494 ioc_err(mrioc, 1495 1495 "failed to bring to ready state, current state: %s\n", ··· 2691 2671 return; 2692 2672 } 2693 2673 2694 - if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) { 2674 + if (mrioc->ts_update_counter++ >= mrioc->ts_update_interval) { 2695 2675 mrioc->ts_update_counter = 0; 2696 2676 mpi3mr_sync_timestamp(mrioc); 2697 2677 } ··· 3865 3845 } 3866 3846 3867 3847 /** 3848 + * mpi3mr_read_tsu_interval - Update time stamp interval 3849 + * @mrioc: Adapter instance reference 3850 + * 3851 + * Update time stamp interval if its defined in driver page 1, 
3852 + * otherwise use default value. 3853 + * 3854 + * Return: Nothing 3855 + */ 3856 + static void 3857 + mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc) 3858 + { 3859 + struct mpi3_driver_page1 driver_pg1; 3860 + u16 pg_sz = sizeof(driver_pg1); 3861 + int retval = 0; 3862 + 3863 + mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL; 3864 + 3865 + retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz); 3866 + if (!retval && driver_pg1.time_stamp_update) 3867 + mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60); 3868 + } 3869 + 3870 + /** 3868 3871 * mpi3mr_print_ioc_info - Display controller information 3869 3872 * @mrioc: Adapter instance reference 3870 3873 * ··· 4183 4140 goto out_failed_noretry; 4184 4141 } 4185 4142 4143 + mpi3mr_read_tsu_interval(mrioc); 4186 4144 mpi3mr_print_ioc_info(mrioc); 4187 4145 4188 4146 if (!mrioc->cfg_page) { ··· 4365 4321 goto out_failed_noretry; 4366 4322 } 4367 4323 4324 + mpi3mr_read_tsu_interval(mrioc); 4368 4325 mpi3mr_print_ioc_info(mrioc); 4369 4326 4370 4327 if (is_resume) {
+2 -3
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 8898 8898 ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL); 8899 8899 if (!device_remove_in_progress) { 8900 8900 ioc_info(ioc, 8901 - "Unable to allocate the memory for " 8902 - "device_remove_in_progress of sz: %d\n " 8903 - , pd_handles_sz); 8901 + "Unable to allocate the memory for device_remove_in_progress of sz: %d\n", 8902 + pd_handles_sz); 8904 8903 return -ENOMEM; 8905 8904 } 8906 8905 memset(device_remove_in_progress +
+4 -2
drivers/scsi/pm8001/pm8001_init.c
··· 100 100 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 101 101 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 102 102 103 - if (pm8001_ha->number_of_intr > 1) 103 + if (pm8001_ha->number_of_intr > 1) { 104 104 blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1); 105 + return; 106 + } 105 107 106 - return blk_mq_map_queues(qmap); 108 + blk_mq_map_queues(qmap); 107 109 } 108 110 109 111 /*
+1 -1
drivers/scsi/pm8001/pm80xx_hwi.c
··· 2037 2037 atomic_dec(&pm8001_dev->running_req); 2038 2038 break; 2039 2039 } 2040 - pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n ", 2040 + pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n", 2041 2041 psspPayload->ssp_resp_iu.status); 2042 2042 spin_lock_irqsave(&t->task_state_lock, flags); 2043 2043 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+1 -1
drivers/scsi/pmcraid.c
··· 1946 1946 } 1947 1947 1948 1948 iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg); 1949 - ioread32(pinstance->int_regs.host_ioa_interrupt_reg), 1949 + ioread32(pinstance->int_regs.host_ioa_interrupt_reg); 1950 1950 int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); 1951 1951 1952 1952 pmcraid_info("Waiting for IOA to become operational %x:%x\n",
+1 -1
drivers/scsi/qedf/qedf_io.c
··· 310 310 311 311 if (!free_sqes) { 312 312 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 313 - "Returning NULL, free_sqes=%d.\n ", 313 + "Returning NULL, free_sqes=%d.\n", 314 314 free_sqes); 315 315 goto out_failed; 316 316 }
-1
drivers/scsi/scsi_debug.c
··· 2760 2760 else 2761 2761 bd_len = 0; 2762 2762 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7); 2763 - memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); 2764 2763 if (0x3 == pcontrol) { /* Saving values not supported */ 2765 2764 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0); 2766 2765 return check_condition_result;
+30 -2
drivers/scsi/sd.c
··· 38 38 #include <linux/fs.h> 39 39 #include <linux/kernel.h> 40 40 #include <linux/mm.h> 41 - #include <linux/bio-integrity.h> 42 41 #include <linux/hdreg.h> 43 42 #include <linux/errno.h> 44 43 #include <linux/idr.h> ··· 3403 3404 rcu_read_lock(); 3404 3405 vpd = rcu_dereference(sdkp->device->vpd_pgb1); 3405 3406 3406 - if (!vpd || vpd->len < 8) { 3407 + if (!vpd || vpd->len <= 8) { 3407 3408 rcu_read_unlock(); 3408 3409 return; 3409 3410 } ··· 4092 4093 { 4093 4094 unsigned char cmd[6] = { START_STOP }; /* START_VALID */ 4094 4095 struct scsi_sense_hdr sshdr; 4096 + struct scsi_failure failure_defs[] = { 4097 + { 4098 + /* Power on, reset, or bus device reset occurred */ 4099 + .sense = UNIT_ATTENTION, 4100 + .asc = 0x29, 4101 + .ascq = 0, 4102 + .result = SAM_STAT_CHECK_CONDITION, 4103 + }, 4104 + { 4105 + /* Power on occurred */ 4106 + .sense = UNIT_ATTENTION, 4107 + .asc = 0x29, 4108 + .ascq = 1, 4109 + .result = SAM_STAT_CHECK_CONDITION, 4110 + }, 4111 + { 4112 + /* SCSI bus reset */ 4113 + .sense = UNIT_ATTENTION, 4114 + .asc = 0x29, 4115 + .ascq = 2, 4116 + .result = SAM_STAT_CHECK_CONDITION, 4117 + }, 4118 + {} 4119 + }; 4120 + struct scsi_failures failures = { 4121 + .total_allowed = 3, 4122 + .failure_definitions = failure_defs, 4123 + }; 4095 4124 const struct scsi_exec_args exec_args = { 4096 4125 .sshdr = &sshdr, 4097 4126 .req_flags = BLK_MQ_REQ_PM, 4127 + .failures = &failures, 4098 4128 }; 4099 4129 struct scsi_device *sdp = sdkp->device; 4100 4130 int res;
+3 -2
drivers/scsi/st.c
··· 834 834 int backspace, result; 835 835 struct st_partstat *STps; 836 836 837 + if (STp->ready != ST_READY) 838 + return 0; 839 + 837 840 /* 838 841 * If there was a bus reset, block further access 839 842 * to this device. ··· 844 841 if (STp->pos_unknown) 845 842 return (-EIO); 846 843 847 - if (STp->ready != ST_READY) 848 - return 0; 849 844 STps = &(STp->ps[STp->partition]); 850 845 if (STps->rw == ST_WRITING) /* Writing */ 851 846 return st_flush_write_buffer(STp);
+1 -1
drivers/scsi/zalon.c
··· 139 139 return -ENODEV; 140 140 141 141 if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { 142 - dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ", 142 + dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n", 143 143 dev->irq); 144 144 goto fail; 145 145 }
+1 -1
drivers/ufs/host/ufs-qcom.c
··· 93 93 [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 }, 94 94 [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 }, 95 95 [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 }, 96 - [MODE_MAX][0][0] = { 7643136, 307200 }, 96 + [MODE_MAX][0][0] = { 7643136, 819200 }, 97 97 }; 98 98 99 99 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);