Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
"Updates to the usual drivers (ufs, lpfc, qla2xxx, mpi3mr) plus some
misc small fixes.

The only core changes are to both bsg and scsi to pass in the device
instead of setting it afterwards as q->queuedata, so no functional
change"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (69 commits)
scsi: aha152x: Use DECLARE_COMPLETION_ONSTACK for non-constant completion
scsi: qla2xxx: Convert comma to semicolon
scsi: qla2xxx: Update version to 10.02.09.300-k
scsi: qla2xxx: Use QP lock to search for bsg
scsi: qla2xxx: Reduce fabric scan duplicate code
scsi: qla2xxx: Fix optrom version displayed in FDMI
scsi: qla2xxx: During vport delete send async logout explicitly
scsi: qla2xxx: Complete command early within lock
scsi: qla2xxx: Fix flash read failure
scsi: qla2xxx: Return ENOBUFS if sg_cnt is more than one for ELS cmds
scsi: qla2xxx: Fix for possible memory corruption
scsi: qla2xxx: validate nvme_local_port correctly
scsi: qla2xxx: Unable to act on RSCN for port online
scsi: ufs: exynos: Add support for Flash Memory Protector (FMP)
scsi: ufs: core: Add UFSHCD_QUIRK_KEYS_IN_PRDT
scsi: ufs: core: Add fill_crypto_prdt variant op
scsi: ufs: core: Add UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE
scsi: ufs: core: fold ufshcd_clear_keyslot() into its caller
scsi: ufs: core: Add UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE
scsi: ufs: mcq: Make .get_hba_mac() optional
...

+3144 -618
+8 -6
Documentation/ABI/testing/sysfs-driver-ufs
···
 
 What:		/sys/bus/platform/drivers/ufshcd/*/attributes/max_number_of_rtt
 What:		/sys/bus/platform/devices/*.ufs/attributes/max_number_of_rtt
-Date:		February 2018
-Contact:	Stanislav Nijnikov <stanislav.nijnikov@wdc.com>
+Date:		May 2024
+Contact:	Avri Altman <avri.altman@wdc.com>
 Description:	This file provides the maximum current number of
-		outstanding RTTs in device that is allowed. The full
-		information about the attribute could be found at
-		UFS specifications 2.1.
+		outstanding RTTs in device that is allowed. bMaxNumOfRTT is a
+		read-write persistent attribute and is equal to two after device
+		manufacturing. It shall not be set to a value greater than
+		bDeviceRTTCap value, and it may be set only when the hw queues are
+		empty.
 
-		The file is read only.
+		The file is read write.
 
 What:		/sys/bus/platform/drivers/ufshcd/*/attributes/exception_event_control
 What:		/sys/bus/platform/devices/*.ufs/attributes/exception_event_control
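With bMaxNumOfRTT now writable, the attribute can be tuned from user space through this file. A hedged user-space sketch follows; the device path is hypothetical (the "*.ufs" wildcard must be resolved to a real platform device), the hexadecimal print format is an assumption, and the write can fail unless it satisfies the constraints documented above (no greater than bDeviceRTTCap, hw queues empty):

    #include <stdio.h>

    /*
     * Sketch only: read, then update, max_number_of_rtt now that the
     * attribute is read-write. Path and formats are assumptions.
     */
    int main(void)
    {
        const char *path =
            "/sys/bus/platform/devices/12345000.ufs/attributes/max_number_of_rtt";
        unsigned int rtt = 0;
        FILE *f = fopen(path, "r");

        if (!f)
            return 1;
        if (fscanf(f, "%x", &rtt) == 1)   /* assumes hex output */
            printf("current bMaxNumOfRTT: %u\n", rtt);
        fclose(f);

        f = fopen(path, "w");             /* separate open for the store */
        if (!f)
            return 1;
        fprintf(f, "2\n");                /* power-on default per the ABI text */
        fclose(f);
        return 0;
    }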
+1 -2
block/bsg-lib.c
···
 	if (blk_mq_alloc_tag_set(set))
 		goto out_tag_set;
 
-	q = blk_mq_alloc_queue(set, lim, NULL);
+	q = blk_mq_alloc_queue(set, lim, dev);
 	if (IS_ERR(q)) {
 		ret = PTR_ERR(q);
 		goto out_queue;
 	}
 
-	q->queuedata = dev;
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
 	bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);
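This hunk is the bsg half of the core change called out in the pull message: the device is handed to blk_mq_alloc_queue() at allocation time instead of being patched into q->queuedata afterwards, so the queue never exists without its driver data. A minimal sketch of the consumer side, with a hypothetical request handler that is not part of this diff:

    #include <linux/blk-mq.h>

    /*
     * Hypothetical illustration: before this change a request could in
     * principle be dispatched between blk_mq_alloc_queue() and the
     * "q->queuedata = dev" assignment and observe a NULL queuedata;
     * allocating with the device attached makes the lookup below valid
     * from the very first dispatch.
     */
    static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                         const struct blk_mq_queue_data *bd)
    {
        struct request *rq = bd->rq;
        /* queuedata was set atomically with queue allocation */
        struct device *dev = rq->q->queuedata;

        blk_mq_start_request(rq);
        /* ... issue the command against 'dev' ... */
        return BLK_STS_OK;
    }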
+1
drivers/scsi/BusLogic.c
···
   BusLogic can be assigned a string by insmod.
 */
 
+MODULE_DESCRIPTION("BusLogic MultiMaster and FlashPoint SCSI Host Adapter driver");
 MODULE_LICENSE("GPL");
 #ifdef MODULE
 static char *BusLogic;
+1
drivers/scsi/advansys.c
···
 module_init(advansys_init);
 module_exit(advansys_exit);
 
+MODULE_DESCRIPTION("AdvanSys SCSI Adapter driver");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE("advansys/mcode.bin");
 MODULE_FIRMWARE("advansys/3550.bin");
+1 -1
drivers/scsi/aha152x.c
···
 static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
 {
 	struct Scsi_Host *shpnt = SCpnt->device->host;
-	DECLARE_COMPLETION(done);
+	DECLARE_COMPLETION_ONSTACK(done);
 	int ret, issued, disconnected;
 	unsigned char old_cmd_len = SCpnt->cmd_len;
 	unsigned long flags;
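DECLARE_COMPLETION() defines a completion with a static lockdep class, which is only correct for file-scope objects; a completion that lives on the stack needs DECLARE_COMPLETION_ONSTACK() so each instance gets its own key and debug infrastructure can track its lifetime. A minimal sketch of the pattern, with hypothetical names:

    #include <linux/completion.h>

    /*
     * Hypothetical waiter: the completion is an automatic variable, so
     * the _ONSTACK form is required, exactly as in the hunk above.
     */
    static int example_wait_for_reset(void (*fire)(struct completion *))
    {
        DECLARE_COMPLETION_ONSTACK(done);   /* not DECLARE_COMPLETION() */

        fire(&done);                /* completer eventually calls complete(&done) */
        wait_for_completion(&done); /* sleep until then */
        return 0;
    }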
+2
drivers/scsi/aha1542.c
···
 
 	return 0;
 }
+
+MODULE_DESCRIPTION("Adaptec AHA-1542 SCSI host adapter driver");
 MODULE_LICENSE("GPL");
 
 static int aha1542_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+1
drivers/scsi/aha1740.c
···
 module_init (aha1740_init);
 module_exit (aha1740_exit);
 
+MODULE_DESCRIPTION("Adaptec AHA1740 SCSI host adapter driver");
 MODULE_LICENSE("GPL");
+4 -5
drivers/scsi/arm/acornscsi.c
···
 	return 0;
 }
 
-DEF_SCSI_QCMD(acornscsi_queuecmd)
+static DEF_SCSI_QCMD(acornscsi_queuecmd)
 
 enum res_abort { res_not_running, res_success, res_success_clear, res_snooze };
···
  * Params : SCpnt - command to abort
  * Returns : one of SCSI_ABORT_ macros
  */
-int acornscsi_abort(struct scsi_cmnd *SCpnt)
+static int acornscsi_abort(struct scsi_cmnd *SCpnt)
 {
 	AS_Host *host = (AS_Host *) SCpnt->device->host->hostdata;
 	int result;
···
  * Params : SCpnt - command causing reset
  * Returns : one of SCSI_RESET_ macros
  */
-int acornscsi_host_reset(struct scsi_cmnd *SCpnt)
+static int acornscsi_host_reset(struct scsi_cmnd *SCpnt)
 {
 	AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
 	struct scsi_cmnd *SCptr;
···
  * Params : host - host to give information on
  * Returns : a constant string
  */
-const
-char *acornscsi_info(struct Scsi_Host *host)
+static const char *acornscsi_info(struct Scsi_Host *host)
 {
 	static char string[100], *p;
 
+1 -1
drivers/scsi/arm/cumana_2.c
···
  * Params : host - driver host structure to return info for.
  * Returns : pointer to a static buffer containing null terminated string.
  */
-const char *cumanascsi_2_info(struct Scsi_Host *host)
+static const char *cumanascsi_2_info(struct Scsi_Host *host)
 {
 	struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
 	static char string[150];
+1 -1
drivers/scsi/arm/eesox.c
···
  * Params : host - driver host structure to return info for.
  * Returns : pointer to a static buffer containing null terminated string.
  */
-const char *eesoxscsi_info(struct Scsi_Host *host)
+static const char *eesoxscsi_info(struct Scsi_Host *host)
 {
 	struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
 	static char string[150];
+1 -1
drivers/scsi/arm/powertec.c
···
  * Params : host - driver host structure to return info for.
  * Returns : pointer to a static buffer containing null terminated string.
  */
-const char *powertecscsi_info(struct Scsi_Host *host)
+static const char *powertecscsi_info(struct Scsi_Host *host)
 {
 	struct powertec_info *info = (struct powertec_info *)host->hostdata;
 	static char string[150];
+1
drivers/scsi/atari_scsi.c
···
 module_platform_driver_probe(atari_scsi_driver, atari_scsi_probe);
 
 MODULE_ALIAS("platform:" DRV_MODULE_NAME);
+MODULE_DESCRIPTION("Atari TT/Falcon NCR5380 SCSI driver");
 MODULE_LICENSE("GPL");
+2
drivers/scsi/atp870u.c
···
 	atp870u_free_tables(pshost);
 	scsi_host_put(pshost);
 }
+
+MODULE_DESCRIPTION("ACARD SCSI host adapter driver");
 MODULE_LICENSE("GPL");
 
 static const struct scsi_host_template atp870u_template = {
+1
drivers/scsi/elx/efct/efct_driver.c
···
 module_init(efct_init);
 module_exit(efct_exit);
 MODULE_VERSION(EFCT_DRIVER_VERSION);
+MODULE_DESCRIPTION("Emulex Fibre Channel Target driver");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Broadcom");
+1
drivers/scsi/g_NCR5380.c
···
 MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)");
 
 MODULE_ALIAS("g_NCR5380_mmio");
+MODULE_DESCRIPTION("Generic NCR5380/NCR53C400 SCSI driver");
 MODULE_LICENSE("GPL");
 
 static void g_NCR5380_trigger_irq(struct Scsi_Host *instance)
+1
drivers/scsi/imm.c
···
 };
 module_parport_driver(imm_driver);
 
+MODULE_DESCRIPTION("IOMEGA MatchMaker parallel port SCSI host adapter driver");
 MODULE_LICENSE("GPL");
+1
drivers/scsi/isci/init.c
···
 	sas_release_transport(isci_transport_template);
 }
 
+MODULE_DESCRIPTION("Intel(R) C600 Series Chipset SAS Controller driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_FIRMWARE(ISCI_FW_NAME);
 module_init(isci_init);
+9 -1
drivers/scsi/lpfc/lpfc_attr.c
···
 lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
 {
 	LPFC_MBOXQ_t *mbox = NULL;
+	u32 payload_len;
 	unsigned long val = 0;
 	char *pval = NULL;
 	int rc = 0;
···
 	if (!mbox)
 		return -ENOMEM;
 
+	payload_len = sizeof(struct lpfc_mbx_set_trunk_mode) -
+		      sizeof(struct lpfc_sli4_cfg_mhdr);
 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
 			 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
-			 12, LPFC_SLI4_MBX_EMBED);
+			 payload_len, LPFC_SLI4_MBX_EMBED);
 
 	bf_set(lpfc_mbx_set_trunk_mode,
 	       &mbox->u.mqe.un.set_trunk_mode,
···
 
 	/* Get transceiver information */
 	rdp_context = kmalloc(sizeof(*rdp_context), GFP_KERNEL);
+	if (!rdp_context) {
+		len = scnprintf(buf, PAGE_SIZE - len,
+				"SPF info NA: alloc failure\n");
+		return len;
+	}
 
 	rc = lpfc_get_sfp_info_wait(phba, rdp_context);
 	if (rc) {
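The trunking hunk replaces the magic payload length 12 with sizeof(struct lpfc_mbx_set_trunk_mode) - sizeof(struct lpfc_sli4_cfg_mhdr), so the length stays correct if either structure changes, and the SFP hunk adds the missing kmalloc() failure check. A hedged sketch of the sizeof idiom, using hypothetical structures rather than the lpfc ones:

    #include <linux/types.h>

    /*
     * Illustration only: derive the embedded payload length from the
     * types themselves instead of a literal, so it tracks any future
     * layout change.
     */
    struct example_cfg_mhdr { __le32 words[3]; };

    struct example_set_trunk_mode {
        struct example_cfg_mhdr cfg_mhdr;
        __le32 trunk_mode;
        __le32 rsvd[2];
    };

    /* was: a bare "12" at the call site */
    #define EXAMPLE_TRUNK_PAYLOAD_LEN                 \
        (sizeof(struct example_set_trunk_mode) -      \
         sizeof(struct example_cfg_mhdr))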
+4 -12
drivers/scsi/lpfc/lpfc_ct.c
···
 		if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
 		    ndlp->nlp_fc4_type) {
 			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
-			/* This is a fabric topology so if discovery
-			 * started with an unsolicited PLOGI, don't
-			 * send a PRLI.  Targets don't issue PLOGI or
-			 * PRLI when acting as a target. Likely this is
-			 * an initiator function.
-			 */
-			if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
-				lpfc_nlp_set_state(vport, ndlp,
-						   NLP_STE_PRLI_ISSUE);
-				lpfc_issue_els_prli(vport, ndlp, 0);
-			}
+			lpfc_nlp_set_state(vport, ndlp,
+					   NLP_STE_PRLI_ISSUE);
+			lpfc_issue_els_prli(vport, ndlp, 0);
 		} else if (!ndlp->nlp_fc4_type) {
 			/* If fc4 type is still unknown, then LOGO */
 			lpfc_printf_vlog(vport, KERN_INFO,
 					 LOG_DISCOVERY | LOG_NODE,
-					 "6443 Sending LOGO ndlp x%px,"
+					 "6443 Sending LOGO ndlp x%px, "
 					 "DID x%06x with fc4_type: "
 					 "x%08x, state: %d\n",
 					 ndlp, did, ndlp->nlp_fc4_type,
+12 -7
drivers/scsi/lpfc/lpfc_els.c
···
 		mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
 	}
 	mbox->vport = phba->pport;
-
-	rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
+	rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO);
 	if (rc == MBX_NOT_FINISHED) {
 		rc = 1;
 		goto error;
 	}
-
+	if (rc == MBX_TIMEOUT)
+		goto error;
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		mp = mbox->ctx_buf;
 	else
···
 		mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
 	}
 
-	rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
+	rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO);
+
+	if (rc == MBX_TIMEOUT)
+		goto error;
 	if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
 		rc = 1;
 		goto error;
···
 			     DMP_SFF_PAGE_A2_SIZE);
 
 error:
-	mbox->ctx_buf = mpsave;
-	lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+	if (mbox->mbox_flag & LPFC_MBX_WAKE) {
+		mbox->ctx_buf = mpsave;
+		lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+	}
 
 	return rc;
 
···
 	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
 		spin_lock_irqsave(&phba->hbalock, iflags);
 		list_del_init(&piocb->dlist);
-		if (mbx_tmo_err)
+		if (mbx_tmo_err || !(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
 			list_move_tail(&piocb->list, &cancel_list);
 		else
 			lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
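The error-path hunk is the subtle part: after lpfc_sli_issue_mbox_wait() returns MBX_TIMEOUT, the mailbox may still complete asynchronously, so the caller must not free the mailbox resources unless LPFC_MBX_WAKE shows the wait actually finished. A generic hedged sketch of that ownership rule (not lpfc code; names are hypothetical):

    #include <linux/completion.h>
    #include <linux/slab.h>

    /*
     * Sketch: after a timed-out wait the asynchronous completion path
     * may still run and still owns the request, so the waiter may only
     * free it when the wait provably completed.
     */
    struct pending_req {
        struct completion done;
        void *payload;
    };

    static void waiter_finish(struct pending_req *req, unsigned long tmo)
    {
        if (!wait_for_completion_timeout(&req->done, tmo)) {
            /* Timed out: the completer may still fire and remains
             * responsible for freeing req; freeing here would race
             * into a use-after-free on its side.
             */
            return;
        }
        /* Wait completed: the completer is done, ownership is ours. */
        kfree(req->payload);
        kfree(req);
    }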
+9 -1
drivers/scsi/lpfc/lpfc_hbadisc.c
···
 	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
 		return;
 
+	/* check for recovered fabric node */
+	if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+	    ndlp->nlp_DID == Fabric_DID)
+		return;
+
 	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "6789 rport name %llx != node port name %llx",
···
 				 ndlp->nlp_DID, kref_read(&ndlp->kref),
 				 ndlp, ndlp->nlp_flag,
 				 vport->port_state);
+		spin_lock_irqsave(&ndlp->lock, iflags);
+		ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+		spin_unlock_irqrestore(&ndlp->lock, iflags);
 		return fcf_inuse;
 	}
 
···
 		return ndlp;
 
 	if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
-	    ndlp->nlp_state < NLP_STE_PRLI_ISSUE) {
+	    ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
 		lpfc_disc_state_machine(vport, ndlp, NULL,
 					NLP_EVT_DEVICE_RECOVERY);
 	}
+21 -22
drivers/scsi/lpfc/lpfc_sli.c
···
 {
 	struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
 	union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
-	struct sli4_sge *sgl;
+	struct sli4_sge_le *sgl;
+	u32 type_size;
 
 	/* 128 byte wqe support here */
-	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
+	sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl;
 
 	if (phba->fcp_embed_io) {
 		struct fcp_cmnd *fcp_cmnd;
···
 		fcp_cmnd = lpfc_cmd->fcp_cmnd;
 
 		/* Word 0-2 - FCP_CMND */
-		wqe->generic.bde.tus.f.bdeFlags =
-			BUFF_TYPE_BDE_IMMED;
-		wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
+		type_size = le32_to_cpu(sgl->sge_len);
+		type_size |= ULP_BDE64_TYPE_BDE_IMMED;
+		wqe->generic.bde.tus.w = type_size;
 		wqe->generic.bde.addrHigh = 0;
 		wqe->generic.bde.addrLow =  72;  /* Word 18 */
···
 
 		/* Word 18-29  FCP CMND Payload */
 		ptr = &wqe->words[18];
-		memcpy(ptr, fcp_cmnd, sgl->sge_len);
+		lpfc_sli_pcimem_bcopy(fcp_cmnd, ptr, le32_to_cpu(sgl->sge_len));
 	} else {
 		/* Word 0-2 - Inline BDE */
 		wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
-		wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
-		wqe->generic.bde.addrHigh = sgl->addr_hi;
-		wqe->generic.bde.addrLow = sgl->addr_lo;
+		wqe->generic.bde.tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
+		wqe->generic.bde.addrHigh = le32_to_cpu(sgl->addr_hi);
+		wqe->generic.bde.addrLow = le32_to_cpu(sgl->addr_lo);
 
 		/* Word 10 */
 		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
···
 			goto release_iocb;
 		}
 	}
-
-	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
-			"0327 Cannot abort els iocb x%px "
-			"with io cmd xri %x abort tag : x%x, "
-			"abort status %x abort code %x\n",
-			cmdiocb, get_job_abtsiotag(phba, cmdiocb),
-			(phba->sli_rev == LPFC_SLI_REV4) ?
-			get_wqe_reqtag(cmdiocb) :
-			cmdiocb->iocb.un.acxri.abortContextTag,
-			ulp_status, ulp_word4);
-
 	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
+			"0327 Abort els iocb complete x%px with io cmd xri %x "
+			"abort tag x%x abort status %x abort code %x\n",
+			cmdiocb, get_job_abtsiotag(phba, cmdiocb),
+			(phba->sli_rev == LPFC_SLI_REV4) ?
+			get_wqe_reqtag(cmdiocb) :
+			cmdiocb->iocb.ulpIoTag,
+			ulp_status, ulp_word4);
 release_iocb:
 	lpfc_sli_release_iocbq(phba, cmdiocb);
 	return;
···
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
 			 "0339 Abort IO XRI x%x, Original iotag x%x, "
 			 "abort tag x%x Cmdjob : x%px Abortjob : x%px "
-			 "retval x%x : IA %d\n",
+			 "retval x%x : IA %d cmd_cmpl %ps\n",
 			 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
 			 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
-			 retval, ia);
+			 retval, ia, abtsiocbp->cmd_cmpl);
 	if (retval) {
 		cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
 		__lpfc_sli_release_iocbq(phba, abtsiocbp);
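These hunks re-type the SGE as explicitly little-endian (struct sli4_sge_le) and route every field access through le32_to_cpu(), which keeps the WQE setup correct on big-endian hosts and lets sparse flag missed conversions. A minimal illustration of the idiom, with a hypothetical descriptor:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /*
     * Sketch: hardware descriptors are declared with __le32 fields and
     * every CPU-side read goes through le32_to_cpu(), so the code is
     * correct regardless of host byte order and a forgotten conversion
     * shows up as a sparse warning.
     */
    struct example_sge_le {
        __le32 addr_hi;
        __le32 addr_lo;
        __le32 word2;
        __le32 sge_len;     /* little-endian, as written by the device */
    };

    static u32 example_sge_length(const struct example_sge_le *sge)
    {
        return le32_to_cpu(sge->sge_len);   /* CPU-order value */
    }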
+1 -1
drivers/scsi/lpfc/lpfc_version.h
···
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "14.4.0.2"
+#define LPFC_DRIVER_VERSION "14.4.0.3"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
+1
drivers/scsi/mac_scsi.c
···
 module_platform_driver_probe(mac_scsi_driver, mac_scsi_probe);
 
 MODULE_ALIAS("platform:" DRV_MODULE_NAME);
+MODULE_DESCRIPTION("Macintosh NCR5380 SCSI driver");
 MODULE_LICENSE("GPL");
+44
drivers/scsi/mpi3mr/mpi/mpi30_tool.h
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2024 Broadcom Inc. All rights reserved.
+ */
+#ifndef MPI30_TOOL_H
+#define MPI30_TOOL_H 1
+
+#define MPI3_DIAG_BUFFER_TYPE_TRACE	(0x01)
+#define MPI3_DIAG_BUFFER_TYPE_FW	(0x02)
+#define MPI3_DIAG_BUFFER_ACTION_RELEASE	(0x01)
+
+struct mpi3_diag_buffer_post_request {
+	__le16	host_tag;
+	u8	ioc_use_only02;
+	u8	function;
+	__le16	ioc_use_only04;
+	u8	ioc_use_only06;
+	u8	msg_flags;
+	__le16	change_count;
+	__le16	reserved0a;
+	u8	type;
+	u8	reserved0d;
+	__le16	reserved0e;
+	__le64	address;
+	__le32	length;
+	__le32	reserved1c;
+};
+
+struct mpi3_diag_buffer_manage_request {
+	__le16	host_tag;
+	u8	ioc_use_only02;
+	u8	function;
+	__le16	ioc_use_only04;
+	u8	ioc_use_only06;
+	u8	msg_flags;
+	__le16	change_count;
+	__le16	reserved0a;
+	u8	type;
+	u8	action;
+	__le16	reserved0e;
+};
+
+
+#endif
+138 -2
drivers/scsi/mpi3mr/mpi3mr.h
···
 #include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/aer.h>
 #include <linux/poll.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
···
 #include "mpi/mpi30_ioc.h"
 #include "mpi/mpi30_sas.h"
 #include "mpi/mpi30_pci.h"
+#include "mpi/mpi30_tool.h"
 #include "mpi3mr_debug.h"
 
 /* Global list and lock for storing multiple adapters managed by the driver */
···
 extern int prot_mask;
 extern atomic64_t event_counter;
 
-#define MPI3MR_DRIVER_VERSION	"8.8.1.0.50"
-#define MPI3MR_DRIVER_RELDATE	"5-March-2024"
+#define MPI3MR_DRIVER_VERSION	"8.9.1.0.51"
+#define MPI3MR_DRIVER_RELDATE	"29-May-2024"
 
 #define MPI3MR_DRIVER_NAME	"mpi3mr"
 #define MPI3MR_DRIVER_LICENSE	"GPL"
···
 #define MPI3MR_PREPARE_FOR_RESET_TIMEOUT	180
 #define MPI3MR_RESET_ACK_TIMEOUT		30
 #define MPI3MR_MUR_TIMEOUT			120
+#define MPI3MR_RESET_TIMEOUT			510
 
 #define MPI3MR_WATCHDOG_INTERVAL		1000 /* in milli seconds */
···
 #define MPI3MR_HARD_SECURE_DEVICE	0x08
 #define MPI3MR_TAMPERED_DEVICE		0x0C
 
+#define MPI3MR_DEFAULT_HDB_MAX_SZ	(4 * 1024 * 1024)
+#define MPI3MR_DEFAULT_HDB_DEC_SZ	(1 * 1024 * 1024)
+#define MPI3MR_DEFAULT_HDB_MIN_SZ	(2 * 1024 * 1024)
+#define MPI3MR_MAX_NUM_HDB		2
+
+#define MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN		0
+#define MPI3MR_HDB_TRIGGER_TYPE_FAULT		1
+#define MPI3MR_HDB_TRIGGER_TYPE_ELEMENT		2
+#define MPI3MR_HDB_TRIGGER_TYPE_GLOBAL		3
+#define MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET	4
+#define MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED	5
+
+#define MPI3MR_HDB_REFRESH_TYPE_RESERVED	0
+#define MPI3MR_HDB_REFRESH_TYPE_CURRENT		1
+#define MPI3MR_HDB_REFRESH_TYPE_DEFAULT		2
+#define MPI3MR_HDB_HDB_REFRESH_TYPE_PERSISTENT	3
+
+#define MPI3MR_DEFAULT_HDB_SZ	(4 * 1024 * 1024)
+#define MPI3MR_MAX_NUM_HDB	2
+
+#define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_INDEX	0
+#define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA	1
+
+
 /* SGE Flag definition */
 #define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \
 	(MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | MPI3_SGE_FLAGS_DLAS_SYSTEM | \
···
 
 #define MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS 256
 #define MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS 2048
+
+#define MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER	(0xFFFD)
 
 /**
  * struct mpi3mr_nvme_pt_sge - Structure to store SGEs for NVMe
···
 	MPI3MR_RESET_FROM_PELABORT_TIMEOUT = 22,
 	MPI3MR_RESET_FROM_SYSFS = 23,
 	MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24,
+	MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT = 25,
+	MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT = 26,
 	MPI3MR_RESET_FROM_FIRMWARE = 27,
 	MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT = 29,
 	MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT = 30,
+	MPI3MR_RESET_FROM_TRIGGER = 31,
 };
 
 #define MPI3MR_RESET_REASON_OSTYPE_LINUX	1
···
 	u32 ioc_capabilities;
 	struct mpi3mr_compimg_ver fw_ver;
 	u32 mpi_version;
+	u32 diag_trace_sz;
+	u32 diag_fw_sz;
+	u32 diag_drvr_sz;
 	u16 max_reqs;
 	u16 product_id;
 	u16 op_req_sz;
···
 
 /* HBA port flags */
 #define MPI3MR_HBA_PORT_FLAG_DIRTY	0x01
+#define MPI3MR_HBA_PORT_FLAG_NEW	0x02
 
 /* IOCTL data transfer sge*/
 #define MPI3MR_NUM_IOCTL_SGE		256
···
 };
 
 /**
+ * union mpi3mr_trigger_data - Trigger data information
+ * @fault: Fault code
+ * @global: Global trigger data
+ * @element: element trigger data
+ */
+union mpi3mr_trigger_data {
+	u16 fault;
+	u64 global;
+	union mpi3_driver2_trigger_element element;
+};
+
+/**
+ * struct trigger_event_data - store trigger related
+ * information.
+ *
+ * @trace_hdb: Trace diag buffer descriptor reference
+ * @fw_hdb: FW diag buffer descriptor reference
+ * @trigger_type: Trigger type
+ * @trigger_specific_data: Trigger specific data
+ * @snapdump: Snapdump enable or disable flag
+ */
+struct trigger_event_data {
+	struct diag_buffer_desc *trace_hdb;
+	struct diag_buffer_desc *fw_hdb;
+	u8 trigger_type;
+	union mpi3mr_trigger_data trigger_specific_data;
+	bool snapdump;
+};
+
+/**
+ * struct diag_buffer_desc - memory descriptor structure to
+ * store virtual, dma addresses, size, buffer status for host
+ * diagnostic buffers.
+ *
+ * @type: Buffer type
+ * @trigger_data: Trigger data
+ * @trigger_type: Trigger type
+ * @status: Buffer status
+ * @size: Buffer size
+ * @addr: Virtual address
+ * @dma_addr: Buffer DMA address
+ */
+struct diag_buffer_desc {
+	u8 type;
+	union mpi3mr_trigger_data trigger_data;
+	u8 trigger_type;
+	u8 status;
+	u32 size;
+	void *addr;
+	dma_addr_t dma_addr;
+};
+
+/**
  * struct dma_memory_desc - memory descriptor structure to store
  * virtual address, dma address and size for any generic dma
  * memory allocations in the driver.
···
  * @sas_node_lock: Lock to protect SAS node list
  * @hba_port_table_list: List of HBA Ports
  * @enclosure_list: List of Enclosure objects
+ * @diag_buffers: Host diagnostic buffers
+ * @driver_pg2: Driver page 2 pointer
+ * @reply_trigger_present: Reply trigger present flag
+ * @event_trigger_present: Event trigger present flag
+ * @scsisense_trigger_present: Scsi sense trigger present flag
  * @ioctl_dma_pool: DMA pool for IOCTL data buffers
  * @ioctl_sge: DMA buffer descriptors for IOCTL data
  * @ioctl_chain_sge: DMA buffer descriptor for IOCTL chain
  * @ioctl_resp_sge: DMA buffer descriptor for Mgmt cmd response
  * @ioctl_sges_allocated: Flag for IOCTL SGEs allocated or not
+ * @trace_release_trigger_active: Trace trigger active flag
+ * @fw_release_trigger_active: Fw release trigger active flag
+ * @snapdump_trigger_active: Snapdump trigger active flag
+ * @pci_err_recovery: PCI error recovery in progress
+ * @block_on_pci_err: Block IO during PCI error recovery
  */
 struct mpi3mr_ioc {
 	struct list_head list;
···
 	struct dma_memory_desc ioctl_chain_sge;
 	struct dma_memory_desc ioctl_resp_sge;
 	bool ioctl_sges_allocated;
+	bool reply_trigger_present;
+	bool event_trigger_present;
+	bool scsisense_trigger_present;
+	struct diag_buffer_desc diag_buffers[MPI3MR_MAX_NUM_HDB];
+	struct mpi3_driver_page2 *driver_pg2;
+	spinlock_t trigger_lock;
+	bool snapdump_trigger_active;
+	bool trace_release_trigger_active;
+	bool fw_release_trigger_active;
+	bool pci_err_recovery;
+	bool block_on_pci_err;
 };
 
 /**
···
 	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz);
 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
 	struct mpi3_driver_page1 *driver_pg1, u16 pg_sz);
+int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc,
+	struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_type);
 
 u8 mpi3mr_is_expander_device(u16 device_info);
 int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle);
···
 int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc);
 void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
 	struct mpi3mr_sas_node *sas_expander);
+void mpi3mr_alloc_diag_bufs(struct mpi3mr_ioc *mrioc);
+int mpi3mr_post_diag_bufs(struct mpi3mr_ioc *mrioc);
+int mpi3mr_issue_diag_buf_release(struct mpi3mr_ioc *mrioc,
+	struct diag_buffer_desc *diag_buffer);
+void mpi3mr_release_diag_bufs(struct mpi3mr_ioc *mrioc, u8 skip_rel_action);
+void mpi3mr_set_trigger_data_in_hdb(struct diag_buffer_desc *hdb,
+	u8 type, union mpi3mr_trigger_data *trigger_data, bool force);
+int mpi3mr_refresh_trigger(struct mpi3mr_ioc *mrioc, u8 page_type);
+struct diag_buffer_desc *mpi3mr_diag_buffer_for_type(struct mpi3mr_ioc *mrioc,
+	u8 buf_type);
+int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
+	struct diag_buffer_desc *diag_buffer);
+void mpi3mr_set_trigger_data_in_all_hdb(struct mpi3mr_ioc *mrioc,
+	u8 type, union mpi3mr_trigger_data *trigger_data, bool force);
+void mpi3mr_reply_trigger(struct mpi3mr_ioc *mrioc, u16 iocstatus,
+	u32 iocloginfo);
+void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc,
+	struct trigger_event_data *event_data);
+void mpi3mr_scsisense_trigger(struct mpi3mr_ioc *mrioc, u8 senseky, u8 asc,
+	u8 ascq);
+void mpi3mr_event_trigger(struct mpi3mr_ioc *mrioc, u8 event);
+void mpi3mr_global_trigger(struct mpi3mr_ioc *mrioc, u64 trigger_data);
+void mpi3mr_hdbstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
+	struct mpi3_event_notification_reply *event_reply);
 #endif /*MPI3MR_H_INCLUDED*/
+1087 -3
drivers/scsi/mpi3mr/mpi3mr_app.c
···
 #include <uapi/scsi/scsi_bsg_mpi3mr.h>
 
 /**
+ * mpi3mr_alloc_trace_buffer:	Allocate trace buffer
+ * @mrioc: Adapter instance reference
+ * @trace_size: Trace buffer size
+ *
+ * Allocate trace buffer
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size)
+{
+	struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0];
+
+	diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+	    trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
+	if (diag_buffer->addr) {
+		dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+		return 0;
+	}
+	return -1;
+}
+
+/**
+ * mpi3mr_alloc_diag_bufs - Allocate memory for diag buffers
+ * @mrioc: Adapter instance reference
+ *
+ * This functions checks whether the driver defined buffer sizes
+ * are greater than IOCFacts provided controller local buffer
+ * sizes and if the driver defined sizes are more then the
+ * driver allocates the specific buffer by reading driver page1
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_alloc_diag_bufs(struct mpi3mr_ioc *mrioc)
+{
+	struct diag_buffer_desc *diag_buffer;
+	struct mpi3_driver_page1 driver_pg1;
+	u32 trace_dec_size, trace_min_size, fw_dec_size, fw_min_size,
+		trace_size, fw_size;
+	u16 pg_sz = sizeof(driver_pg1);
+	int retval = 0;
+	bool retry = false;
+
+	if (mrioc->diag_buffers[0].addr || mrioc->diag_buffers[1].addr)
+		return;
+
+	retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
+	if (retval) {
+		ioc_warn(mrioc,
+		    "%s: driver page 1 read failed, allocating trace\n"
+		    "and firmware diag buffers of default size\n", __func__);
+		trace_size = fw_size = MPI3MR_DEFAULT_HDB_MAX_SZ;
+		trace_dec_size = fw_dec_size = MPI3MR_DEFAULT_HDB_DEC_SZ;
+		trace_min_size = fw_min_size = MPI3MR_DEFAULT_HDB_MIN_SZ;
+
+	} else {
+		trace_size = driver_pg1.host_diag_trace_max_size * 1024;
+		trace_dec_size = driver_pg1.host_diag_trace_decrement_size
+			 * 1024;
+		trace_min_size = driver_pg1.host_diag_trace_min_size * 1024;
+		fw_size = driver_pg1.host_diag_fw_max_size * 1024;
+		fw_dec_size = driver_pg1.host_diag_fw_decrement_size * 1024;
+		fw_min_size = driver_pg1.host_diag_fw_min_size * 1024;
+		dprint_init(mrioc,
+		    "%s:trace diag buffer sizes read from driver\n"
+		    "page1: maximum size = %dKB, decrement size = %dKB\n"
+		    ", minimum size = %dKB\n", __func__, driver_pg1.host_diag_trace_max_size,
+		    driver_pg1.host_diag_trace_decrement_size,
+		    driver_pg1.host_diag_trace_min_size);
+		dprint_init(mrioc,
+		    "%s:firmware diag buffer sizes read from driver\n"
+		    "page1: maximum size = %dKB, decrement size = %dKB\n"
+		    ", minimum size = %dKB\n", __func__, driver_pg1.host_diag_fw_max_size,
+		    driver_pg1.host_diag_fw_decrement_size,
+		    driver_pg1.host_diag_fw_min_size);
+		if ((trace_size == 0) && (fw_size == 0))
+			return;
+	}
+
+
+retry_trace:
+	diag_buffer = &mrioc->diag_buffers[0];
+	diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_TRACE;
+	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED;
+	if ((mrioc->facts.diag_trace_sz < trace_size) && (trace_size >=
+	    trace_min_size)) {
+		if (!retry)
+			dprint_init(mrioc,
+			    "trying to allocate trace diag buffer of size = %dKB\n",
+			    trace_size / 1024);
+		if (mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
+			retry = true;
+			trace_size -= trace_dec_size;
+			dprint_init(mrioc, "trace diag buffer allocation failed\n"
+			    "retrying smaller size %dKB\n", trace_size / 1024);
+			goto retry_trace;
+		} else
+			diag_buffer->size = trace_size;
+	}
+
+	retry = false;
+retry_fw:
+
+	diag_buffer = &mrioc->diag_buffers[1];
+
+	diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_FW;
+	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED;
+	if ((mrioc->facts.diag_fw_sz < fw_size) && (fw_size >= fw_min_size)) {
+		diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+		    fw_size, &diag_buffer->dma_addr, GFP_KERNEL);
+		if (!retry)
+			dprint_init(mrioc,
+			    "%s:trying to allocate firmware diag buffer of size = %dKB\n",
+			    __func__, fw_size / 1024);
+		if (diag_buffer->addr) {
+			dprint_init(mrioc, "%s:firmware diag buffer allocated successfully\n",
+			    __func__);
+			diag_buffer->size = fw_size;
+		} else {
+			retry = true;
+			fw_size -= fw_dec_size;
+			dprint_init(mrioc, "%s:trace diag buffer allocation failed,\n"
+			    "retrying smaller size %dKB\n",
+			    __func__, fw_size / 1024);
+			goto retry_fw;
+		}
+	}
+}
+
+/**
+ * mpi3mr_issue_diag_buf_post - Send diag buffer post req
+ * @mrioc: Adapter instance reference
+ * @diag_buffer: Diagnostic buffer descriptor
+ *
+ * Issue diagnostic buffer post MPI request through admin queue
+ * and wait for the completion of it or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
+	struct diag_buffer_desc *diag_buffer)
+{
+	struct mpi3_diag_buffer_post_request diag_buf_post_req;
+	u8 prev_status;
+	int retval = 0;
+
+	memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req));
+	mutex_lock(&mrioc->init_cmds.mutex);
+	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
+		mutex_unlock(&mrioc->init_cmds.mutex);
+		return -1;
+	}
+	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+	mrioc->init_cmds.is_waiting = 1;
+	mrioc->init_cmds.callback = NULL;
+	diag_buf_post_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+	diag_buf_post_req.function = MPI3_FUNCTION_DIAG_BUFFER_POST;
+	diag_buf_post_req.type = diag_buffer->type;
+	diag_buf_post_req.address = le64_to_cpu(diag_buffer->dma_addr);
+	diag_buf_post_req.length = le32_to_cpu(diag_buffer->size);
+
+	dprint_bsg_info(mrioc, "%s: posting diag buffer type %d\n", __func__,
+	    diag_buffer->type);
+	prev_status = diag_buffer->status;
+	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
+	init_completion(&mrioc->init_cmds.done);
+	retval = mpi3mr_admin_request_post(mrioc, &diag_buf_post_req,
+	    sizeof(diag_buf_post_req), 1);
+	if (retval) {
+		dprint_bsg_err(mrioc, "%s: admin request post failed\n",
+		    __func__);
+		goto out_unlock;
+	}
+	wait_for_completion_timeout(&mrioc->init_cmds.done,
+	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+		mrioc->init_cmds.is_waiting = 0;
+		dprint_bsg_err(mrioc, "%s: command timedout\n", __func__);
+		mpi3mr_check_rh_fault_ioc(mrioc,
+		    MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT);
+		retval = -1;
+		goto out_unlock;
+	}
+	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+	    != MPI3_IOCSTATUS_SUCCESS) {
+		dprint_bsg_err(mrioc,
+		    "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n",
+		    __func__, diag_buffer->type,
+		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+		    mrioc->init_cmds.ioc_loginfo);
+		retval = -1;
+		goto out_unlock;
+	}
+	dprint_bsg_info(mrioc, "%s: diag buffer type %d posted successfully\n",
+	    __func__, diag_buffer->type);
+
+out_unlock:
+	if (retval)
+		diag_buffer->status = prev_status;
+	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+	mutex_unlock(&mrioc->init_cmds.mutex);
+	return retval;
+}
+
+/**
+ * mpi3mr_post_diag_bufs - Post diag buffers to the controller
+ * @mrioc: Adapter instance reference
+ *
+ * This function calls helper function to post both trace and
+ * firmware buffers to the controller.
+ *
+ * Return: None
+ */
+int mpi3mr_post_diag_bufs(struct mpi3mr_ioc *mrioc)
+{
+	u8 i;
+	struct diag_buffer_desc *diag_buffer;
+
+	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
+		diag_buffer = &mrioc->diag_buffers[i];
+		if (!(diag_buffer->addr))
+			continue;
+		if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer))
+			return -1;
+	}
+	return 0;
+}
+
+/**
+ * mpi3mr_issue_diag_buf_release - Send diag buffer release req
+ * @mrioc: Adapter instance reference
+ * @diag_buffer: Diagnostic buffer descriptor
+ *
+ * Issue diagnostic buffer manage MPI request with release
+ * action request through admin queue and wait for the
+ * completion of it or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+int mpi3mr_issue_diag_buf_release(struct mpi3mr_ioc *mrioc,
+	struct diag_buffer_desc *diag_buffer)
+{
+	struct mpi3_diag_buffer_manage_request diag_buf_manage_req;
+	int retval = 0;
+
+	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
+	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
+		return retval;
+
+	memset(&diag_buf_manage_req, 0, sizeof(diag_buf_manage_req));
+	mutex_lock(&mrioc->init_cmds.mutex);
+	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+		dprint_reset(mrioc, "%s: command is in use\n", __func__);
+		mutex_unlock(&mrioc->init_cmds.mutex);
+		return -1;
+	}
+	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+	mrioc->init_cmds.is_waiting = 1;
+	mrioc->init_cmds.callback = NULL;
+	diag_buf_manage_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+	diag_buf_manage_req.function = MPI3_FUNCTION_DIAG_BUFFER_MANAGE;
+	diag_buf_manage_req.type = diag_buffer->type;
+	diag_buf_manage_req.action = MPI3_DIAG_BUFFER_ACTION_RELEASE;
+
+
+	dprint_reset(mrioc, "%s: releasing diag buffer type %d\n", __func__,
+	    diag_buffer->type);
+	init_completion(&mrioc->init_cmds.done);
+	retval = mpi3mr_admin_request_post(mrioc, &diag_buf_manage_req,
+	    sizeof(diag_buf_manage_req), 1);
+	if (retval) {
+		dprint_reset(mrioc, "%s: admin request post failed\n", __func__);
+		mpi3mr_set_trigger_data_in_hdb(diag_buffer,
+		    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
+		goto out_unlock;
+	}
+	wait_for_completion_timeout(&mrioc->init_cmds.done,
+	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+		mrioc->init_cmds.is_waiting = 0;
+		dprint_reset(mrioc, "%s: command timedout\n", __func__);
+		mpi3mr_check_rh_fault_ioc(mrioc,
+		    MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT);
+		retval = -1;
+		goto out_unlock;
+	}
+	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+	    != MPI3_IOCSTATUS_SUCCESS) {
+		dprint_reset(mrioc,
+		    "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n",
+		    __func__, diag_buffer->type,
+		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+		    mrioc->init_cmds.ioc_loginfo);
+		retval = -1;
+		goto out_unlock;
+	}
+	dprint_reset(mrioc, "%s: diag buffer type %d released successfully\n",
+	    __func__, diag_buffer->type);
+
+out_unlock:
+	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+	mutex_unlock(&mrioc->init_cmds.mutex);
+	return retval;
+}
+
+/**
+ * mpi3mr_process_trigger - Generic HDB Trigger handler
+ * @mrioc: Adapter instance reference
+ * @trigger_type: Trigger type
+ * @trigger_data: Trigger data
+ * @trigger_flags: Trigger flags
+ *
+ * This function checks validity of HDB, triggers and based on
+ * trigger information, creates an event to be processed in the
+ * firmware event worker thread .
+ *
+ * This function should be called with trigger spinlock held
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_process_trigger(struct mpi3mr_ioc *mrioc, u8 trigger_type,
+	union mpi3mr_trigger_data *trigger_data, u8 trigger_flags)
+{
+	struct trigger_event_data event_data;
+	struct diag_buffer_desc *trace_hdb = NULL;
+	struct diag_buffer_desc *fw_hdb = NULL;
+	u64 global_trigger;
+
+	trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
+	    MPI3_DIAG_BUFFER_TYPE_TRACE);
+	if (trace_hdb &&
+	    (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
+	    (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
+		trace_hdb = NULL;
+
+	fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);
+
+	if (fw_hdb &&
+	    (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
+	    (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
+		fw_hdb = NULL;
+
+	if (mrioc->snapdump_trigger_active || (mrioc->fw_release_trigger_active
+	    && mrioc->trace_release_trigger_active) ||
+	    (!trace_hdb && !fw_hdb) || (!mrioc->driver_pg2) ||
+	    ((trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
+	     && (!mrioc->driver_pg2->num_triggers)))
+		return;
+
+	memset(&event_data, 0, sizeof(event_data));
+	event_data.trigger_type = trigger_type;
+	memcpy(&event_data.trigger_specific_data, trigger_data,
+	    sizeof(*trigger_data));
+	global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);
+
+	if (global_trigger & MPI3_DRIVER2_GLOBALTRIGGER_SNAPDUMP_ENABLED) {
+		event_data.snapdump = true;
+		event_data.trace_hdb = trace_hdb;
+		event_data.fw_hdb = fw_hdb;
+		mrioc->snapdump_trigger_active = true;
+	} else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_GLOBAL) {
+		if ((trace_hdb) && (global_trigger &
+		    MPI3_DRIVER2_GLOBALTRIGGER_DIAG_TRACE_RELEASE) &&
+		    (!mrioc->trace_release_trigger_active)) {
+			event_data.trace_hdb = trace_hdb;
+			mrioc->trace_release_trigger_active = true;
+		}
+		if ((fw_hdb) && (global_trigger &
+		    MPI3_DRIVER2_GLOBALTRIGGER_DIAG_FW_RELEASE) &&
+		    (!mrioc->fw_release_trigger_active)) {
+			event_data.fw_hdb = fw_hdb;
+			mrioc->fw_release_trigger_active = true;
+		}
+	} else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) {
+		if ((trace_hdb) && (trigger_flags &
+		    MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE) &&
+		    (!mrioc->trace_release_trigger_active)) {
+			event_data.trace_hdb = trace_hdb;
+			mrioc->trace_release_trigger_active = true;
+		}
+		if ((fw_hdb) && (trigger_flags &
+		    MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE) &&
+		    (!mrioc->fw_release_trigger_active)) {
+			event_data.fw_hdb = fw_hdb;
+			mrioc->fw_release_trigger_active = true;
+		}
+	}
+
+	if (event_data.trace_hdb || event_data.fw_hdb)
+		mpi3mr_hdb_trigger_data_event(mrioc, &event_data);
+}
+
+/**
+ * mpi3mr_global_trigger - Global HDB trigger handler
+ * @mrioc: Adapter instance reference
+ * @trigger_data: Trigger data
+ *
+ * This function checks whether the given global trigger is
+ * enabled in the driver page 2 and if so calls generic trigger
+ * handler to queue event for HDB release.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_global_trigger(struct mpi3mr_ioc *mrioc, u64 trigger_data)
+{
+	unsigned long flags;
+	union mpi3mr_trigger_data trigger_specific_data;
+
+	spin_lock_irqsave(&mrioc->trigger_lock, flags);
+	if (le64_to_cpu(mrioc->driver_pg2->global_trigger) & trigger_data) {
+		memset(&trigger_specific_data, 0,
+		    sizeof(trigger_specific_data));
+		trigger_specific_data.global = trigger_data;
+		mpi3mr_process_trigger(mrioc, MPI3MR_HDB_TRIGGER_TYPE_GLOBAL,
+		    &trigger_specific_data, 0);
+	}
+	spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
+}
+
+/**
+ * mpi3mr_scsisense_trigger - SCSI sense HDB trigger handler
+ * @mrioc: Adapter instance reference
+ * @sensekey: Sense Key
+ * @asc: Additional Sense Code
+ * @ascq: Additional Sense Code Qualifier
+ *
+ * This function compares SCSI sense trigger values with driver
+ * page 2 values and calls generic trigger handler to release
+ * HDBs if match found
+ *
+ * Return: Nothing
+ */
+void mpi3mr_scsisense_trigger(struct mpi3mr_ioc *mrioc, u8 sensekey, u8 asc,
+	u8 ascq)
+{
+	struct mpi3_driver2_trigger_scsi_sense *scsi_sense_trigger = NULL;
+	u64 i = 0;
+	unsigned long flags;
+	u8 num_triggers, trigger_flags;
+
+	if (mrioc->scsisense_trigger_present) {
+		spin_lock_irqsave(&mrioc->trigger_lock, flags);
+		scsi_sense_trigger = (struct mpi3_driver2_trigger_scsi_sense *)
+			mrioc->driver_pg2->trigger;
+		num_triggers = mrioc->driver_pg2->num_triggers;
+		for (i = 0; i < num_triggers; i++, scsi_sense_trigger++) {
+			if (scsi_sense_trigger->type !=
+			    MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE)
+				continue;
+			if (!(scsi_sense_trigger->sense_key ==
+			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL
+			      || scsi_sense_trigger->sense_key == sensekey))
+				continue;
+			if (!(scsi_sense_trigger->asc ==
+			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL ||
+			    scsi_sense_trigger->asc == asc))
+				continue;
+			if (!(scsi_sense_trigger->ascq ==
+			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL ||
+			    scsi_sense_trigger->ascq == ascq))
+				continue;
+			trigger_flags = scsi_sense_trigger->flags;
+			mpi3mr_process_trigger(mrioc,
+			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
+			    (union mpi3mr_trigger_data *)scsi_sense_trigger,
+			    trigger_flags);
+			break;
+		}
+		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
+	}
+}
+
+/**
+ * mpi3mr_event_trigger - MPI event HDB trigger handler
+ * @mrioc: Adapter instance reference
+ * @event: MPI Event
+ *
+ * This function compares event trigger values with driver page
+ * 2 values and calls generic trigger handler to release
+ * HDBs if match found.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_event_trigger(struct mpi3mr_ioc *mrioc, u8 event)
+{
+	struct mpi3_driver2_trigger_event *event_trigger = NULL;
+	u64 i = 0;
+	unsigned long flags;
+	u8 num_triggers, trigger_flags;
+
+	if (mrioc->event_trigger_present) {
+		spin_lock_irqsave(&mrioc->trigger_lock, flags);
+		event_trigger = (struct mpi3_driver2_trigger_event *)
+			mrioc->driver_pg2->trigger;
+		num_triggers = mrioc->driver_pg2->num_triggers;
+
+		for (i = 0; i < num_triggers; i++, event_trigger++) {
+			if (event_trigger->type !=
+			    MPI3_DRIVER2_TRIGGER_TYPE_EVENT)
+				continue;
+			if (event_trigger->event != event)
+				continue;
+			trigger_flags = event_trigger->flags;
+			mpi3mr_process_trigger(mrioc,
+			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
+			    (union mpi3mr_trigger_data *)event_trigger,
+			    trigger_flags);
+			break;
+		}
+		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
+	}
+}
+
+/**
+ * mpi3mr_reply_trigger - MPI Reply HDB trigger handler
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Masked value of IOC Status from MPI Reply
+ * @ioc_loginfo: IOC Log Info from MPI Reply
+ *
+ * This function compares IOC status and IOC log info trigger
+ * values with driver page 2 values and calls generic trigger
+ * handler to release HDBs if match found.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_reply_trigger(struct mpi3mr_ioc *mrioc, u16 ioc_status,
+	u32 ioc_loginfo)
+{
+	struct mpi3_driver2_trigger_reply *reply_trigger = NULL;
+	u64 i = 0;
+	unsigned long flags;
+	u8 num_triggers, trigger_flags;
+
+	if (mrioc->reply_trigger_present) {
+		spin_lock_irqsave(&mrioc->trigger_lock, flags);
+		reply_trigger = (struct mpi3_driver2_trigger_reply *)
+			mrioc->driver_pg2->trigger;
+		num_triggers = mrioc->driver_pg2->num_triggers;
+		for (i = 0; i < num_triggers; i++, reply_trigger++) {
+			if (reply_trigger->type !=
+			    MPI3_DRIVER2_TRIGGER_TYPE_REPLY)
+				continue;
+			if ((le16_to_cpu(reply_trigger->ioc_status) !=
+			     ioc_status)
+			    && (le16_to_cpu(reply_trigger->ioc_status) !=
+			     MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL))
+				continue;
+			if ((le32_to_cpu(reply_trigger->ioc_log_info) !=
+			    (le32_to_cpu(reply_trigger->ioc_log_info_mask) &
+			     ioc_loginfo)))
+				continue;
+			trigger_flags = reply_trigger->flags;
+			mpi3mr_process_trigger(mrioc,
+			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
+			    (union mpi3mr_trigger_data *)reply_trigger,
+			    trigger_flags);
+			break;
+		}
+		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
+	}
+}
+
+/**
+ * mpi3mr_get_num_trigger - Gets number of HDB triggers
+ * @mrioc: Adapter instance reference
+ * @num_triggers: Number of triggers
+ * @page_action: Page action
+ *
+ * This function reads number of triggers by reading driver page
+ * 2
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static int mpi3mr_get_num_trigger(struct mpi3mr_ioc *mrioc, u8 *num_triggers,
+	u8 page_action)
+{
+	struct mpi3_driver_page2 drvr_page2;
+	int retval = 0;
+
+	*num_triggers = 0;
+
+	retval = mpi3mr_cfg_get_driver_pg2(mrioc, &drvr_page2,
+	    sizeof(struct mpi3_driver_page2), page_action);
+
+	if (retval) {
+		dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__);
+		return retval;
+	}
+	*num_triggers = drvr_page2.num_triggers;
+	return retval;
+}
+
+/**
+ * mpi3mr_refresh_trigger - Handler for Refresh trigger BSG
+ * @mrioc: Adapter instance reference
+ * @page_action: Page action
+ *
+ * This function caches the driver page 2 in the driver's memory
+ * by reading driver page 2 from the controller for a given page
+ * type and updates the HDB trigger values
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+int mpi3mr_refresh_trigger(struct mpi3mr_ioc *mrioc, u8 page_action)
+{
+	u16 pg_sz = sizeof(struct mpi3_driver_page2);
+	struct mpi3_driver_page2 *drvr_page2 = NULL;
+	u8 trigger_type, num_triggers;
+	int retval;
+	int i = 0;
+	unsigned long flags;
+
+	retval = mpi3mr_get_num_trigger(mrioc, &num_triggers, page_action);
+
+	if (retval)
+		goto out;
+
+	pg_sz = offsetof(struct mpi3_driver_page2, trigger) +
+	    (num_triggers * sizeof(union mpi3_driver2_trigger_element));
+	drvr_page2 = kzalloc(pg_sz, GFP_KERNEL);
+	if (!drvr_page2) {
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	retval = mpi3mr_cfg_get_driver_pg2(mrioc, drvr_page2, pg_sz, page_action);
+	if (retval) {
+		dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__);
+		kfree(drvr_page2);
+		goto out;
+	}
+	spin_lock_irqsave(&mrioc->trigger_lock, flags);
+	kfree(mrioc->driver_pg2);
+	mrioc->driver_pg2 = drvr_page2;
+	mrioc->reply_trigger_present = false;
+	mrioc->event_trigger_present = false;
+	mrioc->scsisense_trigger_present = false;
+
+	for (i = 0; (i < mrioc->driver_pg2->num_triggers); i++) {
+		trigger_type = mrioc->driver_pg2->trigger[i].event.type;
+		switch (trigger_type) {
+		case MPI3_DRIVER2_TRIGGER_TYPE_REPLY:
+			mrioc->reply_trigger_present = true;
+			break;
+		case MPI3_DRIVER2_TRIGGER_TYPE_EVENT:
+			mrioc->event_trigger_present = true;
+			break;
+		case MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE:
+			mrioc->scsisense_trigger_present = true;
+			break;
+		default:
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
+out:
+	return retval;
+}
+
+/**
+ * mpi3mr_release_diag_bufs - Release diag buffers
+ * @mrioc: Adapter instance reference
+ * @skip_rel_action: Skip release action and set buffer state
+ *
+ * This function calls helper function to release both trace and
+ * firmware buffers from the controller.
+ *
+ * Return: None
+ */
+void mpi3mr_release_diag_bufs(struct mpi3mr_ioc *mrioc, u8 skip_rel_action)
+{
+	u8 i;
+	struct diag_buffer_desc *diag_buffer;
+
+	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
+		diag_buffer = &mrioc->diag_buffers[i];
+		if (!(diag_buffer->addr))
+			continue;
+		if (diag_buffer->status == MPI3MR_HDB_BUFSTATUS_RELEASED)
+			continue;
+		if (!skip_rel_action)
+			mpi3mr_issue_diag_buf_release(mrioc, diag_buffer);
+		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED;
+		atomic64_inc(&event_counter);
+	}
+}
+
+/**
+ * mpi3mr_set_trigger_data_in_hdb - Updates HDB trigger type and
+ * trigger data
+ *
+ * @hdb: HDB pointer
+ * @type: Trigger type
+ * @data: Trigger data
+ * @force: Trigger overwrite flag
+ * @trigger_data: Pointer to trigger data information
+ *
+ * Updates trigger type and trigger data based on parameter
+ * passed to this function
+ *
+ * Return: Nothing
+ */
+void mpi3mr_set_trigger_data_in_hdb(struct diag_buffer_desc *hdb,
+	u8 type, union mpi3mr_trigger_data *trigger_data, bool force)
+{
+	if ((!force) && (hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN))
+		return;
+	hdb->trigger_type = type;
+	if (!trigger_data)
+		memset(&hdb->trigger_data, 0, sizeof(*trigger_data));
+	else
+		memcpy(&hdb->trigger_data, trigger_data, sizeof(*trigger_data));
+}
+
+/**
+ * mpi3mr_set_trigger_data_in_all_hdb - Updates HDB trigger type
+ * and trigger data for all HDB
+ *
+ * @mrioc: Adapter instance reference
+ * @type: Trigger type
+ * @data: Trigger data
+ * @force: Trigger overwrite flag
+ * @trigger_data: Pointer to trigger data information
+ *
+ * Updates trigger type and trigger data based on parameter
+ * passed to this function
+ *
+ * Return: Nothing
+ */
+void mpi3mr_set_trigger_data_in_all_hdb(struct mpi3mr_ioc *mrioc,
+	u8 type, union mpi3mr_trigger_data *trigger_data, bool force)
+{
+	struct diag_buffer_desc *hdb = NULL;
+
+	hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_TRACE);
+	if (hdb)
+		mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force);
+	hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);
+	if (hdb)
+		mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force);
+}
+
+/**
+ * mpi3mr_hdbstatuschg_evt_th - HDB status change evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Modifies the status of the applicable diag buffer descriptors
+ *
+ * Return: Nothing
+ */
+void mpi3mr_hdbstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
+	struct mpi3_event_notification_reply *event_reply)
+{
+	struct mpi3_event_data_diag_buffer_status_change *evtdata;
+	struct diag_buffer_desc *diag_buffer;
+
+	evtdata = (struct mpi3_event_data_diag_buffer_status_change *)
+	    event_reply->event_data;
+
+	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, evtdata->type);
+	if (!diag_buffer)
+		return;
+	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
+	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
+		return;
+	switch (evtdata->reason_code) {
+	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED:
+	{
+		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED;
+		mpi3mr_set_trigger_data_in_hdb(diag_buffer,
+		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
+		atomic64_inc(&event_counter);
+		break;
+	}
+	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED:
+	{
+		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
+		break;
+	}
+	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED:
+	{
+		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED;
+		break;
+	}
+	default:
+		dprint_event_th(mrioc, "%s: unknown reason_code(%d)\n",
+		    __func__, evtdata->reason_code);
+		break;
+	}
+}
+
+/**
+ * mpi3mr_diag_buffer_for_type - returns buffer desc for type
+ * @mrioc: Adapter instance reference
+ * @buf_type: Diagnostic buffer type
+ *
+ * Identifies matching diag descriptor from mrioc for given diag
+ * buffer type.
+ *
+ * Return: diag buffer descriptor on success, NULL on failures.
+ */
+
+struct diag_buffer_desc *
+mpi3mr_diag_buffer_for_type(struct mpi3mr_ioc *mrioc, u8 buf_type)
+{
+	u8 i;
+
+	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
+		if (mrioc->diag_buffers[i].type == buf_type)
+			return &mrioc->diag_buffers[i];
+	}
+	return NULL;
+}
+
+/**
  * mpi3mr_bsg_pel_abort - sends PEL abort request
  * @mrioc: Adapter instance reference
  *
···
 		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
 		return -1;
 	}
-	if (mrioc->stop_bsgs) {
+	if (mrioc->stop_bsgs || mrioc->block_on_pci_err) {
 		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
 		return -1;
 	}
···
 	spin_unlock(&mrioc_list_lock);
 	return NULL;
 }
+
+/**
+ * mpi3mr_bsg_refresh_hdb_triggers - Refresh HDB trigger data
+ * @mrioc: Adapter instance reference
+ * @job: BSG Job pointer
+ *
+ * This function reads the controller trigger config page as
+ * defined by the input page type and refreshes the driver's
+ * local trigger information structures with the controller's
+ * config page data.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long
+mpi3mr_bsg_refresh_hdb_triggers(struct mpi3mr_ioc *mrioc,
+				struct bsg_job *job)
+{
+	struct mpi3mr_bsg_out_refresh_hdb_triggers refresh_triggers;
+	uint32_t data_out_sz;
+	u8 page_action;
+	long rval = -EINVAL;
+
+	data_out_sz = job->request_payload.payload_len;
+
+	if (data_out_sz != sizeof(refresh_triggers)) {
+		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+		    __func__);
+		return rval;
+	}
+
+	if (mrioc->unrecoverable) {
+		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
+		    __func__);
+		return -EFAULT;
+	}
+	if (mrioc->reset_in_progress) {
+		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
+		return -EAGAIN;
+	}
+
+	sg_copy_to_buffer(job->request_payload.sg_list,
+	    job->request_payload.sg_cnt,
+	    &refresh_triggers, sizeof(refresh_triggers));
+
+	switch (refresh_triggers.page_type) {
+	case MPI3MR_HDB_REFRESH_TYPE_CURRENT:
+		page_action = MPI3_CONFIG_ACTION_READ_CURRENT;
+		break;
+	case MPI3MR_HDB_REFRESH_TYPE_DEFAULT:
+		page_action = MPI3_CONFIG_ACTION_READ_DEFAULT;
+		break;
+	case MPI3MR_HDB_HDB_REFRESH_TYPE_PERSISTENT:
+		page_action = MPI3_CONFIG_ACTION_READ_PERSISTENT;
+		break;
+	default:
+		dprint_bsg_err(mrioc,
+		    "%s: unsupported refresh trigger, page_type %d\n",
+		    __func__, refresh_triggers.page_type);
+		return rval;
+	}
+	rval = mpi3mr_refresh_trigger(mrioc, page_action);
+
+	return rval;
+}
+
+/**
+ * mpi3mr_bsg_upload_hdb - Upload a specific HDB to user space
+ * @mrioc: Adapter instance reference
+ * @job: BSG Job pointer
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_upload_hdb(struct mpi3mr_ioc *mrioc,
+	struct bsg_job *job)
+{
+	struct mpi3mr_bsg_out_upload_hdb upload_hdb;
+	struct diag_buffer_desc *diag_buffer;
+	uint32_t data_out_size;
+	uint32_t data_in_size;
+
+	data_out_size = job->request_payload.payload_len;
+	data_in_size = job->reply_payload.payload_len;
+
+	if (data_out_size != sizeof(upload_hdb)) {
+		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+		    __func__);
+		return -EINVAL;
+	}
+
+	sg_copy_to_buffer(job->request_payload.sg_list,
+	    job->request_payload.sg_cnt,
+	    &upload_hdb, sizeof(upload_hdb));
+
+	if ((!upload_hdb.length) || (data_in_size != upload_hdb.length)) {
+		dprint_bsg_err(mrioc, "%s: invalid length argument\n",
+		    __func__);
+		return -EINVAL;
+	}
+	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, upload_hdb.buf_type);
+	if ((!diag_buffer) || (!diag_buffer->addr)) {
+		dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n",
+		    __func__, upload_hdb.buf_type);
+		return -EINVAL;
+	}
+
+	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) &&
+	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) {
+		dprint_bsg_err(mrioc,
+		    "%s: invalid buffer status %d for type %d\n",
+		    __func__, diag_buffer->status, upload_hdb.buf_type);
+		return -EINVAL;
+	}
+
+	if ((upload_hdb.start_offset + upload_hdb.length) > diag_buffer->size) {
+		dprint_bsg_err(mrioc,
+		    "%s: invalid start offset %d, length %d for type %d\n",
+		    __func__, upload_hdb.start_offset, upload_hdb.length,
+		    upload_hdb.buf_type);
+		return -EINVAL;
+	}
+	sg_copy_from_buffer(job->reply_payload.sg_list,
+	    job->reply_payload.sg_cnt,
+	    (diag_buffer->addr + upload_hdb.start_offset),
+	    data_in_size);
+	return 0;
+}
+
+/**
+ * mpi3mr_bsg_repost_hdb - Re-post HDB
+ * @mrioc: Adapter instance reference
+ * @job: BSG job pointer
+ *
+ * This function retrieves the HDB descriptor corresponding to a
+ * given buffer type and if the HDB is in released status then
+ * posts the HDB with the firmware.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_repost_hdb(struct mpi3mr_ioc *mrioc,
+	struct bsg_job *job)
+{
+	struct mpi3mr_bsg_out_repost_hdb repost_hdb;
+	struct diag_buffer_desc *diag_buffer;
+	uint32_t data_out_sz;
+
+	data_out_sz = job->request_payload.payload_len;
+
+	if (data_out_sz != sizeof(repost_hdb)) {
+		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+		    __func__);
+		return -EINVAL;
+	}
+	if (mrioc->unrecoverable) {
+		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
+		    __func__);
+		return -EFAULT;
+	}
+	if (mrioc->reset_in_progress) {
+		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
+		return -EAGAIN;
+	}
+
+	sg_copy_to_buffer(job->request_payload.sg_list,
+	    job->request_payload.sg_cnt,
+	    &repost_hdb, sizeof(repost_hdb));
+
+	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, repost_hdb.buf_type);
+	if ((!diag_buffer) || (!diag_buffer->addr)) {
+		dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n",
+		    __func__, repost_hdb.buf_type);
+		return -EINVAL;
+	}
+
+	if (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) {
+		dprint_bsg_err(mrioc,
+		    "%s: invalid buffer status %d for type %d\n",
+		    __func__, diag_buffer->status, repost_hdb.buf_type);
+		return -EINVAL;
+	}
+
+	if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer)) {
+		dprint_bsg_err(mrioc, "%s: post failed for type %d\n",
+		    __func__, repost_hdb.buf_type);
+		return -EFAULT;
+	}
+	mpi3mr_set_trigger_data_in_hdb(diag_buffer,
+	    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
+
+	return 0;
+}
+
+/**
+ * mpi3mr_bsg_query_hdb - Handler for query HDB command
+ * @mrioc: Adapter instance reference
+ * @job: BSG job pointer
+ *
+ * This function prepares and copies the host diagnostic buffer
+ * entries to the user buffer.
325 + * 326 + * Return: 0 on success and proper error codes on failure 327 + */ 328 + static long mpi3mr_bsg_query_hdb(struct mpi3mr_ioc *mrioc, 329 + struct bsg_job *job) 330 + { 331 + long rval = 0; 332 + struct mpi3mr_bsg_in_hdb_status *hbd_status; 333 + struct mpi3mr_hdb_entry *hbd_status_entry; 334 + u32 length, min_length; 335 + u8 i; 336 + struct diag_buffer_desc *diag_buffer; 337 + uint32_t data_in_sz = 0; 338 + 339 + data_in_sz = job->request_payload.payload_len; 340 + 341 + length = (sizeof(*hbd_status) + ((MPI3MR_MAX_NUM_HDB - 1) * 342 + sizeof(*hbd_status_entry))); 343 + hbd_status = kmalloc(length, GFP_KERNEL); 344 + if (!hbd_status) 345 + return -ENOMEM; 346 + hbd_status_entry = &hbd_status->entry[0]; 347 + 348 + hbd_status->num_hdb_types = MPI3MR_MAX_NUM_HDB; 349 + for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) { 350 + diag_buffer = &mrioc->diag_buffers[i]; 351 + hbd_status_entry->buf_type = diag_buffer->type; 352 + hbd_status_entry->status = diag_buffer->status; 353 + hbd_status_entry->trigger_type = diag_buffer->trigger_type; 354 + memcpy(&hbd_status_entry->trigger_data, 355 + &diag_buffer->trigger_data, 356 + sizeof(hbd_status_entry->trigger_data)); 357 + hbd_status_entry->size = (diag_buffer->size / 1024); 358 + hbd_status_entry++; 359 + } 360 + hbd_status->element_trigger_format = 361 + MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA; 362 + 363 + if (data_in_sz < 4) { 364 + dprint_bsg_err(mrioc, "%s: invalid size passed\n", __func__); 365 + rval = -EINVAL; 366 + goto out; 367 + } 368 + min_length = min(data_in_sz, length); 369 + if (job->request_payload.payload_len >= min_length) { 370 + sg_copy_from_buffer(job->request_payload.sg_list, 371 + job->request_payload.sg_cnt, 372 + hbd_status, min_length); 373 + rval = 0; 374 + } 375 + out: 376 + kfree(hbd_status); 377 + return rval; 378 + } 379 + 942 380 943 381 /** 944 382 * mpi3mr_enable_logdata - Handler for log data enable ··· 1492 424 goto out; 1493 425 } 1494 426 427 + if (mrioc->unrecoverable || mrioc->block_on_pci_err) 428 + return -EINVAL; 429 + 1495 430 sg_copy_to_buffer(job->request_payload.sg_list, 1496 431 job->request_payload.sg_cnt, 1497 432 &adpreset, sizeof(adpreset)); ··· 1623 552 break; 1624 553 case MPI3MR_DRVBSG_OPCODE_PELENABLE: 1625 554 rval = mpi3mr_bsg_pel_enable(mrioc, job); 555 + break; 556 + case MPI3MR_DRVBSG_OPCODE_QUERY_HDB: 557 + rval = mpi3mr_bsg_query_hdb(mrioc, job); 558 + break; 559 + case MPI3MR_DRVBSG_OPCODE_REPOST_HDB: 560 + rval = mpi3mr_bsg_repost_hdb(mrioc, job); 561 + break; 562 + case MPI3MR_DRVBSG_OPCODE_UPLOAD_HDB: 563 + rval = mpi3mr_bsg_upload_hdb(mrioc, job); 564 + break; 565 + case MPI3MR_DRVBSG_OPCODE_REFRESH_HDB_TRIGGERS: 566 + rval = mpi3mr_bsg_refresh_hdb_triggers(mrioc, job); 1626 567 break; 1627 568 case MPI3MR_DRVBSG_OPCODE_UNKNOWN: 1628 569 default: ··· 2578 1495 mutex_unlock(&mrioc->bsg_cmds.mutex); 2579 1496 goto out; 2580 1497 } 2581 - if (mrioc->stop_bsgs) { 1498 + if (mrioc->stop_bsgs || mrioc->block_on_pci_err) { 2582 1499 dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); 2583 1500 rval = -EAGAIN; 2584 1501 mutex_unlock(&mrioc->bsg_cmds.mutex); ··· 3111 2028 ioc_state = mpi3mr_get_iocstate(mrioc); 3112 2029 if (ioc_state == MRIOC_STATE_UNRECOVERABLE) 3113 2030 adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE; 3114 - else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs)) 2031 + else if (mrioc->reset_in_progress || mrioc->stop_bsgs || 2032 + mrioc->block_on_pci_err) 3115 2033 adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET; 3116 2034 else if (ioc_state == 
MRIOC_STATE_FAULT) 3117 2035 adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
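The four new driver opcodes above (query, repost, upload and trigger refresh) expose the host diagnostic buffers (HDBs) to user space through the controller's bsg node: a control structure travels in the data-out payload and, for the upload case, the buffer contents come back in the data-in payload. A rough userspace sketch of the upload path follows; only the opcode, the mpi3mr_bsg_out_upload_hdb fields and the dout/din convention come from the handlers above, while the node path, the uapi header location and the mpi3mr_bsg_packet layout are assumptions, not something this hunk defines.

	/* Hypothetical sketch: pull part of a released HDB out via SG_IO v4. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/bsg.h>
	#include <scsi/sg.h>                 /* SG_IO */
	#include <scsi/scsi_bsg_mpi3mr.h>    /* assumed uapi header path */

	static int upload_hdb(const char *node, uint8_t buf_type, void *out,
			      uint32_t len, uint32_t offset)
	{
		struct mpi3mr_bsg_packet pkt;          /* driver command header */
		struct mpi3mr_bsg_out_upload_hdb up;   /* control struct (dout) */
		struct sg_io_v4 io;
		int fd, rc;

		fd = open(node, O_RDWR);  /* e.g. "/dev/bsg/mpi3mr0" (assumed name) */
		if (fd < 0)
			return -1;

		memset(&pkt, 0, sizeof(pkt));
		pkt.cmd_type = MPI3MR_DRV_CMD;
		pkt.cmd.drvrcmd.mrioc_id = 0;  /* adapter index, assumed single IOC */
		pkt.cmd.drvrcmd.opcode = MPI3MR_DRVBSG_OPCODE_UPLOAD_HDB;

		memset(&up, 0, sizeof(up));
		up.buf_type = buf_type;        /* e.g. MPI3_DIAG_BUFFER_TYPE_TRACE */
		up.start_offset = offset;
		up.length = len;               /* must equal din_xfer_len, see handler */

		memset(&io, 0, sizeof(io));
		io.guard = 'Q';
		io.protocol = BSG_PROTOCOL_SCSI;
		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io.request = (uintptr_t)&pkt;
		io.request_len = sizeof(pkt);
		io.dout_xferp = (uintptr_t)&up;    /* control structure out */
		io.dout_xfer_len = sizeof(up);
		io.din_xferp = (uintptr_t)out;     /* buffer contents back in */
		io.din_xfer_len = len;

		rc = ioctl(fd, SG_IO, &io);
		close(fd);
		return rc;
	}

The handler rejects the request unless the buffer is in RELEASED or POSTED_PAUSED state and start_offset + length fits inside the buffer, so a tool would normally issue QUERY_HDB first to learn each buffer's status and size.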
+281 -13
drivers/scsi/mpi3mr/mpi3mr_fw.c
··· 274 274 case MPI3_EVENT_PREPARE_FOR_RESET: 275 275 desc = "Prepare For Reset"; 276 276 break; 277 + case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE: 278 + desc = "Diagnostic Buffer Status Change"; 279 + break; 277 280 } 278 281 279 282 if (!desc) ··· 345 342 { 346 343 u16 reply_desc_type, host_tag = 0; 347 344 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; 348 - u32 ioc_loginfo = 0; 345 + u32 ioc_loginfo = 0, sense_count = 0; 349 346 struct mpi3_status_reply_descriptor *status_desc; 350 347 struct mpi3_address_reply_descriptor *addr_desc; 351 348 struct mpi3_success_reply_descriptor *success_desc; 352 349 struct mpi3_default_reply *def_reply = NULL; 353 350 struct mpi3mr_drv_cmd *cmdptr = NULL; 354 351 struct mpi3_scsi_io_reply *scsi_reply; 352 + struct scsi_sense_hdr sshdr; 355 353 u8 *sense_buf = NULL; 356 354 357 355 *reply_dma = 0; ··· 367 363 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 368 364 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); 369 365 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 366 + mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); 370 367 break; 371 368 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: 372 369 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; ··· 385 380 scsi_reply = (struct mpi3_scsi_io_reply *)def_reply; 386 381 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 387 382 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 383 + sense_count = le32_to_cpu(scsi_reply->sense_count); 384 + if (sense_buf) { 385 + scsi_normalize_sense(sense_buf, sense_count, 386 + &sshdr); 387 + mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key, 388 + sshdr.asc, sshdr.ascq); 389 + } 388 390 } 391 + mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); 389 392 break; 390 393 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 391 394 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; ··· 608 595 mrioc = (struct mpi3mr_ioc *)shost->hostdata; 609 596 610 597 if ((mrioc->reset_in_progress || mrioc->prepare_for_reset || 611 - mrioc->unrecoverable)) 598 + mrioc->unrecoverable || mrioc->pci_err_recovery)) 612 599 return 0; 613 600 614 601 num_entries = mpi3mr_process_op_reply_q(mrioc, ··· 951 938 }, 952 939 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, 953 940 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, 941 + { 942 + MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT, 943 + "diagnostic buffer post timeout" 944 + }, 945 + { 946 + MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT, 947 + "diagnostic buffer release timeout" 948 + }, 954 949 { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" }, 955 950 { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"}, 956 951 { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" }, ··· 1693 1672 retval = -EAGAIN; 1694 1673 goto out; 1695 1674 } 1675 + if (mrioc->pci_err_recovery) { 1676 + ioc_err(mrioc, "admin request queue submission failed due to pci error recovery in progress\n"); 1677 + retval = -EAGAIN; 1678 + goto out; 1679 + } 1680 + 1696 1681 areq_entry = (u8 *)mrioc->admin_req_base + 1697 1682 (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); 1698 1683 memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); ··· 2369 2342 retval = -EAGAIN; 2370 2343 goto out; 2371 2344 } 2345 + if (mrioc->pci_err_recovery) { 2346 + ioc_err(mrioc, "operational request queue submission failed due to pci error recovery in progress\n"); 2347 + retval = -EAGAIN; 2348 + goto out; 2349 + } 2372 2350 2373 2351 segment_base_addr = segments[pi / 
op_req_q->segment_qd].segment; 2374 2352 req_entry = (u8 *)segment_base_addr + ··· 2419 2387 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code) 2420 2388 { 2421 2389 u32 ioc_status, host_diagnostic, timeout; 2390 + union mpi3mr_trigger_data trigger_data; 2422 2391 2423 2392 if (mrioc->unrecoverable) { 2424 2393 ioc_err(mrioc, "controller is unrecoverable\n"); ··· 2431 2398 ioc_err(mrioc, "controller is not present\n"); 2432 2399 return; 2433 2400 } 2434 - 2401 + memset(&trigger_data, 0, sizeof(trigger_data)); 2435 2402 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2436 - if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 2437 - (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 2403 + 2404 + if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { 2405 + mpi3mr_set_trigger_data_in_all_hdb(mrioc, 2406 + MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0); 2407 + return; 2408 + } else if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { 2409 + trigger_data.fault = (readl(&mrioc->sysif_regs->fault) & 2410 + MPI3_SYSIF_FAULT_CODE_MASK); 2411 + 2412 + mpi3mr_set_trigger_data_in_all_hdb(mrioc, 2413 + MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0); 2438 2414 mpi3mr_print_fault_info(mrioc); 2439 2415 return; 2440 2416 } 2417 + 2441 2418 mpi3mr_set_diagsave(mrioc); 2442 2419 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 2443 2420 reason_code); 2421 + trigger_data.fault = (readl(&mrioc->sysif_regs->fault) & 2422 + MPI3_SYSIF_FAULT_CODE_MASK); 2423 + mpi3mr_set_trigger_data_in_all_hdb(mrioc, MPI3MR_HDB_TRIGGER_TYPE_FAULT, 2424 + &trigger_data, 0); 2444 2425 timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; 2445 2426 do { 2446 2427 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); ··· 2634 2587 container_of(work, struct mpi3mr_ioc, watchdog_work.work); 2635 2588 unsigned long flags; 2636 2589 enum mpi3mr_iocstate ioc_state; 2637 - u32 fault, host_diagnostic, ioc_status; 2590 + u32 host_diagnostic, ioc_status; 2591 + union mpi3mr_trigger_data trigger_data; 2638 2592 u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH; 2639 2593 2640 - if (mrioc->reset_in_progress) 2594 + if (mrioc->reset_in_progress || mrioc->pci_err_recovery) 2641 2595 return; 2642 2596 2643 2597 if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) { ··· 2666 2618 return; 2667 2619 } 2668 2620 2621 + memset(&trigger_data, 0, sizeof(trigger_data)); 2669 2622 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2670 2623 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { 2624 + mpi3mr_set_trigger_data_in_all_hdb(mrioc, 2625 + MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0); 2671 2626 mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0); 2672 2627 return; 2673 2628 } ··· 2680 2629 if (ioc_state != MRIOC_STATE_FAULT) 2681 2630 goto schedule_work; 2682 2631 2683 - fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; 2632 + trigger_data.fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; 2633 + mpi3mr_set_trigger_data_in_all_hdb(mrioc, 2634 + MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0); 2684 2635 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 2685 2636 if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) { 2686 2637 if (!mrioc->diagsave_timeout) { ··· 2696 2643 mpi3mr_print_fault_info(mrioc); 2697 2644 mrioc->diagsave_timeout = 0; 2698 2645 2699 - switch (fault) { 2646 + switch (trigger_data.fault) { 2700 2647 case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED: 2701 2648 case 
MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED: 2702 2649 ioc_warn(mrioc, ··· 3056 3003 mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift; 3057 3004 mrioc->facts.shutdown_timeout = 3058 3005 le16_to_cpu(facts_data->shutdown_timeout); 3059 - 3006 + mrioc->facts.diag_trace_sz = 3007 + le32_to_cpu(facts_data->diag_trace_size); 3008 + mrioc->facts.diag_fw_sz = 3009 + le32_to_cpu(facts_data->diag_fw_size); 3010 + mrioc->facts.diag_drvr_sz = le32_to_cpu(facts_data->diag_driver_size); 3060 3011 mrioc->facts.max_dev_per_tg = 3061 3012 facts_data->max_devices_per_throttle_group; 3062 3013 mrioc->facts.io_throttle_data_length = ··· 3739 3682 }; 3740 3683 3741 3684 /** 3685 + * mpi3mr_repost_diag_bufs - repost host diag buffers 3686 + * @mrioc: Adapter instance reference 3687 + * 3688 + * repost firmware and trace diag buffers based on global 3689 + * trigger flag from driver page 2 3690 + * 3691 + * Return: 0 on success, non-zero on failures. 3692 + */ 3693 + static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc) 3694 + { 3695 + u64 global_trigger; 3696 + union mpi3mr_trigger_data prev_trigger_data; 3697 + struct diag_buffer_desc *trace_hdb = NULL; 3698 + struct diag_buffer_desc *fw_hdb = NULL; 3699 + int retval = 0; 3700 + bool trace_repost_needed = false; 3701 + bool fw_repost_needed = false; 3702 + u8 prev_trigger_type; 3703 + 3704 + retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT); 3705 + if (retval) 3706 + return -1; 3707 + 3708 + trace_hdb = mpi3mr_diag_buffer_for_type(mrioc, 3709 + MPI3_DIAG_BUFFER_TYPE_TRACE); 3710 + 3711 + if (trace_hdb && 3712 + trace_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED && 3713 + trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL && 3714 + trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) 3715 + trace_repost_needed = true; 3716 + 3717 + fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW); 3718 + 3719 + if (fw_hdb && fw_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED && 3720 + fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL && 3721 + fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) 3722 + fw_repost_needed = true; 3723 + 3724 + if (trace_repost_needed || fw_repost_needed) { 3725 + global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger); 3726 + if (global_trigger & 3727 + MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED) 3728 + trace_repost_needed = false; 3729 + if (global_trigger & 3730 + MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED) 3731 + fw_repost_needed = false; 3732 + } 3733 + 3734 + if (trace_repost_needed) { 3735 + prev_trigger_type = trace_hdb->trigger_type; 3736 + memcpy(&prev_trigger_data, &trace_hdb->trigger_data, 3737 + sizeof(trace_hdb->trigger_data)); 3738 + retval = mpi3mr_issue_diag_buf_post(mrioc, trace_hdb); 3739 + if (!retval) { 3740 + dprint_init(mrioc, "trace diag buffer reposted"); 3741 + mpi3mr_set_trigger_data_in_hdb(trace_hdb, 3742 + MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1); 3743 + } else { 3744 + trace_hdb->trigger_type = prev_trigger_type; 3745 + memcpy(&trace_hdb->trigger_data, &prev_trigger_data, 3746 + sizeof(prev_trigger_data)); 3747 + ioc_err(mrioc, "trace diag buffer repost failed"); 3748 + return -1; 3749 + } 3750 + } 3751 + 3752 + if (fw_repost_needed) { 3753 + prev_trigger_type = fw_hdb->trigger_type; 3754 + memcpy(&prev_trigger_data, &fw_hdb->trigger_data, 3755 + sizeof(fw_hdb->trigger_data)); 3756 + retval = mpi3mr_issue_diag_buf_post(mrioc, fw_hdb); 3757 + if (!retval) { 3758 + dprint_init(mrioc, "firmware diag buffer 
reposted"); 3759 + mpi3mr_set_trigger_data_in_hdb(fw_hdb, 3760 + MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1); 3761 + } else { 3762 + fw_hdb->trigger_type = prev_trigger_type; 3763 + memcpy(&fw_hdb->trigger_data, &prev_trigger_data, 3764 + sizeof(prev_trigger_data)); 3765 + ioc_err(mrioc, "firmware diag buffer repost failed"); 3766 + return -1; 3767 + } 3768 + } 3769 + return retval; 3770 + } 3771 + 3772 + /** 3742 3773 * mpi3mr_print_ioc_info - Display controller information 3743 3774 * @mrioc: Adapter instance reference 3744 3775 * ··· 4043 3898 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET); 4044 3899 mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT); 4045 3900 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE); 3901 + mpi3mr_unmask_events(mrioc, MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE); 4046 3902 4047 3903 retval = mpi3mr_issue_event_notification(mrioc); 4048 3904 if (retval) ··· 4135 3989 } 4136 3990 } 4137 3991 3992 + dprint_init(mrioc, "allocating host diag buffers\n"); 3993 + mpi3mr_alloc_diag_bufs(mrioc); 3994 + 4138 3995 dprint_init(mrioc, "allocating ioctl dma buffers\n"); 4139 3996 mpi3mr_alloc_ioctl_dma_memory(mrioc); 3997 + 3998 + dprint_init(mrioc, "posting host diag buffers\n"); 3999 + retval = mpi3mr_post_diag_bufs(mrioc); 4000 + 4001 + if (retval) 4002 + ioc_warn(mrioc, "failed to post host diag buffers\n"); 4140 4003 4141 4004 if (!mrioc->init_cmds.reply) { 4142 4005 retval = mpi3mr_alloc_reply_sense_bufs(mrioc); ··· 4222 4067 goto out_failed; 4223 4068 } 4224 4069 4070 + retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT); 4071 + if (retval) { 4072 + ioc_err(mrioc, "failed to refresh triggers\n"); 4073 + goto out_failed; 4074 + } 4075 + 4225 4076 ioc_info(mrioc, "controller initialization completed successfully\n"); 4226 4077 return retval; 4227 4078 out_failed: ··· 4279 4118 goto out_failed_noretry; 4280 4119 } 4281 4120 4282 - if (is_resume) { 4121 + if (is_resume || mrioc->block_on_pci_err) { 4283 4122 dprint_reset(mrioc, "setting up single ISR\n"); 4284 4123 retval = mpi3mr_setup_isr(mrioc, 1); 4285 4124 if (retval) { ··· 4305 4144 4306 4145 mpi3mr_print_ioc_info(mrioc); 4307 4146 4147 + if (is_resume) { 4148 + dprint_reset(mrioc, "posting host diag buffers\n"); 4149 + retval = mpi3mr_post_diag_bufs(mrioc); 4150 + if (retval) 4151 + ioc_warn(mrioc, "failed to post host diag buffers\n"); 4152 + } else { 4153 + retval = mpi3mr_repost_diag_bufs(mrioc); 4154 + if (retval) 4155 + ioc_warn(mrioc, "failed to re post host diag buffers\n"); 4156 + } 4157 + 4308 4158 dprint_reset(mrioc, "sending ioc_init\n"); 4309 4159 retval = mpi3mr_issue_iocinit(mrioc); 4310 4160 if (retval) { ··· 4330 4158 goto out_failed; 4331 4159 } 4332 4160 4333 - if (is_resume) { 4161 + if (is_resume || mrioc->block_on_pci_err) { 4334 4162 dprint_reset(mrioc, "setting up multiple ISR\n"); 4335 4163 retval = mpi3mr_setup_isr(mrioc, 0); 4336 4164 if (retval) { ··· 4581 4409 { 4582 4410 u16 i; 4583 4411 struct mpi3mr_intr_info *intr_info; 4412 + struct diag_buffer_desc *diag_buffer; 4584 4413 4585 4414 mpi3mr_free_enclosure_list(mrioc); 4586 4415 mpi3mr_free_ioctl_dma_memory(mrioc); ··· 4716 4543 mrioc->pel_seqnum_virt = NULL; 4717 4544 } 4718 4545 4546 + for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) { 4547 + diag_buffer = &mrioc->diag_buffers[i]; 4548 + if (diag_buffer->addr) { 4549 + dma_free_coherent(&mrioc->pdev->dev, 4550 + diag_buffer->size, diag_buffer->addr, 4551 + diag_buffer->dma_addr); 4552 + diag_buffer->addr = NULL; 4553 + diag_buffer->size = 0; 4554 + 
diag_buffer->type = 0; 4555 + diag_buffer->status = 0; 4556 + } 4557 + } 4558 + 4719 4559 kfree(mrioc->throttle_groups); 4720 4560 mrioc->throttle_groups = NULL; 4721 4561 ··· 4818 4632 4819 4633 ioc_state = mpi3mr_get_iocstate(mrioc); 4820 4634 4821 - if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) && 4635 + if (!mrioc->unrecoverable && !mrioc->reset_in_progress && 4636 + !mrioc->pci_err_recovery && 4822 4637 (ioc_state == MRIOC_STATE_READY)) { 4823 4638 if (mpi3mr_issue_and_process_mur(mrioc, 4824 4639 MPI3MR_RESET_FROM_CTLR_CLEANUP)) ··· 5167 4980 int retval = 0, i; 5168 4981 unsigned long flags; 5169 4982 u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; 4983 + union mpi3mr_trigger_data trigger_data; 5170 4984 5171 4985 /* Block the reset handler until diag save in progress*/ 5172 4986 dprint_reset(mrioc, ··· 5200 5012 mrioc->reset_in_progress = 1; 5201 5013 mrioc->stop_bsgs = 1; 5202 5014 mrioc->prev_reset_result = -1; 5015 + memset(&trigger_data, 0, sizeof(trigger_data)); 5203 5016 5204 5017 if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) && 5205 5018 (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) && 5206 5019 (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) { 5020 + mpi3mr_set_trigger_data_in_all_hdb(mrioc, 5021 + MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0); 5022 + dprint_reset(mrioc, 5023 + "soft_reset_handler: releasing host diagnostic buffers\n"); 5024 + mpi3mr_release_diag_bufs(mrioc, 0); 5207 5025 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 5208 5026 mrioc->event_masks[i] = -1; 5209 5027 ··· 5226 5032 retval = mpi3mr_issue_reset(mrioc, 5227 5033 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason); 5228 5034 if (!retval) { 5035 + trigger_data.fault = (readl(&mrioc->sysif_regs->fault) & 5036 + MPI3_SYSIF_FAULT_CODE_MASK); 5229 5037 do { 5230 5038 host_diagnostic = 5231 5039 readl(&mrioc->sysif_regs->host_diagnostic); ··· 5236 5040 break; 5237 5041 msleep(100); 5238 5042 } while (--timeout); 5043 + mpi3mr_set_trigger_data_in_all_hdb(mrioc, 5044 + MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0); 5239 5045 } 5240 5046 } 5241 5047 ··· 5273 5075 mrioc->prepare_for_reset_timeout_counter = 0; 5274 5076 } 5275 5077 mpi3mr_memset_buffers(mrioc); 5078 + mpi3mr_release_diag_bufs(mrioc, 1); 5079 + mrioc->fw_release_trigger_active = false; 5080 + mrioc->trace_release_trigger_active = false; 5081 + mrioc->snapdump_trigger_active = false; 5082 + mpi3mr_set_trigger_data_in_all_hdb(mrioc, 5083 + MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0); 5084 + 5085 + dprint_reset(mrioc, 5086 + "soft_reset_handler: reinitializing the controller\n"); 5276 5087 retval = mpi3mr_reinit_ioc(mrioc, 0); 5277 5088 if (retval) { 5278 5089 pr_err(IOCNAME "reinit after soft reset failed: reason %d\n", ··· 6161 5954 out_failed: 6162 5955 return -1; 6163 5956 } 5957 + 5958 + /** 5959 + * mpi3mr_cfg_get_driver_pg2 - Read current driver page2 5960 + * @mrioc: Adapter instance reference 5961 + * @driver_pg2: Pointer to return driver page 2 5962 + * @pg_sz: Size of the memory allocated to the page pointer 5963 + * @page_action: Page action 5964 + * 5965 + * This is handler for config page read for the driver page2. 5966 + * This routine checks ioc_status to decide whether the page 5967 + * read is success or not. 5968 + * 5969 + * Return: 0 on success, non-zero on failure. 
5970 + */
5971 + int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc,
5972 + struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_action)
5973 + {
5974 + struct mpi3_config_page_header cfg_hdr;
5975 + struct mpi3_config_request cfg_req;
5976 + u16 ioc_status = 0;
5977 +
5978 + memset(driver_pg2, 0, pg_sz);
5979 + memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5980 + memset(&cfg_req, 0, sizeof(cfg_req));
5981 +
5982 + cfg_req.function = MPI3_FUNCTION_CONFIG;
5983 + cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5984 + cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
5985 + cfg_req.page_number = 2;
5986 + cfg_req.page_address = 0;
5987 + cfg_req.page_version = MPI3_DRIVER2_PAGEVERSION;
5988 +
5989 + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5990 + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5991 + ioc_err(mrioc, "driver page2 header read failed\n");
5992 + goto out_failed;
5993 + }
5994 + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5995 + ioc_err(mrioc, "driver page2 header read failed with "
5996 + "ioc_status(0x%04x)\n",
5997 + ioc_status);
5998 + goto out_failed;
5999 + }
6000 + cfg_req.action = page_action;
6001 +
6002 + if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6003 + MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg2, pg_sz)) {
6004 + ioc_err(mrioc, "driver page2 read failed\n");
6005 + goto out_failed;
6006 + }
6007 + if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6008 + ioc_err(mrioc, "driver page2 read failed with "
6009 + "ioc_status(0x%04x)\n",
6010 + ioc_status);
6011 + goto out_failed;
6012 + }
6013 + return 0;
6014 + out_failed:
6015 + return -1;
6016 + }
6017 +
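mpi3mr_cfg_get_driver_pg2() above follows the driver's standard two-step config transaction: a PAGE_HEADER action is issued first so the firmware reports the page version it implements, and the returned header is then passed back in with the real read action (current, default or persistent). Distilled into one generic helper it looks like the sketch below; the helper itself is illustrative (the driver keeps a dedicated wrapper per page), but the calls and argument order mirror the code above.

	/* Sketch: generic two-step MPI3 config page read, modelled on the
	 * driver page2 reader above. */
	static int mpi3mr_read_cfg_page(struct mpi3mr_ioc *mrioc, u8 page_type,
			u8 page_number, u8 page_action, void *page, u16 pg_sz)
	{
		struct mpi3_config_page_header cfg_hdr;
		struct mpi3_config_request cfg_req;
		u16 ioc_status = 0;

		memset(page, 0, pg_sz);
		memset(&cfg_hdr, 0, sizeof(cfg_hdr));
		memset(&cfg_req, 0, sizeof(cfg_req));

		cfg_req.function = MPI3_FUNCTION_CONFIG;
		cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
		cfg_req.page_type = page_type;
		cfg_req.page_number = page_number;

		/* step 1: header only, the firmware fills in the page version */
		if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
		    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr,
		    sizeof(cfg_hdr)) || (ioc_status != MPI3_IOCSTATUS_SUCCESS))
			return -1;

		/* step 2: the actual read, with the header handed back in */
		cfg_req.action = page_action;
		if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
		    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, page, pg_sz) ||
		    (ioc_status != MPI3_IOCSTATUS_SUCCESS))
			return -1;

		return 0;
	}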
+356 -5
drivers/scsi/mpi3mr/mpi3mr_os.c
··· 242 242 }
243 243
244 244 /**
245 + * mpi3mr_hdb_trigger_data_event - Add hdb trigger data event to
246 + * the list
247 + * @mrioc: Adapter instance reference
248 + * @event_data: Event data
249 + *
250 + * Add the given hdb trigger data event to the firmware event
251 + * list.
252 + *
253 + * Return: Nothing.
254 + */
255 + void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc,
256 + struct trigger_event_data *event_data)
257 + {
258 + struct mpi3mr_fwevt *fwevt;
259 + u16 sz = sizeof(*event_data);
260 +
261 + fwevt = mpi3mr_alloc_fwevt(sz);
262 + if (!fwevt) {
263 + ioc_warn(mrioc, "failed to queue hdb trigger data event\n");
264 + return;
265 + }
266 +
267 + fwevt->mrioc = mrioc;
268 + fwevt->event_id = MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER;
269 + fwevt->send_ack = 0;
270 + fwevt->process_evt = 1;
271 + fwevt->evt_ctx = 0;
272 + fwevt->event_data_size = sz;
273 + memcpy(fwevt->event_data, event_data, sz);
274 +
275 + mpi3mr_fwevt_add_to_list(mrioc, fwevt);
276 + }
277 +
278 + /**
245 279 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
246 280 * @mrioc: Adapter instance reference
247 281 * @fwevt: Firmware event reference
··· 932 898 }
933 899 } else
934 900 mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);
901 + mpi3mr_global_trigger(mrioc,
902 + MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED);
935 903
936 904 ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
937 905 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
··· 956 920 int retval = 0;
957 921 struct mpi3mr_tgt_dev *tgtdev;
958 922
959 - if (mrioc->reset_in_progress)
923 + if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
960 924 return -1;
961 925
962 926 tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
··· 1470 1434 }
1471 1435
1472 1436 /**
1437 + * mpi3mr_process_trigger_data_event_bh - Process trigger event
1438 + * data
1439 + * @mrioc: Adapter instance reference
1440 + * @event_data: Event data
1441 + *
1442 + * This function releases diag buffers or issues a diag fault
1443 + * based on trigger conditions
1444 + *
1445 + * Return: Nothing
1446 + */
1447 + static void mpi3mr_process_trigger_data_event_bh(struct mpi3mr_ioc *mrioc,
1448 + struct trigger_event_data *event_data)
1449 + {
1450 + struct diag_buffer_desc *trace_hdb = event_data->trace_hdb;
1451 + struct diag_buffer_desc *fw_hdb = event_data->fw_hdb;
1452 + unsigned long flags;
1453 + int retval = 0;
1454 + u8 trigger_type = event_data->trigger_type;
1455 + union mpi3mr_trigger_data *trigger_data =
1456 + &event_data->trigger_specific_data;
1457 +
1458 + if (event_data->snapdump) {
1459 + if (trace_hdb)
1460 + mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
1461 + trigger_data, 1);
1462 + if (fw_hdb)
1463 + mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
1464 + trigger_data, 1);
1465 + mpi3mr_soft_reset_handler(mrioc,
1466 + MPI3MR_RESET_FROM_TRIGGER, 1);
1467 + return;
1468 + }
1469 +
1470 + if (trace_hdb) {
1471 + retval = mpi3mr_issue_diag_buf_release(mrioc, trace_hdb);
1472 + if (!retval) {
1473 + mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
1474 + trigger_data, 1);
1475 + }
1476 + spin_lock_irqsave(&mrioc->trigger_lock, flags);
1477 + mrioc->trace_release_trigger_active = false;
1478 + spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
1479 + }
1480 + if (fw_hdb) {
1481 + retval = mpi3mr_issue_diag_buf_release(mrioc, fw_hdb);
1482 + if (!retval) {
1483 + mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
1484 + trigger_data, 1);
1485 + }
1486 +
spin_lock_irqsave(&mrioc->trigger_lock, flags);
1487 + mrioc->fw_release_trigger_active = false;
1488 + spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
1489 + }
1490 + }
1491 +
1492 + /**
1473 1493 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
1474 1494 * @mrioc: Adapter instance reference
1475 1495 * @encl_pg0: Enclosure page 0.
··· 2007 1915 struct mpi3_device_page0 *dev_pg0 = NULL;
2008 1916 u16 perst_id, handle, dev_info;
2009 1917 struct mpi3_device0_sas_sata_format *sasinf = NULL;
1918 + unsigned int timeout;
2010 1919
2011 1920 mpi3mr_fwevt_del_from_list(mrioc, fwevt);
2012 1921 mrioc->current_event = fwevt;
··· 2098 2005 }
2099 2006 case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
2100 2007 {
2101 - while (mrioc->device_refresh_on)
2008 + timeout = MPI3MR_RESET_TIMEOUT * 2;
2009 + while ((mrioc->device_refresh_on || mrioc->block_on_pci_err) &&
2010 + !mrioc->unrecoverable && !mrioc->pci_err_recovery) {
2102 2011 msleep(500);
2012 + if (!timeout--) {
2013 + mrioc->unrecoverable = 1;
2014 + break;
2015 + }
2016 + }
2017 +
2018 + if (mrioc->unrecoverable || mrioc->pci_err_recovery)
2019 + break;
2103 2020
2104 2021 dprint_event_bh(mrioc,
2105 2022 "scan for non responding and newly added devices after soft reset started\n");
··· 2120 2017 mpi3mr_refresh_tgtdevs(mrioc);
2121 2018 ioc_info(mrioc,
2122 2019 "scan for non responding and newly added devices after soft reset completed\n");
2020 + break;
2021 + }
2022 + case MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER:
2023 + {
2024 + mpi3mr_process_trigger_data_event_bh(mrioc,
2025 + (struct trigger_event_data *)fwevt->event_data);
2026 + break;
2027 + }
2125 2028 default:
··· 2966 2857 ack_req = 1;
2967 2858
2968 2859 evt_type = event_reply->event;
2860 + mpi3mr_event_trigger(mrioc, event_reply->event);
2969 2861
2970 2862 switch (evt_type) {
2971 2863 case MPI3_EVENT_DEVICE_ADDED:
··· 3003 2893 {
3004 2894 mpi3mr_preparereset_evt_th(mrioc, event_reply);
3005 2895 ack_req = 0;
2896 + break;
2897 + }
2898 + case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
2899 + {
2900 + mpi3mr_hdbstatuschg_evt_th(mrioc, event_reply);
3006 2901 break;
3007 2902 }
3008 2903 case MPI3_EVENT_DEVICE_INFO_CHANGED:
··· 3273 3158 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
3274 3159 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
3275 3160 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
3161 + mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
3276 3162 break;
3277 3163 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
3278 3164 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
··· 3302 3186 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
3303 3187 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
3304 3188 panic("%s: Ran out of sense buffers\n", mrioc->name);
3189 + if (sense_buf) {
3190 + scsi_normalize_sense(sense_buf, sense_count, &sshdr);
3191 + mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
3192 + sshdr.asc, sshdr.ascq);
3193 + }
3194 + mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
3305 3195 break;
3306 3196 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
3307 3197 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
··· 3807 3685 mutex_unlock(&drv_cmd->mutex);
3808 3686 goto out;
3809 3687 }
3688 + if (mrioc->block_on_pci_err) {
3689 + retval = -1;
3690 + dprint_tm(mrioc, "sending task management failed due to "
3691 + "pci error recovery in progress\n");
3692 + mutex_unlock(&drv_cmd->mutex);
3693 + goto out;
3694 + }
3695 +
3811 3696 drv_cmd->state
= MPI3MR_CMD_PENDING; 3812 3697 drv_cmd->is_waiting = 1; ··· 3940 3811 default: 3941 3812 break; 3942 3813 } 3814 + mpi3mr_global_trigger(mrioc, 3815 + MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED); 3943 3816 3944 3817 out_unlock: 3945 3818 drv_cmd->state = MPI3MR_CMD_NOTUSED; ··· 4199 4068 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4200 4069 u8 dev_type = MPI3_DEVICE_DEVFORM_VD; 4201 4070 int retval = FAILED; 4071 + unsigned int timeout = MPI3MR_RESET_TIMEOUT; 4202 4072 4203 4073 sdev_priv_data = scmd->device->hostdata; 4204 4074 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) { ··· 4210 4078 if (dev_type == MPI3_DEVICE_DEVFORM_VD) { 4211 4079 mpi3mr_wait_for_host_io(mrioc, 4212 4080 MPI3MR_RAID_ERRREC_RESET_TIMEOUT); 4213 - if (!mpi3mr_get_fw_pending_ios(mrioc)) 4081 + if (!mpi3mr_get_fw_pending_ios(mrioc)) { 4082 + while (mrioc->reset_in_progress || 4083 + mrioc->prepare_for_reset || 4084 + mrioc->block_on_pci_err) { 4085 + ssleep(1); 4086 + if (!timeout--) { 4087 + retval = FAILED; 4088 + goto out; 4089 + } 4090 + } 4214 4091 retval = SUCCESS; 4092 + goto out; 4093 + } 4215 4094 } 4216 4095 if (retval == FAILED) 4217 4096 mpi3mr_print_pending_host_io(mrioc); 4218 4097 4098 + out: 4219 4099 sdev_printk(KERN_INFO, scmd->device, 4220 4100 "Bus reset is %s for scmd(%p)\n", 4221 4101 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); ··· 4910 4766 goto out; 4911 4767 } 4912 4768 4913 - if (mrioc->reset_in_progress) { 4769 + if (mrioc->reset_in_progress || mrioc->prepare_for_reset 4770 + || mrioc->block_on_pci_err) { 4914 4771 retval = SCSI_MLQUEUE_HOST_BUSY; 4915 4772 goto out; 4916 4773 } ··· 5394 5249 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5395 5250 ssleep(1); 5396 5251 5397 - if (!pci_device_is_present(mrioc->pdev)) { 5252 + if (mrioc->block_on_pci_err) { 5253 + mrioc->block_on_pci_err = false; 5254 + scsi_unblock_requests(shost); 5255 + mrioc->unrecoverable = 1; 5256 + } 5257 + 5258 + if (!pci_device_is_present(mrioc->pdev) || 5259 + mrioc->pci_err_recovery) { 5398 5260 mrioc->unrecoverable = 1; 5399 5261 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5400 5262 } ··· 5585 5433 return 0; 5586 5434 } 5587 5435 5436 + /** 5437 + * mpi3mr_pcierr_error_detected - PCI error detected callback 5438 + * @pdev: PCI device instance 5439 + * @state: channel state 5440 + * 5441 + * This function is called by the PCI error recovery driver and 5442 + * based on the state passed the driver decides what actions to 5443 + * be recommended back to PCI driver. 5444 + * 5445 + * For all of the states if there is no valid mrioc or scsi host 5446 + * references in the PCI device then this function will return 5447 + * the result as disconnect. 5448 + * 5449 + * For normal state, this function will return the result as can 5450 + * recover. 5451 + * 5452 + * For frozen state, this function will block for any pending 5453 + * controller initialization or re-initialization to complete, 5454 + * stop any new interactions with the controller and return 5455 + * status as reset required. 5456 + * 5457 + * For permanent failure state, this function will mark the 5458 + * controller as unrecoverable and return status as disconnect. 5459 + * 5460 + * Returns: PCI_ERS_RESULT_NEED_RESET or CAN_RECOVER or 5461 + * DISCONNECT based on the controller state. 
5462 + */ 5463 + static pci_ers_result_t 5464 + mpi3mr_pcierr_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 5465 + { 5466 + struct Scsi_Host *shost; 5467 + struct mpi3mr_ioc *mrioc; 5468 + unsigned int timeout = MPI3MR_RESET_TIMEOUT; 5469 + 5470 + dev_info(&pdev->dev, "%s: callback invoked state(%d)\n", __func__, 5471 + state); 5472 + 5473 + shost = pci_get_drvdata(pdev); 5474 + mrioc = shost_priv(shost); 5475 + 5476 + switch (state) { 5477 + case pci_channel_io_normal: 5478 + return PCI_ERS_RESULT_CAN_RECOVER; 5479 + case pci_channel_io_frozen: 5480 + mrioc->pci_err_recovery = true; 5481 + mrioc->block_on_pci_err = true; 5482 + do { 5483 + if (mrioc->reset_in_progress || mrioc->is_driver_loading) 5484 + ssleep(1); 5485 + else 5486 + break; 5487 + } while (--timeout); 5488 + 5489 + if (!timeout) { 5490 + mrioc->pci_err_recovery = true; 5491 + mrioc->block_on_pci_err = true; 5492 + mrioc->unrecoverable = 1; 5493 + mpi3mr_stop_watchdog(mrioc); 5494 + mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5495 + return PCI_ERS_RESULT_DISCONNECT; 5496 + } 5497 + 5498 + scsi_block_requests(mrioc->shost); 5499 + mpi3mr_stop_watchdog(mrioc); 5500 + mpi3mr_cleanup_resources(mrioc); 5501 + return PCI_ERS_RESULT_NEED_RESET; 5502 + case pci_channel_io_perm_failure: 5503 + mrioc->pci_err_recovery = true; 5504 + mrioc->block_on_pci_err = true; 5505 + mrioc->unrecoverable = 1; 5506 + mpi3mr_stop_watchdog(mrioc); 5507 + mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5508 + return PCI_ERS_RESULT_DISCONNECT; 5509 + default: 5510 + return PCI_ERS_RESULT_DISCONNECT; 5511 + } 5512 + } 5513 + 5514 + /** 5515 + * mpi3mr_pcierr_slot_reset - Post slot reset callback 5516 + * @pdev: PCI device instance 5517 + * 5518 + * This function is called by the PCI error recovery driver 5519 + * after a slot or link reset issued by it for the recovery, the 5520 + * driver is expected to bring back the controller and 5521 + * initialize it. 5522 + * 5523 + * This function restores PCI state and reinitializes controller 5524 + * resources and the controller, this blocks for any pending 5525 + * reset to complete. 
5526 + * 5527 + * Returns: PCI_ERS_RESULT_DISCONNECT on failure or 5528 + * PCI_ERS_RESULT_RECOVERED 5529 + */ 5530 + static pci_ers_result_t mpi3mr_pcierr_slot_reset(struct pci_dev *pdev) 5531 + { 5532 + struct Scsi_Host *shost; 5533 + struct mpi3mr_ioc *mrioc; 5534 + unsigned int timeout = MPI3MR_RESET_TIMEOUT; 5535 + 5536 + dev_info(&pdev->dev, "%s: callback invoked\n", __func__); 5537 + 5538 + shost = pci_get_drvdata(pdev); 5539 + mrioc = shost_priv(shost); 5540 + 5541 + do { 5542 + if (mrioc->reset_in_progress) 5543 + ssleep(1); 5544 + else 5545 + break; 5546 + } while (--timeout); 5547 + 5548 + if (!timeout) 5549 + goto out_failed; 5550 + 5551 + pci_restore_state(pdev); 5552 + 5553 + if (mpi3mr_setup_resources(mrioc)) { 5554 + ioc_err(mrioc, "setup resources failed\n"); 5555 + goto out_failed; 5556 + } 5557 + mrioc->unrecoverable = 0; 5558 + mrioc->pci_err_recovery = false; 5559 + 5560 + if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0)) 5561 + goto out_failed; 5562 + 5563 + return PCI_ERS_RESULT_RECOVERED; 5564 + 5565 + out_failed: 5566 + mrioc->unrecoverable = 1; 5567 + mrioc->block_on_pci_err = false; 5568 + scsi_unblock_requests(shost); 5569 + mpi3mr_start_watchdog(mrioc); 5570 + return PCI_ERS_RESULT_DISCONNECT; 5571 + } 5572 + 5573 + /** 5574 + * mpi3mr_pcierr_resume - PCI error recovery resume 5575 + * callback 5576 + * @pdev: PCI device instance 5577 + * 5578 + * This function enables all I/O and IOCTLs post reset issued as 5579 + * part of the PCI error recovery 5580 + * 5581 + * Return: Nothing. 5582 + */ 5583 + static void mpi3mr_pcierr_resume(struct pci_dev *pdev) 5584 + { 5585 + struct Scsi_Host *shost; 5586 + struct mpi3mr_ioc *mrioc; 5587 + 5588 + dev_info(&pdev->dev, "%s: callback invoked\n", __func__); 5589 + 5590 + shost = pci_get_drvdata(pdev); 5591 + mrioc = shost_priv(shost); 5592 + 5593 + if (mrioc->block_on_pci_err) { 5594 + mrioc->block_on_pci_err = false; 5595 + scsi_unblock_requests(shost); 5596 + mpi3mr_start_watchdog(mrioc); 5597 + } 5598 + } 5599 + 5600 + /** 5601 + * mpi3mr_pcierr_mmio_enabled - PCI error recovery callback 5602 + * @pdev: PCI device instance 5603 + * 5604 + * This is called only if mpi3mr_pcierr_error_detected returns 5605 + * PCI_ERS_RESULT_CAN_RECOVER. 
5606 + * 5607 + * Return: PCI_ERS_RESULT_DISCONNECT when the controller is 5608 + * unrecoverable or when the shost/mrioc reference cannot be 5609 + * found, else return PCI_ERS_RESULT_RECOVERED 5610 + */ 5611 + static pci_ers_result_t mpi3mr_pcierr_mmio_enabled(struct pci_dev *pdev) 5612 + { 5613 + struct Scsi_Host *shost; 5614 + struct mpi3mr_ioc *mrioc; 5615 + 5616 + dev_info(&pdev->dev, "%s: callback invoked\n", __func__); 5617 + 5618 + shost = pci_get_drvdata(pdev); 5619 + mrioc = shost_priv(shost); 5620 + 5621 + if (mrioc->unrecoverable) 5622 + return PCI_ERS_RESULT_DISCONNECT; 5623 + 5624 + return PCI_ERS_RESULT_RECOVERED; 5625 + } 5626 + 5588 5627 static const struct pci_device_id mpi3mr_pci_id_table[] = { 5589 5628 { 5590 5629 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, ··· 5793 5450 }; 5794 5451 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); 5795 5452 5453 + static struct pci_error_handlers mpi3mr_err_handler = { 5454 + .error_detected = mpi3mr_pcierr_error_detected, 5455 + .mmio_enabled = mpi3mr_pcierr_mmio_enabled, 5456 + .slot_reset = mpi3mr_pcierr_slot_reset, 5457 + .resume = mpi3mr_pcierr_resume, 5458 + }; 5459 + 5796 5460 static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume); 5797 5461 5798 5462 static struct pci_driver mpi3mr_pci_driver = { ··· 5808 5458 .probe = mpi3mr_probe, 5809 5459 .remove = mpi3mr_remove, 5810 5460 .shutdown = mpi3mr_shutdown, 5461 + .err_handler = &mpi3mr_err_handler, 5811 5462 .driver.pm = &mpi3mr_pm_ops, 5812 5463 }; 5813 5464
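The trigger plumbing added to mpi3mr_os.c is a textbook top-half/bottom-half split: mpi3mr_event_trigger(), mpi3mr_reply_trigger() and mpi3mr_scsisense_trigger() fire in reply-processing context, where the driver cannot sleep, so mpi3mr_hdb_trigger_data_event() packages the condition into a MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER firmware-event item and the blocking work (a diag buffer release, or a soft reset for snapdump triggers) happens later in mpi3mr_process_trigger_data_event_bh(). The same shape reduced to plain workqueue code, with process_trigger() and the embedded struct as hypothetical stand-ins:

	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct trigger_work {
		struct work_struct work;
		struct trigger_event_data data;	/* copied, caller's copy may go away */
	};

	static void trigger_work_fn(struct work_struct *work)
	{
		struct trigger_work *tw =
			container_of(work, struct trigger_work, work);

		process_trigger(&tw->data);	/* may sleep: issues MPI requests */
		kfree(tw);
	}

	static void queue_trigger_event(const struct trigger_event_data *data)
	{
		struct trigger_work *tw = kzalloc(sizeof(*tw), GFP_ATOMIC);

		if (!tw)
			return;			/* best effort, as in the driver */

		tw->data = *data;
		INIT_WORK(&tw->work, trigger_work_fn);
		schedule_work(&tw->work);
	}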
+37 -6
drivers/scsi/mpi3mr/mpi3mr_transport.c
··· 151 151 return -EFAULT; 152 152 } 153 153 154 + if (mrioc->pci_err_recovery) { 155 + ioc_err(mrioc, "%s: pci error recovery in progress!\n", __func__); 156 + return -EFAULT; 157 + } 158 + 154 159 data_out_sz = sizeof(struct rep_manu_request); 155 160 data_in_sz = sizeof(struct rep_manu_reply); 156 161 data_out = dma_alloc_coherent(&mrioc->pdev->dev, ··· 795 790 return -EFAULT; 796 791 } 797 792 793 + if (mrioc->pci_err_recovery) { 794 + ioc_err(mrioc, "%s: pci error recovery in progress!\n", 795 + __func__); 796 + return -EFAULT; 797 + } 798 + 798 799 if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &device_pg0, 799 800 sizeof(device_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, handle))) { 800 801 ioc_err(mrioc, "%s: device page0 read failed\n", __func__); ··· 1018 1007 hba_port->port_id = port_id; 1019 1008 ioc_info(mrioc, "hba_port entry: %p, port: %d is added to hba_port list\n", 1020 1009 hba_port, hba_port->port_id); 1010 + if (mrioc->reset_in_progress || 1011 + mrioc->pci_err_recovery) 1012 + hba_port->flags = MPI3MR_HBA_PORT_FLAG_NEW; 1021 1013 list_add_tail(&hba_port->list, &mrioc->hba_port_table_list); 1022 1014 return hba_port; 1023 1015 } ··· 1069 1055 struct mpi3mr_sas_node *mr_sas_node; 1070 1056 struct mpi3mr_sas_phy *mr_sas_phy; 1071 1057 1072 - if (mrioc->reset_in_progress) 1058 + if (mrioc->reset_in_progress || mrioc->pci_err_recovery) 1073 1059 return; 1074 1060 1075 1061 spin_lock_irqsave(&mrioc->sas_node_lock, flags); ··· 1367 1353 mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node, 1368 1354 mr_sas_port->remote_identify.sas_address, hba_port); 1369 1355 1370 - if (mr_sas_node->num_phys > sizeof(mr_sas_port->phy_mask) * 8) 1356 + if (mr_sas_node->num_phys >= sizeof(mr_sas_port->phy_mask) * 8) 1371 1357 ioc_info(mrioc, "max port count %u could be too high\n", 1372 1358 mr_sas_node->num_phys); 1373 1359 ··· 1377 1363 (mr_sas_node->phy[i].hba_port != hba_port)) 1378 1364 continue; 1379 1365 1380 - if (i > sizeof(mr_sas_port->phy_mask) * 8) { 1366 + if (i >= sizeof(mr_sas_port->phy_mask) * 8) { 1381 1367 ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n", 1382 1368 i, sizeof(mr_sas_port->phy_mask) * 8); 1383 1369 goto out_fail; ··· 1992 1978 if (!handle) 1993 1979 return -1; 1994 1980 1995 - if (mrioc->reset_in_progress) 1981 + if (mrioc->reset_in_progress || mrioc->pci_err_recovery) 1996 1982 return -1; 1997 1983 1998 1984 if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0, ··· 2198 2184 /* remove sibling ports attached to this expander */ 2199 2185 list_for_each_entry_safe(mr_sas_port, next, 2200 2186 &sas_expander->sas_port_list, port_list) { 2201 - if (mrioc->reset_in_progress) 2187 + if (mrioc->reset_in_progress || mrioc->pci_err_recovery) 2202 2188 return; 2203 2189 if (mr_sas_port->remote_identify.device_type == 2204 2190 SAS_END_DEVICE) ··· 2248 2234 struct mpi3mr_sas_node *sas_expander; 2249 2235 unsigned long flags; 2250 2236 2251 - if (mrioc->reset_in_progress) 2237 + if (mrioc->reset_in_progress || mrioc->pci_err_recovery) 2252 2238 return; 2253 2239 2254 2240 if (!hba_port) ··· 2559 2545 return -EFAULT; 2560 2546 } 2561 2547 2548 + if (mrioc->pci_err_recovery) { 2549 + ioc_err(mrioc, "%s: pci error recovery in progress!\n", __func__); 2550 + return -EFAULT; 2551 + } 2552 + 2562 2553 data_out_sz = sizeof(struct phy_error_log_request); 2563 2554 data_in_sz = sizeof(struct phy_error_log_reply); 2564 2555 sz = data_out_sz + data_in_sz; ··· 2820 2801 2821 2802 if (mrioc->reset_in_progress) { 2822 2803 ioc_err(mrioc, "%s: host reset in progress!\n", 
__func__); 2804 + return -EFAULT; 2805 + } 2806 + 2807 + if (mrioc->pci_err_recovery) { 2808 + ioc_err(mrioc, "%s: pci error recovery in progress!\n", 2809 + __func__); 2823 2810 return -EFAULT; 2824 2811 } 2825 2812 ··· 3248 3223 3249 3224 if (mrioc->reset_in_progress) { 3250 3225 ioc_err(mrioc, "%s: host reset in progress!\n", __func__); 3226 + rc = -EFAULT; 3227 + goto out; 3228 + } 3229 + 3230 + if (mrioc->pci_err_recovery) { 3231 + ioc_err(mrioc, "%s: pci error recovery in progress!\n", __func__); 3251 3232 rc = -EFAULT; 3252 3233 goto out; 3253 3234 }
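The two comparison changes in mpi3mr_transport.c ('>' to '>=') are off-by-one fixes: an N-byte mask holds bit indices 0 through N*8 - 1, so index N*8 itself is already out of range, and the old test let exactly that value through before phy_mask was updated. In miniature (the 64-bit width here mirrors the driver's phy_mask and is an assumption of the sketch):

	static int mark_phy(u64 *phy_mask, unsigned int i)
	{
		/* valid indices are 0..63; with '>' instead of '>=',
		 * i == 64 passed the check and the shift was out of bounds */
		if (i >= sizeof(*phy_mask) * 8)
			return -EINVAL;

		*phy_mask |= BIT_ULL(i);
		return 0;
	}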
+1
drivers/scsi/pcmcia/aha152x_stub.c
··· 75 75 module_param(reset_delay, int, 0); 76 76 module_param(ext_trans, int, 0); 77 77 78 + MODULE_DESCRIPTION("Adaptec AHA152X-compatible PCMCIA SCSI card driver"); 78 79 MODULE_LICENSE("Dual MPL/GPL"); 79 80 80 81 /*====================================================================*/
+3 -1
drivers/scsi/pm8001/pm8001_sas.c
··· 166 166 unsigned long flags; 167 167 pm8001_ha = sas_phy->ha->lldd_ha; 168 168 phy = &pm8001_ha->phy[phy_id]; 169 - pm8001_ha->phy[phy_id].enable_completion = &completion; 170 169 171 170 if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { 172 171 /* ··· 189 190 rates->maximum_linkrate; 190 191 } 191 192 if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { 193 + pm8001_ha->phy[phy_id].enable_completion = &completion; 192 194 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); 193 195 wait_for_completion(&completion); 194 196 } ··· 198 198 break; 199 199 case PHY_FUNC_HARD_RESET: 200 200 if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { 201 + pm8001_ha->phy[phy_id].enable_completion = &completion; 201 202 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); 202 203 wait_for_completion(&completion); 203 204 } ··· 207 206 break; 208 207 case PHY_FUNC_LINK_RESET: 209 208 if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { 209 + pm8001_ha->phy[phy_id].enable_completion = &completion; 210 210 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); 211 211 wait_for_completion(&completion); 212 212 }
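The pm8001_sas.c change is a use-after-scope fix around an on-stack completion: enable_completion used to be pointed at the local completion unconditionally, so any path that returned without waiting left the phy holding a pointer into a dead stack frame, and a later phy-up event would complete() freed stack memory. Publishing the pointer only immediately before the wait closes that window. The pattern, condensed (foo_phy and start_phy are hypothetical stand-ins):

	static int foo_phy_op(struct foo_phy *phy)
	{
		DECLARE_COMPLETION_ONSTACK(completion);

		/* WRONG:
		 *	phy->enable_completion = &completion;
		 * followed by an early return leaves a dangling pointer
		 * that the event handler will later write through. */

		if (phy->state == PHY_LINK_DISABLE) {
			/* RIGHT: publish only on the path that waits */
			phy->enable_completion = &completion;
			start_phy(phy);
			wait_for_completion(&completion);
		}

		return 0;
	}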
+3 -3
drivers/scsi/pm8001/pm80xx_hwi.c
··· 568 568 pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version = 569 569 pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION); 570 570 571 - pm8001_dbg(pm8001_ha, DEV, 571 + pm8001_dbg(pm8001_ha, INIT, 572 572 "Main cfg table: sign:%x interface rev:%x fw_rev:%x\n", 573 573 pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature, 574 574 pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev, 575 575 pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev); 576 576 577 - pm8001_dbg(pm8001_ha, DEV, 577 + pm8001_dbg(pm8001_ha, INIT, 578 578 "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n", 579 579 pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset, 580 580 pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset, ··· 582 582 pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset, 583 583 pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset); 584 584 585 - pm8001_dbg(pm8001_ha, DEV, 585 + pm8001_dbg(pm8001_ha, INIT, 586 586 "Main cfg table; ila rev:%x Inactive fw rev:%x\n", 587 587 pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version, 588 588 pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version);
+1
drivers/scsi/ppa.c
··· 1155 1155 }; 1156 1156 module_parport_driver(ppa_driver); 1157 1157 1158 + MODULE_DESCRIPTION("IOMEGA PPA3 parallel port SCSI host adapter driver"); 1158 1159 MODULE_LICENSE("GPL");
+58 -40
drivers/scsi/qla2xxx/qla_bsg.c
··· 324 324 "request_sg_cnt=%x reply_sg_cnt=%x.\n", 325 325 bsg_job->request_payload.sg_cnt, 326 326 bsg_job->reply_payload.sg_cnt); 327 - rval = -EPERM; 327 + rval = -ENOBUFS; 328 328 goto done; 329 329 } 330 330 ··· 3059 3059 return ret; 3060 3060 } 3061 3061 3062 + static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job) 3063 + { 3064 + bool found = false; 3065 + struct fc_bsg_reply *bsg_reply = bsg_job->reply; 3066 + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); 3067 + struct qla_hw_data *ha = vha->hw; 3068 + srb_t *sp = NULL; 3069 + int cnt; 3070 + unsigned long flags; 3071 + struct req_que *req; 3072 + 3073 + spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3074 + req = qpair->req; 3075 + 3076 + for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 3077 + sp = req->outstanding_cmds[cnt]; 3078 + if (sp && 3079 + (sp->type == SRB_CT_CMD || 3080 + sp->type == SRB_ELS_CMD_HST || 3081 + sp->type == SRB_ELS_CMD_HST_NOLOGIN) && 3082 + sp->u.bsg_job == bsg_job) { 3083 + req->outstanding_cmds[cnt] = NULL; 3084 + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3085 + 3086 + if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) { 3087 + ql_log(ql_log_warn, vha, 0x7089, 3088 + "mbx abort_command failed.\n"); 3089 + bsg_reply->result = -EIO; 3090 + } else { 3091 + ql_dbg(ql_dbg_user, vha, 0x708a, 3092 + "mbx abort_command success.\n"); 3093 + bsg_reply->result = 0; 3094 + } 3095 + /* ref: INIT */ 3096 + kref_put(&sp->cmd_kref, qla2x00_sp_release); 3097 + 3098 + found = true; 3099 + goto done; 3100 + } 3101 + } 3102 + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3103 + 3104 + done: 3105 + return found; 3106 + } 3107 + 3062 3108 int 3063 3109 qla24xx_bsg_timeout(struct bsg_job *bsg_job) 3064 3110 { 3065 3111 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 3066 3112 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); 3067 3113 struct qla_hw_data *ha = vha->hw; 3068 - srb_t *sp; 3069 - int cnt, que; 3070 - unsigned long flags; 3071 - struct req_que *req; 3114 + int i; 3115 + struct qla_qpair *qpair; 3072 3116 3073 3117 ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. 
bsg ptr %p.\n", 3074 3118 __func__, bsg_job); ··· 3123 3079 qla_pci_set_eeh_busy(vha); 3124 3080 } 3125 3081 3082 + if (qla_bsg_found(ha->base_qpair, bsg_job)) 3083 + goto done; 3084 + 3126 3085 /* find the bsg job from the active list of commands */ 3127 - spin_lock_irqsave(&ha->hardware_lock, flags); 3128 - for (que = 0; que < ha->max_req_queues; que++) { 3129 - req = ha->req_q_map[que]; 3130 - if (!req) 3086 + for (i = 0; i < ha->max_qpairs; i++) { 3087 + qpair = vha->hw->queue_pair_map[i]; 3088 + if (!qpair) 3131 3089 continue; 3132 - 3133 - for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 3134 - sp = req->outstanding_cmds[cnt]; 3135 - if (sp && 3136 - (sp->type == SRB_CT_CMD || 3137 - sp->type == SRB_ELS_CMD_HST || 3138 - sp->type == SRB_ELS_CMD_HST_NOLOGIN || 3139 - sp->type == SRB_FXIOCB_BCMD) && 3140 - sp->u.bsg_job == bsg_job) { 3141 - req->outstanding_cmds[cnt] = NULL; 3142 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 3143 - 3144 - if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) { 3145 - ql_log(ql_log_warn, vha, 0x7089, 3146 - "mbx abort_command failed.\n"); 3147 - bsg_reply->result = -EIO; 3148 - } else { 3149 - ql_dbg(ql_dbg_user, vha, 0x708a, 3150 - "mbx abort_command success.\n"); 3151 - bsg_reply->result = 0; 3152 - } 3153 - spin_lock_irqsave(&ha->hardware_lock, flags); 3154 - goto done; 3155 - 3156 - } 3157 - } 3090 + if (qla_bsg_found(qpair, bsg_job)) 3091 + goto done; 3158 3092 } 3159 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 3093 + 3160 3094 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n"); 3161 3095 bsg_reply->result = -ENXIO; 3162 - return 0; 3163 3096 3164 3097 done: 3165 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 3166 - /* ref: INIT */ 3167 - kref_put(&sp->cmd_kref, qla2x00_sp_release); 3168 3098 return 0; 3169 3099 } 3170 3100
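qla24xx_bsg_timeout() previously walked every request queue under the global hardware_lock; the new qla_bsg_found() asks the base queue pair and then each mapped queue pair under its own qp_lock, which is where commands actually live on multi-queue adapters. The ordering it relies on: claim the slot while the lock is held, drop the lock, and only then issue the abort, since abort_command() ends up in mailbox handling that must not run under a spinlock. Condensed from the function above (error reporting and the sp->type filter are trimmed for clarity):

	static bool find_and_abort_bsg(struct qla_qpair *qpair,
				       struct bsg_job *bsg_job)
	{
		struct qla_hw_data *ha = qpair->hw;
		struct req_que *req;
		unsigned long flags;
		srb_t *sp;
		int cnt;

		spin_lock_irqsave(qpair->qp_lock_ptr, flags);
		req = qpair->req;
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (!sp || sp->u.bsg_job != bsg_job)
				continue;

			req->outstanding_cmds[cnt] = NULL;  /* claim under the lock */
			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

			ha->isp_ops->abort_command(sp);	/* may block: lock dropped */
			kref_put(&sp->cmd_kref, qla2x00_sp_release);
			return true;
		}
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return false;
	}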
+14 -3
drivers/scsi/qla2xxx/qla_def.h
··· 3309 3309 u8 node_name[8]; 3310 3310 }; 3311 3311 3312 + enum scan_step { 3313 + FAB_SCAN_START, 3314 + FAB_SCAN_GPNFT_FCP, 3315 + FAB_SCAN_GNNFT_FCP, 3316 + FAB_SCAN_GPNFT_NVME, 3317 + FAB_SCAN_GNNFT_NVME, 3318 + }; 3319 + 3312 3320 struct fab_scan { 3313 3321 struct fab_scan_rp *l; 3314 3322 u32 size; 3323 + u32 rscn_gen_start; 3324 + u32 rscn_gen_end; 3325 + enum scan_step step; 3315 3326 u16 scan_retry; 3316 3327 #define MAX_SCAN_RETRIES 5 3317 3328 enum scan_flags_t scan_flags; ··· 3548 3537 QLA_EVT_RELOGIN, 3549 3538 QLA_EVT_ASYNC_PRLO, 3550 3539 QLA_EVT_ASYNC_PRLO_DONE, 3551 - QLA_EVT_GPNFT, 3552 - QLA_EVT_GPNFT_DONE, 3553 - QLA_EVT_GNNFT_DONE, 3540 + QLA_EVT_SCAN_CMD, 3541 + QLA_EVT_SCAN_FINISH, 3554 3542 QLA_EVT_GFPNID, 3555 3543 QLA_EVT_SP_RETRY, 3556 3544 QLA_EVT_IIDMA, ··· 5040 5030 5041 5031 /* Counter to detect races between ELS and RSCN events */ 5042 5032 atomic_t generation_tick; 5033 + atomic_t rscn_gen; 5043 5034 /* Time when global fcport update has been scheduled */ 5044 5035 int total_fcport_update_gen; 5045 5036 /* List of pending LOGOs, protected by tgt_mutex */
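The new rscn_gen counter and the scan window fields give each fabric scan a bracket of RSCN generations: rscn_gen_start and rscn_gen_end are recorded (presumably when the scan starts and completes), and qla_ok_to_clear_rscn() in qla_gs.c below clears a port's scan_needed flag only if the port's recorded generation falls inside that bracket, i.e. the RSCN was covered by the scan rather than arriving after it. For the test to survive wrap-around of the u32 counter, val_is_in_range() can be written with plain unsigned arithmetic; a sketch (the helper's actual body is not part of these hunks):

	/* true iff val lies on the counter walk from start to end, even if
	 * the u32 counter wrapped in between: both subtractions wrap the
	 * same way, so a single compare does it */
	static inline bool val_is_in_range(u32 val, u32 start, u32 end)
	{
		return (val - start) <= (end - start);
	}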
+3 -3
drivers/scsi/qla2xxx/qla_gbl.h
··· 728 728 void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *); 729 729 int qla2x00_mgmt_svr_login(scsi_qla_host_t *); 730 730 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool); 731 - int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *); 732 - void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *); 733 - void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *); 731 + int qla_fab_async_scan(scsi_qla_host_t *, srb_t *); 732 + void qla_fab_scan_start(struct scsi_qla_host *); 733 + void qla_fab_scan_finish(scsi_qla_host_t *, srb_t *); 734 734 int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *); 735 735 int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *); 736 736 void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
+215 -258
drivers/scsi/qla2xxx/qla_gs.c
··· 1710 1710 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION); 1711 1711 alen = scnprintf( 1712 1712 eiter->a.orom_version, sizeof(eiter->a.orom_version), 1713 - "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]); 1713 + "%d.%02d", ha->efi_revision[1], ha->efi_revision[0]); 1714 1714 alen += FDMI_ATTR_ALIGNMENT(alen); 1715 1715 alen += FDMI_ATTR_TYPELEN(eiter); 1716 1716 eiter->len = cpu_to_be16(alen); ··· 3168 3168 return rc; 3169 3169 } 3170 3170 3171 - void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) 3171 + static bool qla_ok_to_clear_rscn(scsi_qla_host_t *vha, fc_port_t *fcport) 3172 + { 3173 + u32 rscn_gen; 3174 + 3175 + rscn_gen = atomic_read(&vha->rscn_gen); 3176 + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2017, 3177 + "%s %d %8phC rscn_gen %x start %x end %x current %x\n", 3178 + __func__, __LINE__, fcport->port_name, fcport->rscn_gen, 3179 + vha->scan.rscn_gen_start, vha->scan.rscn_gen_end, rscn_gen); 3180 + 3181 + if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_start, 3182 + vha->scan.rscn_gen_end)) 3183 + /* rscn came in before fabric scan */ 3184 + return true; 3185 + 3186 + if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_end, rscn_gen)) 3187 + /* rscn came in after fabric scan */ 3188 + return false; 3189 + 3190 + /* rare: fcport's scan_needed + rscn_gen must be stale */ 3191 + return true; 3192 + } 3193 + 3194 + void qla_fab_scan_finish(scsi_qla_host_t *vha, srb_t *sp) 3172 3195 { 3173 3196 fc_port_t *fcport; 3174 3197 u32 i, rc; ··· 3304 3281 (fcport->scan_needed && 3305 3282 fcport->port_type != FCT_INITIATOR && 3306 3283 fcport->port_type != FCT_NVME_INITIATOR)) { 3284 + fcport->scan_needed = 0; 3307 3285 qlt_schedule_sess_for_deletion(fcport); 3308 3286 } 3309 3287 fcport->d_id.b24 = rp->id.b24; 3310 - fcport->scan_needed = 0; 3311 3288 break; 3312 3289 } 3313 3290 ··· 3348 3325 do_delete = true; 3349 3326 } 3350 3327 3351 - fcport->scan_needed = 0; 3328 + if (qla_ok_to_clear_rscn(vha, fcport)) 3329 + fcport->scan_needed = 0; 3330 + 3352 3331 if (((qla_dual_mode_enabled(vha) || 3353 3332 qla_ini_mode_enabled(vha)) && 3354 3333 atomic_read(&fcport->state) == FCS_ONLINE) || ··· 3380 3355 fcport->port_name, fcport->loop_id, 3381 3356 fcport->login_retry); 3382 3357 } 3383 - fcport->scan_needed = 0; 3358 + 3359 + if (qla_ok_to_clear_rscn(vha, fcport)) 3360 + fcport->scan_needed = 0; 3384 3361 qla24xx_fcport_handle_login(vha, fcport); 3385 3362 } 3386 3363 } ··· 3406 3379 } 3407 3380 } 3408 3381 3409 - static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha, 3382 + static int qla2x00_post_next_scan_work(struct scsi_qla_host *vha, 3410 3383 srb_t *sp, int cmd) 3411 3384 { 3412 3385 struct qla_work_evt *e; 3413 - 3414 - if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE) 3415 - return QLA_PARAMETER_ERROR; 3416 3386 3417 3387 e = qla2x00_alloc_work(vha, cmd); 3418 3388 if (!e) ··· 3420 3396 return qla2x00_post_work(vha, e); 3421 3397 } 3422 3398 3423 - static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha, 3424 - srb_t *sp, int cmd) 3425 - { 3426 - struct qla_work_evt *e; 3427 - 3428 - if (cmd != QLA_EVT_GPNFT) 3429 - return QLA_PARAMETER_ERROR; 3430 - 3431 - e = qla2x00_alloc_work(vha, cmd); 3432 - if (!e) 3433 - return QLA_FUNCTION_FAILED; 3434 - 3435 - e->u.gpnft.fc4_type = FC4_TYPE_NVME; 3436 - e->u.gpnft.sp = sp; 3437 - 3438 - return qla2x00_post_work(vha, e); 3439 - } 3440 - 3441 3399 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha, 3442 3400 struct srb *sp) 3443 3401 { 
3444 3402 struct qla_hw_data *ha = vha->hw; 3445 3403 int num_fibre_dev = ha->max_fibre_devices; 3446 - struct ct_sns_req *ct_req = 3447 - (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3448 3404 struct ct_sns_gpnft_rsp *ct_rsp = 3449 3405 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; 3450 3406 struct ct_sns_gpn_ft_data *d; 3451 3407 struct fab_scan_rp *rp; 3452 - u16 cmd = be16_to_cpu(ct_req->command); 3453 - u8 fc4_type = sp->gen2; 3454 3408 int i, j, k; 3455 3409 port_id_t id; 3456 3410 u8 found; ··· 3447 3445 if (id.b24 == 0 || wwn == 0) 3448 3446 continue; 3449 3447 3450 - if (fc4_type == FC4_TYPE_FCP_SCSI) { 3451 - if (cmd == GPN_FT_CMD) { 3452 - rp = &vha->scan.l[j]; 3453 - rp->id = id; 3454 - memcpy(rp->port_name, d->port_name, 8); 3455 - j++; 3456 - rp->fc4type = FS_FC4TYPE_FCP; 3457 - } else { 3448 + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2025, 3449 + "%s %06x %8ph \n", 3450 + __func__, id.b24, d->port_name); 3451 + 3452 + switch (vha->scan.step) { 3453 + case FAB_SCAN_GPNFT_FCP: 3454 + rp = &vha->scan.l[j]; 3455 + rp->id = id; 3456 + memcpy(rp->port_name, d->port_name, 8); 3457 + j++; 3458 + rp->fc4type = FS_FC4TYPE_FCP; 3459 + break; 3460 + case FAB_SCAN_GNNFT_FCP: 3461 + for (k = 0; k < num_fibre_dev; k++) { 3462 + rp = &vha->scan.l[k]; 3463 + if (id.b24 == rp->id.b24) { 3464 + memcpy(rp->node_name, 3465 + d->port_name, 8); 3466 + break; 3467 + } 3468 + } 3469 + break; 3470 + case FAB_SCAN_GPNFT_NVME: 3471 + found = 0; 3472 + 3473 + for (k = 0; k < num_fibre_dev; k++) { 3474 + rp = &vha->scan.l[k]; 3475 + if (!memcmp(rp->port_name, d->port_name, 8)) { 3476 + /* 3477 + * Supports FC-NVMe & FCP 3478 + */ 3479 + rp->fc4type |= FS_FC4TYPE_NVME; 3480 + found = 1; 3481 + break; 3482 + } 3483 + } 3484 + 3485 + /* We found new FC-NVMe only port */ 3486 + if (!found) { 3458 3487 for (k = 0; k < num_fibre_dev; k++) { 3459 3488 rp = &vha->scan.l[k]; 3460 - if (id.b24 == rp->id.b24) { 3461 - memcpy(rp->node_name, 3462 - d->port_name, 8); 3489 + if (wwn_to_u64(rp->port_name)) { 3490 + continue; 3491 + } else { 3492 + rp->id = id; 3493 + memcpy(rp->port_name, d->port_name, 8); 3494 + rp->fc4type = FS_FC4TYPE_NVME; 3463 3495 break; 3464 3496 } 3465 3497 } 3466 3498 } 3467 - } else { 3468 - /* Search if the fibre device supports FC4_TYPE_NVME */ 3469 - if (cmd == GPN_FT_CMD) { 3470 - found = 0; 3471 - 3472 - for (k = 0; k < num_fibre_dev; k++) { 3473 - rp = &vha->scan.l[k]; 3474 - if (!memcmp(rp->port_name, 3475 - d->port_name, 8)) { 3476 - /* 3477 - * Supports FC-NVMe & FCP 3478 - */ 3479 - rp->fc4type |= FS_FC4TYPE_NVME; 3480 - found = 1; 3481 - break; 3482 - } 3483 - } 3484 - 3485 - /* We found new FC-NVMe only port */ 3486 - if (!found) { 3487 - for (k = 0; k < num_fibre_dev; k++) { 3488 - rp = &vha->scan.l[k]; 3489 - if (wwn_to_u64(rp->port_name)) { 3490 - continue; 3491 - } else { 3492 - rp->id = id; 3493 - memcpy(rp->port_name, 3494 - d->port_name, 8); 3495 - rp->fc4type = 3496 - FS_FC4TYPE_NVME; 3497 - break; 3498 - } 3499 - } 3500 - } 3501 - } else { 3502 - for (k = 0; k < num_fibre_dev; k++) { 3503 - rp = &vha->scan.l[k]; 3504 - if (id.b24 == rp->id.b24) { 3505 - memcpy(rp->node_name, 3506 - d->port_name, 8); 3507 - break; 3508 - } 3499 + break; 3500 + case FAB_SCAN_GNNFT_NVME: 3501 + for (k = 0; k < num_fibre_dev; k++) { 3502 + rp = &vha->scan.l[k]; 3503 + if (id.b24 == rp->id.b24) { 3504 + memcpy(rp->node_name, d->port_name, 8); 3505 + break; 3509 3506 } 3510 3507 } 3508 + break; 3509 + default: 3510 + break; 3511 3511 } 3512 3512 } 3513 3513 } 3514 3514 3515 - 
static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) 3515 + static void qla_async_scan_sp_done(srb_t *sp, int res) 3516 3516 { 3517 3517 struct scsi_qla_host *vha = sp->vha; 3518 - struct ct_sns_req *ct_req = 3519 - (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3520 - u16 cmd = be16_to_cpu(ct_req->command); 3521 - u8 fc4_type = sp->gen2; 3522 3518 unsigned long flags; 3523 3519 int rc; 3524 3520 3525 3521 /* gen2 field is holding the fc4type */ 3526 - ql_dbg(ql_dbg_disc, vha, 0xffff, 3527 - "Async done-%s res %x FC4Type %x\n", 3528 - sp->name, res, sp->gen2); 3522 + ql_dbg(ql_dbg_disc, vha, 0x2026, 3523 + "Async done-%s res %x step %x\n", 3524 + sp->name, res, vha->scan.step); 3529 3525 3530 3526 sp->rc = res; 3531 3527 if (res) { ··· 3547 3547 * sp for GNNFT_DONE work. This will allow all 3548 3548 * the resource to get freed up. 3549 3549 */ 3550 - rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, 3551 - QLA_EVT_GNNFT_DONE); 3550 + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH); 3552 3551 if (rc) { 3553 3552 /* Cleanup here to prevent memory leak */ 3554 3553 qla24xx_sp_unmap(vha, sp); ··· 3572 3573 3573 3574 qla2x00_find_free_fcp_nvme_slot(vha, sp); 3574 3575 3575 - if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled && 3576 - cmd == GNN_FT_CMD) { 3577 - spin_lock_irqsave(&vha->work_lock, flags); 3578 - vha->scan.scan_flags &= ~SF_SCANNING; 3579 - spin_unlock_irqrestore(&vha->work_lock, flags); 3576 + spin_lock_irqsave(&vha->work_lock, flags); 3577 + vha->scan.scan_flags &= ~SF_SCANNING; 3578 + spin_unlock_irqrestore(&vha->work_lock, flags); 3580 3579 3581 - sp->rc = res; 3582 - rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT); 3583 - if (rc) { 3584 - qla24xx_sp_unmap(vha, sp); 3585 - set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3586 - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3587 - } 3588 - return; 3589 - } 3580 + switch (vha->scan.step) { 3581 + case FAB_SCAN_GPNFT_FCP: 3582 + case FAB_SCAN_GPNFT_NVME: 3583 + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_CMD); 3584 + break; 3585 + case FAB_SCAN_GNNFT_FCP: 3586 + if (vha->flags.nvme_enabled) 3587 + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_CMD); 3588 + else 3589 + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH); 3590 3590 3591 - if (cmd == GPN_FT_CMD) { 3592 - rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, 3593 - QLA_EVT_GPNFT_DONE); 3594 - } else { 3595 - rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, 3596 - QLA_EVT_GNNFT_DONE); 3591 + break; 3592 + case FAB_SCAN_GNNFT_NVME: 3593 + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH); 3594 + break; 3595 + default: 3596 + /* should not be here */ 3597 + WARN_ON(1); 3598 + rc = QLA_FUNCTION_FAILED; 3599 + break; 3597 3600 } 3598 3601 3599 3602 if (rc) { ··· 3606 3605 } 3607 3606 } 3608 3607 3609 - /* 3610 - * Get WWNN list for fc4_type 3611 - * 3612 - * It is assumed the same SRB is re-used from GPNFT to avoid 3613 - * mem free & re-alloc 3614 - */ 3615 - static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, 3616 - u8 fc4_type) 3617 - { 3618 - int rval = QLA_FUNCTION_FAILED; 3619 - struct ct_sns_req *ct_req; 3620 - struct ct_sns_pkt *ct_sns; 3621 - unsigned long flags; 3622 - 3623 - if (!vha->flags.online) { 3624 - spin_lock_irqsave(&vha->work_lock, flags); 3625 - vha->scan.scan_flags &= ~SF_SCANNING; 3626 - spin_unlock_irqrestore(&vha->work_lock, flags); 3627 - goto done_free_sp; 3628 - } 3629 - 3630 - if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) 
{ 3631 - ql_log(ql_log_warn, vha, 0xffff, 3632 - "%s: req %p rsp %p are not setup\n", 3633 - __func__, sp->u.iocb_cmd.u.ctarg.req, 3634 - sp->u.iocb_cmd.u.ctarg.rsp); 3635 - spin_lock_irqsave(&vha->work_lock, flags); 3636 - vha->scan.scan_flags &= ~SF_SCANNING; 3637 - spin_unlock_irqrestore(&vha->work_lock, flags); 3638 - WARN_ON(1); 3639 - set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3640 - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3641 - goto done_free_sp; 3642 - } 3643 - 3644 - ql_dbg(ql_dbg_disc, vha, 0xfffff, 3645 - "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n", 3646 - __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size, 3647 - sp->u.iocb_cmd.u.ctarg.req_size); 3648 - 3649 - sp->type = SRB_CT_PTHRU_CMD; 3650 - sp->name = "gnnft"; 3651 - sp->gen1 = vha->hw->base_qpair->chip_reset; 3652 - sp->gen2 = fc4_type; 3653 - qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 3654 - qla2x00_async_gpnft_gnnft_sp_done); 3655 - 3656 - memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); 3657 - memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); 3658 - 3659 - ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 3660 - /* CT_IU preamble */ 3661 - ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, 3662 - sp->u.iocb_cmd.u.ctarg.rsp_size); 3663 - 3664 - /* GPN_FT req */ 3665 - ct_req->req.gpn_ft.port_type = fc4_type; 3666 - 3667 - sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; 3668 - sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 3669 - 3670 - ql_dbg(ql_dbg_disc, vha, 0xffff, 3671 - "Async-%s hdl=%x FC4Type %x.\n", sp->name, 3672 - sp->handle, ct_req->req.gpn_ft.port_type); 3673 - 3674 - rval = qla2x00_start_sp(sp); 3675 - if (rval != QLA_SUCCESS) { 3676 - goto done_free_sp; 3677 - } 3678 - 3679 - return rval; 3680 - 3681 - done_free_sp: 3682 - if (sp->u.iocb_cmd.u.ctarg.req) { 3683 - dma_free_coherent(&vha->hw->pdev->dev, 3684 - sp->u.iocb_cmd.u.ctarg.req_allocated_size, 3685 - sp->u.iocb_cmd.u.ctarg.req, 3686 - sp->u.iocb_cmd.u.ctarg.req_dma); 3687 - sp->u.iocb_cmd.u.ctarg.req = NULL; 3688 - } 3689 - if (sp->u.iocb_cmd.u.ctarg.rsp) { 3690 - dma_free_coherent(&vha->hw->pdev->dev, 3691 - sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 3692 - sp->u.iocb_cmd.u.ctarg.rsp, 3693 - sp->u.iocb_cmd.u.ctarg.rsp_dma); 3694 - sp->u.iocb_cmd.u.ctarg.rsp = NULL; 3695 - } 3696 - /* ref: INIT */ 3697 - kref_put(&sp->cmd_kref, qla2x00_sp_release); 3698 - 3699 - spin_lock_irqsave(&vha->work_lock, flags); 3700 - vha->scan.scan_flags &= ~SF_SCANNING; 3701 - if (vha->scan.scan_flags == 0) { 3702 - ql_dbg(ql_dbg_disc, vha, 0xffff, 3703 - "%s: schedule\n", __func__); 3704 - vha->scan.scan_flags |= SF_QUEUED; 3705 - schedule_delayed_work(&vha->scan.scan_work, 5); 3706 - } 3707 - spin_unlock_irqrestore(&vha->work_lock, flags); 3708 - 3709 - 3710 - return rval; 3711 - } /* GNNFT */ 3712 - 3713 - void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp) 3714 - { 3715 - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 3716 - "%s enter\n", __func__); 3717 - qla24xx_async_gnnft(vha, sp, sp->gen2); 3718 - } 3719 - 3720 3608 /* Get WWPN list for certain fc4_type */ 3721 - int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) 3609 + int qla_fab_async_scan(scsi_qla_host_t *vha, srb_t *sp) 3722 3610 { 3723 3611 int rval = QLA_FUNCTION_FAILED; 3724 3612 struct ct_sns_req *ct_req; 3725 3613 struct ct_sns_pkt *ct_sns; 3726 - u32 rspsz; 3614 + u32 rspsz = 0; 3727 3615 unsigned long flags; 3728 3616 3729 - 
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 3617 + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x200c, 3730 3618 "%s enter\n", __func__); 3731 3619 3732 3620 if (!vha->flags.online) ··· 3624 3734 spin_lock_irqsave(&vha->work_lock, flags); 3625 3735 if (vha->scan.scan_flags & SF_SCANNING) { 3626 3736 spin_unlock_irqrestore(&vha->work_lock, flags); 3627 - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 3737 + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2012, 3628 3738 "%s: scan active\n", __func__); 3629 3739 return rval; 3630 3740 } 3631 3741 vha->scan.scan_flags |= SF_SCANNING; 3742 + if (!sp) 3743 + vha->scan.step = FAB_SCAN_START; 3744 + 3632 3745 spin_unlock_irqrestore(&vha->work_lock, flags); 3633 3746 3634 - if (fc4_type == FC4_TYPE_FCP_SCSI) { 3635 - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 3747 + switch (vha->scan.step) { 3748 + case FAB_SCAN_START: 3749 + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2018, 3636 3750 "%s: Performing FCP Scan\n", __func__); 3637 - 3638 - if (sp) { 3639 - /* ref: INIT */ 3640 - kref_put(&sp->cmd_kref, qla2x00_sp_release); 3641 - } 3642 3751 3643 3752 /* ref: INIT */ 3644 3753 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); ··· 3654 3765 GFP_KERNEL); 3655 3766 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 3656 3767 if (!sp->u.iocb_cmd.u.ctarg.req) { 3657 - ql_log(ql_log_warn, vha, 0xffff, 3768 + ql_log(ql_log_warn, vha, 0x201a, 3658 3769 "Failed to allocate ct_sns request.\n"); 3659 3770 spin_lock_irqsave(&vha->work_lock, flags); 3660 3771 vha->scan.scan_flags &= ~SF_SCANNING; ··· 3662 3773 qla2x00_rel_sp(sp); 3663 3774 return rval; 3664 3775 } 3665 - sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; 3666 3776 3667 3777 rspsz = sizeof(struct ct_sns_gpnft_rsp) + 3668 3778 vha->hw->max_fibre_devices * ··· 3673 3785 GFP_KERNEL); 3674 3786 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz; 3675 3787 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 3676 - ql_log(ql_log_warn, vha, 0xffff, 3788 + ql_log(ql_log_warn, vha, 0x201b, 3677 3789 "Failed to allocate ct_sns request.\n"); 3678 3790 spin_lock_irqsave(&vha->work_lock, flags); 3679 3791 vha->scan.scan_flags &= ~SF_SCANNING; ··· 3693 3805 "%s scan list size %d\n", __func__, vha->scan.size); 3694 3806 3695 3807 memset(vha->scan.l, 0, vha->scan.size); 3696 - } else if (!sp) { 3697 - ql_dbg(ql_dbg_disc, vha, 0xffff, 3698 - "NVME scan did not provide SP\n"); 3699 - return rval; 3808 + 3809 + vha->scan.step = FAB_SCAN_GPNFT_FCP; 3810 + break; 3811 + case FAB_SCAN_GPNFT_FCP: 3812 + vha->scan.step = FAB_SCAN_GNNFT_FCP; 3813 + break; 3814 + case FAB_SCAN_GNNFT_FCP: 3815 + vha->scan.step = FAB_SCAN_GPNFT_NVME; 3816 + break; 3817 + case FAB_SCAN_GPNFT_NVME: 3818 + vha->scan.step = FAB_SCAN_GNNFT_NVME; 3819 + break; 3820 + case FAB_SCAN_GNNFT_NVME: 3821 + default: 3822 + /* should not be here */ 3823 + WARN_ON(1); 3824 + goto done_free_sp; 3700 3825 } 3701 3826 3702 - sp->type = SRB_CT_PTHRU_CMD; 3703 - sp->name = "gpnft"; 3704 - sp->gen1 = vha->hw->base_qpair->chip_reset; 3705 - sp->gen2 = fc4_type; 3706 - qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 3707 - qla2x00_async_gpnft_gnnft_sp_done); 3827 + if (!sp) { 3828 + ql_dbg(ql_dbg_disc, vha, 0x201c, 3829 + "scan did not provide SP\n"); 3830 + return rval; 3831 + } 3832 + if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) { 3833 + ql_log(ql_log_warn, vha, 0x201d, 3834 + "%s: req %p rsp %p are not setup\n", 3835 + __func__, sp->u.iocb_cmd.u.ctarg.req, 3836 + sp->u.iocb_cmd.u.ctarg.rsp); 3837 + 
spin_lock_irqsave(&vha->work_lock, flags); 3838 + vha->scan.scan_flags &= ~SF_SCANNING; 3839 + spin_unlock_irqrestore(&vha->work_lock, flags); 3840 + WARN_ON(1); 3841 + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3842 + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3843 + goto done_free_sp; 3844 + } 3708 3845 3709 3846 rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size; 3710 - memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); 3711 3847 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); 3848 + memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); 3849 + 3850 + 3851 + sp->type = SRB_CT_PTHRU_CMD; 3852 + sp->gen1 = vha->hw->base_qpair->chip_reset; 3853 + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 3854 + qla_async_scan_sp_done); 3712 3855 3713 3856 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 3714 - /* CT_IU preamble */ 3715 - ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz); 3716 3857 3717 - /* GPN_FT req */ 3718 - ct_req->req.gpn_ft.port_type = fc4_type; 3858 + /* CT_IU preamble */ 3859 + switch (vha->scan.step) { 3860 + case FAB_SCAN_GPNFT_FCP: 3861 + sp->name = "gpnft"; 3862 + ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz); 3863 + ct_req->req.gpn_ft.port_type = FC4_TYPE_FCP_SCSI; 3864 + sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; 3865 + break; 3866 + case FAB_SCAN_GNNFT_FCP: 3867 + sp->name = "gnnft"; 3868 + ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, rspsz); 3869 + ct_req->req.gpn_ft.port_type = FC4_TYPE_FCP_SCSI; 3870 + sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; 3871 + break; 3872 + case FAB_SCAN_GPNFT_NVME: 3873 + sp->name = "gpnft"; 3874 + ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz); 3875 + ct_req->req.gpn_ft.port_type = FC4_TYPE_NVME; 3876 + sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; 3877 + break; 3878 + case FAB_SCAN_GNNFT_NVME: 3879 + sp->name = "gnnft"; 3880 + ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, rspsz); 3881 + ct_req->req.gpn_ft.port_type = FC4_TYPE_NVME; 3882 + sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; 3883 + break; 3884 + default: 3885 + /* should not be here */ 3886 + WARN_ON(1); 3887 + goto done_free_sp; 3888 + } 3719 3889 3720 3890 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; 3721 3891 3722 - ql_dbg(ql_dbg_disc, vha, 0xffff, 3723 - "Async-%s hdl=%x FC4Type %x.\n", sp->name, 3724 - sp->handle, ct_req->req.gpn_ft.port_type); 3892 + ql_dbg(ql_dbg_disc, vha, 0x2003, 3893 + "%s: step %d, rsp size %d, req size %d hdl %x %s FC4TYPE %x \n", 3894 + __func__, vha->scan.step, sp->u.iocb_cmd.u.ctarg.rsp_size, 3895 + sp->u.iocb_cmd.u.ctarg.req_size, sp->handle, sp->name, 3896 + ct_req->req.gpn_ft.port_type); 3725 3897 3726 3898 rval = qla2x00_start_sp(sp); 3727 3899 if (rval != QLA_SUCCESS) { ··· 3812 3864 spin_lock_irqsave(&vha->work_lock, flags); 3813 3865 vha->scan.scan_flags &= ~SF_SCANNING; 3814 3866 if (vha->scan.scan_flags == 0) { 3815 - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 3867 + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2007, 3816 3868 "%s: Scan scheduled.\n", __func__); 3817 3869 vha->scan.scan_flags |= SF_QUEUED; 3818 3870 schedule_delayed_work(&vha->scan.scan_work, 5); ··· 3821 3873 3822 3874 3823 3875 return rval; 3876 + } 3877 + 3878 + void qla_fab_scan_start(struct scsi_qla_host *vha) 3879 + { 3880 + int rval; 3881 + 3882 + rval = qla_fab_async_scan(vha, NULL); 3883 + if (rval) 3884 + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3824 3885 } 3825 3886 3826 3887 void qla_scan_work_fn(struct 
work_struct *work)
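Each scan step in qla_fab_async_scan() above selects a CT command and FC-4 type pair, GPN_FT then GNN_FT, first with the FCP type and then with the NVMe type, reusing one SRB throughout. The sketch below restates that mapping as data; the opcode and type values (0x172/0x173, 0x08/0x28) are the customary FC-GS and FC-4 codes and should be treated as illustrative here.

#include <stdint.h>
#include <stdio.h>

enum scan_step { GPNFT_FCP, GNNFT_FCP, GPNFT_NVME, GNNFT_NVME };

struct ct_choice {
    const char *name;
    uint16_t ct_cmd;   /* GPN_FT or GNN_FT */
    uint8_t port_type; /* FC-4 type placed in the request */
};

static struct ct_choice choose(enum scan_step s)
{
    switch (s) {
    case GPNFT_FCP:  return (struct ct_choice){ "gpnft", 0x172, 0x08 };
    case GNNFT_FCP:  return (struct ct_choice){ "gnnft", 0x173, 0x08 };
    case GPNFT_NVME: return (struct ct_choice){ "gpnft", 0x172, 0x28 };
    default:         return (struct ct_choice){ "gnnft", 0x173, 0x28 };
    }
}

int main(void)
{
    for (int s = GPNFT_FCP; s <= GNNFT_NVME; s++) {
        struct ct_choice c = choose((enum scan_step)s);
        printf("step %d -> %s, ct_cmd %#x, fc4 %#x\n",
               s, c.name, c.ct_cmd, c.port_type);
    }
    return 0;
}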
+71 -23
drivers/scsi/qla2xxx/qla_init.c
··· 423 423 sp->type = SRB_LOGOUT_CMD; 424 424 sp->name = "logout"; 425 425 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 426 - qla2x00_async_logout_sp_done), 426 + qla2x00_async_logout_sp_done); 427 427 428 428 ql_dbg(ql_dbg_disc, vha, 0x2070, 429 429 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n", ··· 1842 1842 return qla2x00_post_work(vha, e); 1843 1843 } 1844 1844 1845 + static void qla_rscn_gen_tick(scsi_qla_host_t *vha, u32 *ret_rscn_gen) 1846 + { 1847 + *ret_rscn_gen = atomic_inc_return(&vha->rscn_gen); 1848 + /* memory barrier */ 1849 + wmb(); 1850 + } 1851 + 1845 1852 void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) 1846 1853 { 1847 1854 fc_port_t *fcport; 1848 1855 unsigned long flags; 1856 + u32 rscn_gen; 1849 1857 1850 1858 switch (ea->id.b.rsvd_1) { 1851 1859 case RSCN_PORT_ADDR: ··· 1883 1875 * Otherwise we're already in the middle of a relogin 1884 1876 */ 1885 1877 fcport->scan_needed = 1; 1886 - fcport->rscn_gen++; 1878 + qla_rscn_gen_tick(vha, &fcport->rscn_gen); 1887 1879 } 1888 1880 } else { 1889 1881 fcport->scan_needed = 1; 1890 - fcport->rscn_gen++; 1882 + qla_rscn_gen_tick(vha, &fcport->rscn_gen); 1891 1883 } 1892 1884 } 1893 1885 break; 1894 1886 case RSCN_AREA_ADDR: 1887 + qla_rscn_gen_tick(vha, &rscn_gen); 1895 1888 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1896 1889 if (fcport->flags & FCF_FCP2_DEVICE && 1897 1890 atomic_read(&fcport->state) == FCS_ONLINE) ··· 1900 1891 1901 1892 if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) { 1902 1893 fcport->scan_needed = 1; 1903 - fcport->rscn_gen++; 1894 + fcport->rscn_gen = rscn_gen; 1904 1895 } 1905 1896 } 1906 1897 break; 1907 1898 case RSCN_DOM_ADDR: 1899 + qla_rscn_gen_tick(vha, &rscn_gen); 1908 1900 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1909 1901 if (fcport->flags & FCF_FCP2_DEVICE && 1910 1902 atomic_read(&fcport->state) == FCS_ONLINE) ··· 1913 1903 1914 1904 if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) { 1915 1905 fcport->scan_needed = 1; 1916 - fcport->rscn_gen++; 1906 + fcport->rscn_gen = rscn_gen; 1917 1907 } 1918 1908 } 1919 1909 break; 1920 1910 case RSCN_FAB_ADDR: 1921 1911 default: 1912 + qla_rscn_gen_tick(vha, &rscn_gen); 1922 1913 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1923 1914 if (fcport->flags & FCF_FCP2_DEVICE && 1924 1915 atomic_read(&fcport->state) == FCS_ONLINE) 1925 1916 continue; 1926 1917 1927 1918 fcport->scan_needed = 1; 1928 - fcport->rscn_gen++; 1919 + fcport->rscn_gen = rscn_gen; 1929 1920 } 1930 1921 break; 1931 1922 } ··· 1935 1924 if (vha->scan.scan_flags == 0) { 1936 1925 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__); 1937 1926 vha->scan.scan_flags |= SF_QUEUED; 1927 + vha->scan.rscn_gen_start = atomic_read(&vha->rscn_gen); 1938 1928 schedule_delayed_work(&vha->scan.scan_work, 5); 1939 1929 } 1940 1930 spin_unlock_irqrestore(&vha->work_lock, flags); ··· 6405 6393 qlt_do_generation_tick(vha, &discovery_gen); 6406 6394 6407 6395 if (USE_ASYNC_SCAN(ha)) { 6408 - rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI, 6409 - NULL); 6410 - if (rval) 6411 - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 6396 + /* start of scan begins here */ 6397 + vha->scan.rscn_gen_end = atomic_read(&vha->rscn_gen); 6398 + qla_fab_scan_start(vha); 6412 6399 } else { 6413 6400 list_for_each_entry(fcport, &vha->vp_fcports, list) 6414 6401 fcport->scan_state = QLA_FCPORT_SCAN; ··· 8218 8207 struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status; 
8219 8208 bool valid_pri_image = false, valid_sec_image = false; 8220 8209 bool active_pri_image = false, active_sec_image = false; 8210 + int rc; 8221 8211 8222 8212 if (!ha->flt_region_aux_img_status_pri) { 8223 8213 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n"); 8224 8214 goto check_sec_image; 8225 8215 } 8226 8216 8227 - qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status, 8217 + rc = qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status, 8228 8218 ha->flt_region_aux_img_status_pri, 8229 8219 sizeof(pri_aux_image_status) >> 2); 8220 + if (rc) { 8221 + ql_log(ql_log_info, vha, 0x01a1, 8222 + "Unable to read Primary aux image(%x).\n", rc); 8223 + goto check_sec_image; 8224 + } 8230 8225 qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status); 8231 8226 8232 8227 if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) { ··· 8263 8246 goto check_valid_image; 8264 8247 } 8265 8248 8266 - qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status, 8249 + rc = qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status, 8267 8250 ha->flt_region_aux_img_status_sec, 8268 8251 sizeof(sec_aux_image_status) >> 2); 8252 + if (rc) { 8253 + ql_log(ql_log_info, vha, 0x01a2, 8254 + "Unable to read Secondary aux image(%x).\n", rc); 8255 + goto check_valid_image; 8256 + } 8257 + 8269 8258 qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status); 8270 8259 8271 8260 if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) { ··· 8329 8306 struct qla27xx_image_status pri_image_status, sec_image_status; 8330 8307 bool valid_pri_image = false, valid_sec_image = false; 8331 8308 bool active_pri_image = false, active_sec_image = false; 8309 + int rc; 8332 8310 8333 8311 if (!ha->flt_region_img_status_pri) { 8334 8312 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n"); ··· 8371 8347 goto check_valid_image; 8372 8348 } 8373 8349 8374 - qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), 8350 + rc = qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), 8375 8351 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2); 8352 + if (rc) { 8353 + ql_log(ql_log_info, vha, 0x01a3, 8354 + "Unable to read Secondary image status(%x).\n", rc); 8355 + goto check_valid_image; 8356 + } 8357 + 8376 8358 qla27xx_print_image(vha, "Secondary image", &sec_image_status); 8377 8359 8378 8360 if (qla27xx_check_image_status_signature(&sec_image_status)) { ··· 8450 8420 "FW: Loading firmware from flash (%x).\n", faddr); 8451 8421 8452 8422 dcode = (uint32_t *)req->ring; 8453 - qla24xx_read_flash_data(vha, dcode, faddr, 8); 8454 - if (qla24xx_risc_firmware_invalid(dcode)) { 8423 + rval = qla24xx_read_flash_data(vha, dcode, faddr, 8); 8424 + if (rval || qla24xx_risc_firmware_invalid(dcode)) { 8455 8425 ql_log(ql_log_fatal, vha, 0x008c, 8456 - "Unable to verify the integrity of flash firmware " 8457 - "image.\n"); 8426 + "Unable to verify the integrity of flash firmware image (rval %x).\n", rval); 8458 8427 ql_log(ql_log_fatal, vha, 0x008d, 8459 8428 "Firmware data: %08x %08x %08x %08x.\n", 8460 8429 dcode[0], dcode[1], dcode[2], dcode[3]); ··· 8467 8438 for (j = 0; j < segments; j++) { 8468 8439 ql_dbg(ql_dbg_init, vha, 0x008d, 8469 8440 "-> Loading segment %u...\n", j); 8470 - qla24xx_read_flash_data(vha, dcode, faddr, 10); 8441 + rval = qla24xx_read_flash_data(vha, dcode, faddr, 10); 8442 + if (rval) { 8443 + ql_log(ql_log_fatal, vha, 0x016a, 8444 + "-> Unable to read segment addr + size .\n"); 
8445 + return QLA_FUNCTION_FAILED; 8446 + } 8471 8447 risc_addr = be32_to_cpu((__force __be32)dcode[2]); 8472 8448 risc_size = be32_to_cpu((__force __be32)dcode[3]); 8473 8449 if (!*srisc_addr) { ··· 8488 8454 ql_dbg(ql_dbg_init, vha, 0x008e, 8489 8455 "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n", 8490 8456 fragment, risc_addr, faddr, dlen); 8491 - qla24xx_read_flash_data(vha, dcode, faddr, dlen); 8457 + rval = qla24xx_read_flash_data(vha, dcode, faddr, dlen); 8458 + if (rval) { 8459 + ql_log(ql_log_fatal, vha, 0x016b, 8460 + "-> Unable to read fragment(faddr %#x dlen %#lx).\n", 8461 + faddr, dlen); 8462 + return QLA_FUNCTION_FAILED; 8463 + } 8492 8464 for (i = 0; i < dlen; i++) 8493 8465 dcode[i] = swab32(dcode[i]); 8494 8466 ··· 8523 8483 fwdt->length = 0; 8524 8484 8525 8485 dcode = (uint32_t *)req->ring; 8526 - qla24xx_read_flash_data(vha, dcode, faddr, 7); 8486 + 8487 + rval = qla24xx_read_flash_data(vha, dcode, faddr, 7); 8488 + if (rval) { 8489 + ql_log(ql_log_fatal, vha, 0x016c, 8490 + "-> Unable to read template size.\n"); 8491 + goto failed; 8492 + } 8493 + 8527 8494 risc_size = be32_to_cpu((__force __be32)dcode[2]); 8528 8495 ql_dbg(ql_dbg_init, vha, 0x0161, 8529 8496 "-> fwdt%u template array at %#x (%#x dwords)\n", ··· 8556 8509 } 8557 8510 8558 8511 dcode = fwdt->template; 8559 - qla24xx_read_flash_data(vha, dcode, faddr, risc_size); 8512 + rval = qla24xx_read_flash_data(vha, dcode, faddr, risc_size); 8560 8513 8561 - if (!qla27xx_fwdt_template_valid(dcode)) { 8514 + if (rval || !qla27xx_fwdt_template_valid(dcode)) { 8562 8515 ql_log(ql_log_warn, vha, 0x0165, 8563 - "-> fwdt%u failed template validate\n", j); 8516 + "-> fwdt%u failed template validate (rval %x)\n", 8517 + j, rval); 8564 8518 goto failed; 8565 8519 } 8566 8520
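Two threads run through the qla_init.c hunks above: RSCN handling now stamps fcports from a shared atomic generation counter via qla_rscn_gen_tick(), and the firmware/flash load paths now check the return value of every qla24xx_read_flash_data() call instead of parsing possibly stale buffers. A userspace sketch of the generation-tick half, assuming C11 atomics; the default seq_cst ordering stands in for the driver's atomic_inc_return() plus wmb().

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint rscn_gen; /* stands in for vha->rscn_gen */

/* Bump the generation and return the new value, as qla_rscn_gen_tick()
 * does with atomic_inc_return(). */
static uint32_t rscn_gen_tick(void)
{
    return atomic_fetch_add(&rscn_gen, 1) + 1;
}

int main(void)
{
    uint32_t start, end, port_gen;

    start = atomic_load(&rscn_gen); /* scan.rscn_gen_start, at schedule time */
    port_gen = rscn_gen_tick();     /* an RSCN arrives and stamps the port */
    end = atomic_load(&rscn_gen);   /* scan.rscn_gen_end, when the scan begins */

    /* in [start, end] => the RSCN predates the scan: safe to clear */
    printf("start %u port %u end %u -> clear %d\n", start, port_gen, end,
           port_gen >= start && port_gen <= end);
    return 0;
}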
+8
drivers/scsi/qla2xxx/qla_inline.h
··· 631 631 } 632 632 return 0; 633 633 } 634 + 635 + static inline bool val_is_in_range(u32 val, u32 start, u32 end) 636 + { 637 + if (val >= start && val <= end) 638 + return true; 639 + else 640 + return false; 641 + }
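val_is_in_range() above is the helper qla_ok_to_clear_rscn() uses to place an fcport's RSCN generation relative to the scan window. It is inclusive on both ends, and, as an observation rather than something the patch states, it is a plain comparison, so a window that wraps past UINT32_MAX matches nothing.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static inline bool val_is_in_range(uint32_t val, uint32_t start, uint32_t end)
{
    return val >= start && val <= end; /* inclusive on both ends */
}

int main(void)
{
    assert(val_is_in_range(5, 5, 9));  /* both boundaries are in range */
    assert(val_is_in_range(9, 5, 9));
    assert(!val_is_in_range(4, 5, 9));
    /* a window that wraps past UINT32_MAX (start > end) matches nothing */
    assert(!val_is_in_range(1, 0xfffffff0u, 2));
    return 0;
}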
-6
drivers/scsi/qla2xxx/qla_isr.c
··· 3014 3014 } 3015 3015 } 3016 3016 3017 - struct scsi_dif_tuple { 3018 - __be16 guard; /* Checksum */ 3019 - __be16 app_tag; /* APPL identifier */ 3020 - __be32 ref_tag; /* Target LBA or indirect LBA */ 3021 - }; 3022 - 3023 3017 /* 3024 3018 * Checks the guard or meta-data for the type of error 3025 3019 * detected by the HBA. In case of errors, we set the
+1 -1
drivers/scsi/qla2xxx/qla_mid.c
··· 180 180 atomic_set(&vha->loop_state, LOOP_DOWN); 181 181 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 182 182 list_for_each_entry(fcport, &vha->vp_fcports, list) 183 - fcport->logout_on_delete = 0; 183 + fcport->logout_on_delete = 1; 184 184 185 185 if (!vha->hw->flags.edif_enabled) 186 186 qla2x00_wait_for_sess_deletion(vha);
+4 -1
drivers/scsi/qla2xxx/qla_nvme.c
··· 49 49 return 0; 50 50 } 51 51 52 - if (!vha->nvme_local_port && qla_nvme_register_hba(vha)) 52 + if (qla_nvme_register_hba(vha)) 53 + return 0; 54 + 55 + if (!vha->nvme_local_port) 53 56 return 0; 54 57 55 58 if (!(fcport->nvme_prli_service_param &
+5 -14
drivers/scsi/qla2xxx/qla_os.c
··· 1875 1875 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 1876 1876 sp = req->outstanding_cmds[cnt]; 1877 1877 if (sp) { 1878 - /* 1879 - * perform lockless completion during driver unload 1880 - */ 1881 1878 if (qla2x00_chip_is_down(vha)) { 1882 1879 req->outstanding_cmds[cnt] = NULL; 1883 - spin_unlock_irqrestore(qp->qp_lock_ptr, flags); 1884 1880 sp->done(sp, res); 1885 - spin_lock_irqsave(qp->qp_lock_ptr, flags); 1886 1881 continue; 1887 1882 } 1888 1883 ··· 4684 4689 qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt) 4685 4690 { 4686 4691 u32 temp; 4687 - struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb; 4692 + struct init_cb_81xx *icb = (struct init_cb_81xx *)vha->hw->init_cb; 4688 4693 *ret_cnt = FW_DEF_EXCHANGES_CNT; 4689 4694 4690 4695 if (max_cnt > vha->hw->max_exchg) ··· 5558 5563 qla2x00_async_prlo_done(vha, e->u.logio.fcport, 5559 5564 e->u.logio.data); 5560 5565 break; 5561 - case QLA_EVT_GPNFT: 5562 - qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type, 5563 - e->u.gpnft.sp); 5566 + case QLA_EVT_SCAN_CMD: 5567 + qla_fab_async_scan(vha, e->u.iosb.sp); 5564 5568 break; 5565 - case QLA_EVT_GPNFT_DONE: 5566 - qla24xx_async_gpnft_done(vha, e->u.iosb.sp); 5567 - break; 5568 - case QLA_EVT_GNNFT_DONE: 5569 - qla24xx_async_gnnft_done(vha, e->u.iosb.sp); 5569 + case QLA_EVT_SCAN_FINISH: 5570 + qla_fab_scan_finish(vha, e->u.iosb.sp); 5570 5571 break; 5571 5572 case QLA_EVT_GFPNID: 5572 5573 qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
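Besides folding the GPNFT/GNNFT events into QLA_EVT_SCAN_CMD and QLA_EVT_SCAN_FINISH, the qla_os.c hunk fixes a cast: init_cb is already a pointer, so (struct init_cb_81xx *)&vha->hw->init_cb reinterpreted the pointer variable's own storage rather than the init block it points to. A minimal sketch of that bug class, with illustrative types:

#include <stdio.h>
#include <stdlib.h>

struct init_cb { int version; };
struct hw { struct init_cb *init_cb; }; /* the member is a pointer */

int main(void)
{
    struct hw hw;

    hw.init_cb = malloc(sizeof(*hw.init_cb));
    if (!hw.init_cb)
        return 1;
    hw.init_cb->version = 0x8101;

    struct init_cb *ok  = (struct init_cb *)hw.init_cb;  /* the block */
    struct init_cb *bad = (struct init_cb *)&hw.init_cb; /* the pointer's own bytes */

    printf("ok->version = %#x\n", ok->version);
    (void)bad; /* dereferencing bad would read the pointer, not the block */
    free(hw.init_cb);
    return 0;
}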
+75 -33
drivers/scsi/qla2xxx/qla_sup.c
··· 555 555 struct qla_flt_location *fltl = (void *)req->ring; 556 556 uint32_t *dcode = (uint32_t *)req->ring; 557 557 uint8_t *buf = (void *)req->ring, *bcode, last_image; 558 + int rc; 558 559 559 560 /* 560 561 * FLT-location structure resides after the last PCI region. ··· 585 584 pcihdr = 0; 586 585 do { 587 586 /* Verify PCI expansion ROM header. */ 588 - qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); 587 + rc = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); 588 + if (rc) { 589 + ql_log(ql_log_info, vha, 0x016d, 590 + "Unable to read PCI Expansion Rom Header (%x).\n", rc); 591 + return QLA_FUNCTION_FAILED; 592 + } 589 593 bcode = buf + (pcihdr % 4); 590 594 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) 591 595 goto end; 592 596 593 597 /* Locate PCI data structure. */ 594 598 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); 595 - qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); 599 + rc = qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); 600 + if (rc) { 601 + ql_log(ql_log_info, vha, 0x0179, 602 + "Unable to read PCI Data Structure (%x).\n", rc); 603 + return QLA_FUNCTION_FAILED; 604 + } 596 605 bcode = buf + (pcihdr % 4); 597 606 598 607 /* Validate signature of PCI data structure. */ ··· 617 606 } while (!last_image); 618 607 619 608 /* Now verify FLT-location structure. */ 620 - qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2); 609 + rc = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2); 610 + if (rc) { 611 + ql_log(ql_log_info, vha, 0x017a, 612 + "Unable to read FLT (%x).\n", rc); 613 + return QLA_FUNCTION_FAILED; 614 + } 621 615 if (memcmp(fltl->sig, "QFLT", 4)) 622 616 goto end; 623 617 ··· 2621 2605 uint32_t offset, uint32_t length) 2622 2606 { 2623 2607 struct qla_hw_data *ha = vha->hw; 2608 + int rc; 2624 2609 2625 2610 /* Suspend HBA. */ 2626 2611 scsi_block_requests(vha->host); 2627 2612 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 2628 2613 2629 2614 /* Go with read. */ 2630 - qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2); 2615 + rc = qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2); 2616 + if (rc) { 2617 + ql_log(ql_log_info, vha, 0x01a0, 2618 + "Unable to perform optrom read(%x).\n", rc); 2619 + } 2631 2620 2632 2621 /* Resume HBA. */ 2633 2622 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); ··· 3433 3412 struct active_regions active_regions = { }; 3434 3413 3435 3414 if (IS_P3P_TYPE(ha)) 3436 - return ret; 3415 + return QLA_SUCCESS; 3437 3416 3438 3417 if (!mbuf) 3439 3418 return QLA_FUNCTION_FAILED; ··· 3453 3432 3454 3433 do { 3455 3434 /* Verify PCI expansion ROM header. */ 3456 - qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); 3435 + ret = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); 3436 + if (ret) { 3437 + ql_log(ql_log_info, vha, 0x017d, 3438 + "Unable to read PCI EXP Rom Header(%x).\n", ret); 3439 + return QLA_FUNCTION_FAILED; 3440 + } 3441 + 3457 3442 bcode = mbuf + (pcihdr % 4); 3458 3443 if (memcmp(bcode, "\x55\xaa", 2)) { 3459 3444 /* No signature */ 3460 3445 ql_log(ql_log_fatal, vha, 0x0059, 3461 3446 "No matching ROM signature.\n"); 3462 - ret = QLA_FUNCTION_FAILED; 3463 - break; 3447 + return QLA_FUNCTION_FAILED; 3464 3448 } 3465 3449 3466 3450 /* Locate PCI data structure. 
*/ 3467 3451 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); 3468 3452 3469 - qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); 3453 + ret = qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); 3454 + if (ret) { 3455 + ql_log(ql_log_info, vha, 0x018e, 3456 + "Unable to read PCI Data Structure (%x).\n", ret); 3457 + return QLA_FUNCTION_FAILED; 3458 + } 3459 + 3470 3460 bcode = mbuf + (pcihdr % 4); 3471 3461 3472 3462 /* Validate signature of PCI data structure. */ ··· 3486 3454 ql_log(ql_log_fatal, vha, 0x005a, 3487 3455 "PCI data struct not found pcir_adr=%x.\n", pcids); 3488 3456 ql_dump_buffer(ql_dbg_init, vha, 0x0059, dcode, 32); 3489 - ret = QLA_FUNCTION_FAILED; 3490 - break; 3457 + return QLA_FUNCTION_FAILED; 3491 3458 } 3492 3459 3493 3460 /* Read version */ ··· 3538 3507 faddr = ha->flt_region_fw_sec; 3539 3508 } 3540 3509 3541 - qla24xx_read_flash_data(vha, dcode, faddr, 8); 3542 - if (qla24xx_risc_firmware_invalid(dcode)) { 3543 - ql_log(ql_log_warn, vha, 0x005f, 3544 - "Unrecognized fw revision at %x.\n", 3545 - ha->flt_region_fw * 4); 3546 - ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32); 3510 + ret = qla24xx_read_flash_data(vha, dcode, faddr, 8); 3511 + if (ret) { 3512 + ql_log(ql_log_info, vha, 0x019e, 3513 + "Unable to read FW version (%x).\n", ret); 3514 + return ret; 3547 3515 } else { 3548 - for (i = 0; i < 4; i++) 3549 - ha->fw_revision[i] = 3516 + if (qla24xx_risc_firmware_invalid(dcode)) { 3517 + ql_log(ql_log_warn, vha, 0x005f, 3518 + "Unrecognized fw revision at %x.\n", 3519 + ha->flt_region_fw * 4); 3520 + ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32); 3521 + } else { 3522 + for (i = 0; i < 4; i++) 3523 + ha->fw_revision[i] = 3550 3524 be32_to_cpu((__force __be32)dcode[4+i]); 3551 - ql_dbg(ql_dbg_init, vha, 0x0060, 3552 - "Firmware revision (flash) %u.%u.%u (%x).\n", 3553 - ha->fw_revision[0], ha->fw_revision[1], 3554 - ha->fw_revision[2], ha->fw_revision[3]); 3525 + ql_dbg(ql_dbg_init, vha, 0x0060, 3526 + "Firmware revision (flash) %u.%u.%u (%x).\n", 3527 + ha->fw_revision[0], ha->fw_revision[1], 3528 + ha->fw_revision[2], ha->fw_revision[3]); 3529 + } 3555 3530 } 3556 3531 3557 3532 /* Check for golden firmware and get version if available */ ··· 3568 3531 3569 3532 memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version)); 3570 3533 faddr = ha->flt_region_gold_fw; 3571 - qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8); 3572 - if (qla24xx_risc_firmware_invalid(dcode)) { 3573 - ql_log(ql_log_warn, vha, 0x0056, 3574 - "Unrecognized golden fw at %#x.\n", faddr); 3575 - ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32); 3534 + ret = qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8); 3535 + if (ret) { 3536 + ql_log(ql_log_info, vha, 0x019f, 3537 + "Unable to read Gold FW version (%x).\n", ret); 3576 3538 return ret; 3539 + } else { 3540 + if (qla24xx_risc_firmware_invalid(dcode)) { 3541 + ql_log(ql_log_warn, vha, 0x0056, 3542 + "Unrecognized golden fw at %#x.\n", faddr); 3543 + ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32); 3544 + return QLA_FUNCTION_FAILED; 3545 + } 3546 + 3547 + for (i = 0; i < 4; i++) 3548 + ha->gold_fw_version[i] = 3549 + be32_to_cpu((__force __be32)dcode[4+i]); 3577 3550 } 3578 - 3579 - for (i = 0; i < 4; i++) 3580 - ha->gold_fw_version[i] = 3581 - be32_to_cpu((__force __be32)dcode[4+i]); 3582 - 3583 3551 return ret; 3584 3552 } 3585 3553
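qla_sup.c applies the same hardening as qla_init.c: every flash read captures a return code, logs it, and propagates a failure instead of parsing whatever was left in the buffer. A compact sketch of the check-then-parse pattern; read_flash() is a stub and the status values here are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { QLA_SUCCESS = 0, QLA_FUNCTION_FAILED = 0x101 }; /* illustrative */

/* Stub for qla24xx_read_flash_data(): nonzero means the read failed and
 * the buffer contents must not be trusted. */
static int read_flash(uint32_t *buf, uint32_t faddr, uint32_t dwords)
{
    if (faddr == 0) /* pretend address 0 is unreadable */
        return QLA_FUNCTION_FAILED;
    memset(buf, 0xab, dwords * sizeof(*buf));
    return QLA_SUCCESS;
}

static int read_image_header(uint32_t faddr)
{
    uint32_t hdr[8];
    int rc = read_flash(hdr, faddr, 8);

    if (rc) { /* bail before parsing a stale buffer */
        fprintf(stderr, "unable to read header (%x)\n", rc);
        return rc;
    }
    printf("hdr[0] = %#x\n", hdr[0]);
    return QLA_SUCCESS;
}

int main(void)
{
    read_image_header(0);             /* error path */
    return read_image_header(0x1000); /* success path */
}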
+2 -2
drivers/scsi/qla2xxx/qla_version.h
··· 6 6 /* 7 7 * Driver version 8 8 */ 9 - #define QLA2XXX_VERSION "10.02.09.200-k" 9 + #define QLA2XXX_VERSION "10.02.09.300-k" 10 10 11 11 #define QLA_DRIVER_MAJOR_VER 10 12 12 #define QLA_DRIVER_MINOR_VER 2 13 13 #define QLA_DRIVER_PATCH_VER 9 14 - #define QLA_DRIVER_BETA_VER 200 14 + #define QLA_DRIVER_BETA_VER 300
+1
drivers/scsi/scsi_common.c
··· 12 12 #include <asm/unaligned.h> 13 13 #include <scsi/scsi_common.h> 14 14 15 + MODULE_DESCRIPTION("SCSI functions used by both the initiator and the target code"); 15 16 MODULE_LICENSE("GPL v2"); 16 17 17 18 /* Command group 3 is reserved and should never be used. */
+5 -6
drivers/scsi/scsi_devinfo.c
··· 39 39 static char scsi_dev_flags[256]; 40 40 41 41 /* 42 - * scsi_static_device_list: deprecated list of devices that require 43 - * settings that differ from the default, includes black-listed (broken) 44 - * devices. The entries here are added to the tail of scsi_dev_info_list 45 - * via scsi_dev_info_list_init. 42 + * scsi_static_device_list: list of devices that require settings that differ 43 + * from the default, includes black-listed (broken) devices. The entries here 44 + * are added to the tail of scsi_dev_info_list via scsi_dev_info_list_init. 46 45 * 47 - * Do not add to this list, use the command line or proc interface to add 48 - * to the scsi_dev_info_list. This table will eventually go away. 46 + * If possible, set the BLIST_* flags from inside a SCSI LLD rather than 47 + * adding an entry to this list. 49 48 */ 50 49 static struct { 51 50 char *vendor;
+1 -2
drivers/scsi/scsi_scan.c
··· 334 334 sdev->sg_reserved_size = INT_MAX; 335 335 336 336 scsi_init_limits(shost, &lim); 337 - q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, NULL); 337 + q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, sdev); 338 338 if (IS_ERR(q)) { 339 339 /* release fn is set up in scsi_sysfs_device_initialise, so 340 340 * have to free and put manually here */ ··· 344 344 } 345 345 kref_get(&sdev->host->tagset_refcnt); 346 346 sdev->request_queue = q; 347 - q->queuedata = sdev; 348 347 349 348 depth = sdev->host->cmd_per_lun ?: 1; 350 349
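scsi_scan.c now hands the scsi_device to blk_mq_alloc_queue() instead of assigning q->queuedata after the queue already exists, so the queue is never visible without its owner attached. A generic sketch of the set-at-allocation pattern, with illustrative types:

#include <stdio.h>
#include <stdlib.h>

struct queue { void *queuedata; };

/* The owner is supplied at allocation time, so no caller can ever see
 * the queue with queuedata still NULL. */
static struct queue *queue_alloc(void *owner)
{
    struct queue *q = calloc(1, sizeof(*q));

    if (q)
        q->queuedata = owner; /* set before the queue is published */
    return q;
}

int main(void)
{
    int dev;
    struct queue *q = queue_alloc(&dev);

    printf("owner attached at allocation: %d\n", q && q->queuedata == &dev);
    free(q);
    return 0;
}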
+1
drivers/scsi/sun3_scsi.c
··· 666 666 module_platform_driver_probe(sun3_scsi_driver, sun3_scsi_probe); 667 667 668 668 MODULE_ALIAS("platform:" DRV_MODULE_NAME); 669 + MODULE_DESCRIPTION("Sun3 NCR5380 SCSI controller driver"); 669 670 MODULE_LICENSE("GPL");
+62 -27
drivers/ufs/core/ufs-mcq.c
··· 18 18 #include <linux/iopoll.h> 19 19 20 20 #define MAX_QUEUE_SUP GENMASK(7, 0) 21 + #define QCFGPTR GENMASK(23, 16) 21 22 #define UFS_MCQ_MIN_RW_QUEUES 2 22 23 #define UFS_MCQ_MIN_READ_QUEUES 0 23 24 #define UFS_MCQ_MIN_POLL_QUEUES 0 ··· 26 25 #define QUEUE_ID_OFFSET 16 27 26 28 27 #define MCQ_CFG_MAC_MASK GENMASK(16, 8) 29 - #define MCQ_QCFG_SIZE 0x40 30 28 #define MCQ_ENTRY_SIZE_IN_DWORD 8 31 29 #define CQE_UCD_BA GENMASK_ULL(63, 7) 32 30 ··· 117 117 } 118 118 119 119 /** 120 + * ufshcd_mcq_queue_cfg_addr - get an start address of the MCQ Queue Config 121 + * Registers. 122 + * @hba: per adapter instance 123 + * 124 + * Return: Start address of MCQ Queue Config Registers in HCI 125 + */ 126 + unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba) 127 + { 128 + return FIELD_GET(QCFGPTR, hba->mcq_capabilities) * 0x200; 129 + } 130 + EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr); 131 + 132 + /** 120 133 * ufshcd_mcq_decide_queue_depth - decide the queue depth 121 134 * @hba: per adapter instance 122 135 * ··· 137 124 * 138 125 * MAC - Max. Active Command of the Host Controller (HC) 139 126 * HC wouldn't send more than this commands to the device. 140 - * It is mandatory to implement get_hba_mac() to enable MCQ mode. 141 127 * Calculates and adjusts the queue depth based on the depth 142 128 * supported by the HC and ufs device. 143 129 */ ··· 144 132 { 145 133 int mac; 146 134 147 - /* Mandatory to implement get_hba_mac() */ 148 - mac = ufshcd_mcq_vops_get_hba_mac(hba); 149 - if (mac < 0) { 150 - dev_err(hba->dev, "Failed to get mac, err=%d\n", mac); 151 - return mac; 135 + if (!hba->vops || !hba->vops->get_hba_mac) { 136 + /* 137 + * Extract the maximum number of active transfer tasks value 138 + * from the host controller capabilities register. This value is 139 + * 0-based. 140 + */ 141 + hba->capabilities = 142 + ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); 143 + mac = hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ; 144 + mac++; 145 + } else { 146 + mac = hba->vops->get_hba_mac(hba); 152 147 } 148 + if (mac < 0) 149 + goto err; 153 150 154 151 WARN_ON_ONCE(!hba->dev_info.bqueuedepth); 155 152 /* ··· 167 146 * shared queuing architecture is enabled. 168 147 */ 169 148 return min_t(int, mac, hba->dev_info.bqueuedepth); 149 + 150 + err: 151 + dev_err(hba->dev, "Failed to get mac, err=%d\n", mac); 152 + return mac; 170 153 } 171 154 172 155 static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba) ··· 187 162 if (hba_maxq < tot_queues) { 188 163 dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n", 189 164 tot_queues, hba_maxq); 165 + return -EOPNOTSUPP; 166 + } 167 + 168 + /* 169 + * Device should support at least one I/O queue to handle device 170 + * commands via hba->dev_cmd_queue. 
171 + */ 172 + if (hba_maxq == poll_queues) { 173 + dev_err(hba->dev, "At least one non-poll queue required\n"); 190 174 return -EOPNOTSUPP; 191 175 } 192 176 ··· 260 226 261 227 return 0; 262 228 } 263 - 264 - 265 - /* Operation and runtime registers configuration */ 266 - #define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i)) 267 - #define MCQ_OPR_OFFSET_n(p, i) \ 268 - (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i)) 269 229 270 230 static void __iomem *mcq_opr_base(struct ufs_hba *hba, 271 231 enum ufshcd_mcq_opr n, int i) ··· 365 337 366 338 /* Submission Queue Lower Base Address */ 367 339 ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr), 368 - MCQ_CFG_n(REG_SQLBA, i)); 340 + ufshcd_mcq_cfg_offset(REG_SQLBA, i)); 369 341 /* Submission Queue Upper Base Address */ 370 342 ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr), 371 - MCQ_CFG_n(REG_SQUBA, i)); 343 + ufshcd_mcq_cfg_offset(REG_SQUBA, i)); 372 344 /* Submission Queue Doorbell Address Offset */ 373 - ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i), 374 - MCQ_CFG_n(REG_SQDAO, i)); 345 + ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i), 346 + ufshcd_mcq_cfg_offset(REG_SQDAO, i)); 375 347 /* Submission Queue Interrupt Status Address Offset */ 376 - ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i), 377 - MCQ_CFG_n(REG_SQISAO, i)); 348 + ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i), 349 + ufshcd_mcq_cfg_offset(REG_SQISAO, i)); 378 350 379 351 /* Completion Queue Lower Base Address */ 380 352 ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr), 381 - MCQ_CFG_n(REG_CQLBA, i)); 353 + ufshcd_mcq_cfg_offset(REG_CQLBA, i)); 382 354 /* Completion Queue Upper Base Address */ 383 355 ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr), 384 - MCQ_CFG_n(REG_CQUBA, i)); 356 + ufshcd_mcq_cfg_offset(REG_CQUBA, i)); 385 357 /* Completion Queue Doorbell Address Offset */ 386 - ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i), 387 - MCQ_CFG_n(REG_CQDAO, i)); 358 + ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i), 359 + ufshcd_mcq_cfg_offset(REG_CQDAO, i)); 388 360 /* Completion Queue Interrupt Status Address Offset */ 389 - ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i), 390 - MCQ_CFG_n(REG_CQISAO, i)); 361 + ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i), 362 + ufshcd_mcq_cfg_offset(REG_CQISAO, i)); 391 363 392 364 /* Save the base addresses for quicker access */ 393 365 hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP; ··· 404 376 405 377 /* Completion Queue Enable|Size to Completion Queue Attribute */ 406 378 ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize, 407 - MCQ_CFG_n(REG_CQATTR, i)); 379 + ufshcd_mcq_cfg_offset(REG_CQATTR, i)); 408 380 409 381 /* 410 382 * Submission Qeueue Enable|Size|Completion Queue ID to ··· 412 384 */ 413 385 ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize | 414 386 (i << QUEUE_ID_OFFSET), 415 - MCQ_CFG_n(REG_SQATTR, i)); 387 + ufshcd_mcq_cfg_offset(REG_SQATTR, i)); 416 388 } 417 389 } 418 390 EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational); ··· 427 399 void ufshcd_mcq_enable(struct ufs_hba *hba) 428 400 { 429 401 ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG); 402 + hba->mcq_enabled = true; 430 403 } 431 404 EXPORT_SYMBOL_GPL(ufshcd_mcq_enable); 405 + 406 + void ufshcd_mcq_disable(struct ufs_hba *hba) 407 + { 408 + ufshcd_rmwl(hba, MCQ_MODE_SELECT, 0, REG_UFS_MEM_CFG); 409 + hba->mcq_enabled = false; 410 + } 432 411 433 412 void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg) 434 413 {
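ufs-mcq.c replaces the fixed MCQ_QCFG_SIZE/MCQ_CFG_n arithmetic with helpers, and the new ufshcd_mcq_queue_cfg_addr() derives the config-register base from the QCFGPTR field, bits 23:16 of the MCQ capabilities register, scaled by 0x200. A userspace restatement of that computation with the mask and shift written out; ufshcd_mcq_cfg_offset() itself is not defined in this hunk, so only the base address is reproduced here.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define QCFGPTR_MASK  0x00ff0000u /* GENMASK(23, 16) written out */
#define QCFGPTR_SHIFT 16

/* Restates ufshcd_mcq_queue_cfg_addr(): FIELD_GET(QCFGPTR, caps) * 0x200 */
static uint32_t mcq_queue_cfg_addr(uint32_t mcq_caps)
{
    return ((mcq_caps & QCFGPTR_MASK) >> QCFGPTR_SHIFT) * 0x200;
}

int main(void)
{
    /* e.g. QCFGPTR == 2 puts the queue config registers at 0x400 */
    assert(mcq_queue_cfg_addr(0x00020000u) == 0x400);
    printf("cfg base = %#x\n", mcq_queue_cfg_addr(0x00020000u));
    return 0;
}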
+72 -1
drivers/ufs/core/ufs-sysfs.c
··· 1340 1340 .attrs = ufs_sysfs_device_flags, 1341 1341 }; 1342 1342 1343 + static ssize_t max_number_of_rtt_show(struct device *dev, 1344 + struct device_attribute *attr, char *buf) 1345 + { 1346 + struct ufs_hba *hba = dev_get_drvdata(dev); 1347 + u32 rtt; 1348 + int ret; 1349 + 1350 + down(&hba->host_sem); 1351 + if (!ufshcd_is_user_access_allowed(hba)) { 1352 + up(&hba->host_sem); 1353 + return -EBUSY; 1354 + } 1355 + 1356 + ufshcd_rpm_get_sync(hba); 1357 + ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, 1358 + QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt); 1359 + ufshcd_rpm_put_sync(hba); 1360 + 1361 + if (ret) 1362 + goto out; 1363 + 1364 + ret = sysfs_emit(buf, "0x%08X\n", rtt); 1365 + 1366 + out: 1367 + up(&hba->host_sem); 1368 + return ret; 1369 + } 1370 + 1371 + static ssize_t max_number_of_rtt_store(struct device *dev, 1372 + struct device_attribute *attr, 1373 + const char *buf, size_t count) 1374 + { 1375 + struct ufs_hba *hba = dev_get_drvdata(dev); 1376 + struct ufs_dev_info *dev_info = &hba->dev_info; 1377 + struct scsi_device *sdev; 1378 + unsigned int rtt; 1379 + int ret; 1380 + 1381 + if (kstrtouint(buf, 0, &rtt)) 1382 + return -EINVAL; 1383 + 1384 + if (rtt > dev_info->rtt_cap) { 1385 + dev_err(dev, "rtt can be at most bDeviceRTTCap\n"); 1386 + return -EINVAL; 1387 + } 1388 + 1389 + down(&hba->host_sem); 1390 + if (!ufshcd_is_user_access_allowed(hba)) { 1391 + ret = -EBUSY; 1392 + goto out; 1393 + } 1394 + 1395 + ufshcd_rpm_get_sync(hba); 1396 + 1397 + shost_for_each_device(sdev, hba->host) 1398 + blk_mq_freeze_queue(sdev->request_queue); 1399 + 1400 + ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, 1401 + QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt); 1402 + 1403 + shost_for_each_device(sdev, hba->host) 1404 + blk_mq_unfreeze_queue(sdev->request_queue); 1405 + 1406 + ufshcd_rpm_put_sync(hba); 1407 + 1408 + out: 1409 + up(&hba->host_sem); 1410 + return ret < 0 ? ret : count; 1411 + } 1412 + 1413 + static DEVICE_ATTR_RW(max_number_of_rtt); 1414 + 1343 1415 static inline bool ufshcd_is_wb_attrs(enum attr_idn idn) 1344 1416 { 1345 1417 return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS && ··· 1459 1387 UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT); 1460 1388 UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ); 1461 1389 UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK); 1462 - UFS_ATTRIBUTE(max_number_of_rtt, _MAX_NUM_OF_RTT); 1463 1390 UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL); 1464 1391 UFS_ATTRIBUTE(exception_event_status, _EE_STATUS); 1465 1392 UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
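The new max_number_of_rtt store path above parses the value, rejects anything above bDeviceRTTCap, and freezes every request queue so bMaxNumOfRTT only changes while no commands are outstanding. A sketch of just the parse-and-bound-check step, assuming userspace strtoul in place of kstrtouint; rtt_cap stands in for dev_info->rtt_cap.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse-and-bound-check step of max_number_of_rtt_store(). */
static int parse_rtt(const char *buf, unsigned int rtt_cap, unsigned int *out)
{
    char *end;
    unsigned long v = strtoul(buf, &end, 0);

    if (end == buf || v > UINT_MAX)
        return -EINVAL;
    if (v > rtt_cap) /* "rtt can be at most bDeviceRTTCap" */
        return -EINVAL;
    *out = (unsigned int)v;
    return 0;
}

int main(void)
{
    unsigned int rtt;

    printf("%d\n", parse_rtt("2", 4, &rtt)); /* 0: accepted */
    printf("%d\n", parse_rtt("9", 4, &rtt)); /* -EINVAL: above the cap */
    return 0;
}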
+20 -14
drivers/ufs/core/ufshcd-crypto.c
··· 95 95 return err; 96 96 } 97 97 98 - static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) 98 + static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile, 99 + const struct blk_crypto_key *key, 100 + unsigned int slot) 99 101 { 102 + struct ufs_hba *hba = 103 + container_of(profile, struct ufs_hba, crypto_profile); 100 104 /* 101 105 * Clear the crypto cfg on the device. Clearing CFGE 102 106 * might not be sufficient, so just clear the entire cfg. ··· 110 106 return ufshcd_program_key(hba, &cfg, slot); 111 107 } 112 108 113 - static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile, 114 - const struct blk_crypto_key *key, 115 - unsigned int slot) 116 - { 117 - struct ufs_hba *hba = 118 - container_of(profile, struct ufs_hba, crypto_profile); 119 - 120 - return ufshcd_clear_keyslot(hba, slot); 121 - } 122 - 109 + /* 110 + * Reprogram the keyslots if needed, and return true if CRYPTO_GENERAL_ENABLE 111 + * should be used in the host controller initialization sequence. 112 + */ 123 113 bool ufshcd_crypto_enable(struct ufs_hba *hba) 124 114 { 125 115 if (!(hba->caps & UFSHCD_CAP_CRYPTO)) ··· 121 123 122 124 /* Reset might clear all keys, so reprogram all the keys. */ 123 125 blk_crypto_reprogram_all_keys(&hba->crypto_profile); 126 + 127 + if (hba->quirks & UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE) 128 + return false; 129 + 124 130 return true; 125 131 } 126 132 ··· 160 158 int cap_idx; 161 159 int err = 0; 162 160 enum blk_crypto_mode_num blk_mode_num; 161 + 162 + if (hba->quirks & UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE) 163 + return 0; 163 164 164 165 /* 165 166 * Don't use crypto if either the hardware doesn't advertise the ··· 233 228 if (!(hba->caps & UFSHCD_CAP_CRYPTO)) 234 229 return; 235 230 236 - /* Clear all keyslots - the number of keyslots is (CFGC + 1) */ 237 - for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++) 238 - ufshcd_clear_keyslot(hba, slot); 231 + /* Clear all keyslots. */ 232 + for (slot = 0; slot < hba->crypto_profile.num_slots; slot++) 233 + hba->crypto_profile.ll_ops.keyslot_evict(&hba->crypto_profile, 234 + NULL, slot); 239 235 } 240 236 241 237 void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
+36
drivers/ufs/core/ufshcd-crypto.h
··· 37 37 h->dunu = cpu_to_le32(upper_32_bits(lrbp->data_unit_num)); 38 38 } 39 39 40 + static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba, 41 + struct ufshcd_lrb *lrbp) 42 + { 43 + struct scsi_cmnd *cmd = lrbp->cmd; 44 + const struct bio_crypt_ctx *crypt_ctx = scsi_cmd_to_rq(cmd)->crypt_ctx; 45 + 46 + if (crypt_ctx && hba->vops && hba->vops->fill_crypto_prdt) 47 + return hba->vops->fill_crypto_prdt(hba, crypt_ctx, 48 + lrbp->ucd_prdt_ptr, 49 + scsi_sg_count(cmd)); 50 + return 0; 51 + } 52 + 53 + static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba, 54 + struct ufshcd_lrb *lrbp) 55 + { 56 + if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT)) 57 + return; 58 + 59 + if (!(scsi_cmd_to_rq(lrbp->cmd)->crypt_ctx)) 60 + return; 61 + 62 + /* Zeroize the PRDT because it can contain cryptographic keys. */ 63 + memzero_explicit(lrbp->ucd_prdt_ptr, 64 + ufshcd_sg_entry_size(hba) * scsi_sg_count(lrbp->cmd)); 65 + } 66 + 40 67 bool ufshcd_crypto_enable(struct ufs_hba *hba); 41 68 42 69 int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba); ··· 80 53 static inline void 81 54 ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, 82 55 struct request_desc_header *h) { } 56 + 57 + static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba, 58 + struct ufshcd_lrb *lrbp) 59 + { 60 + return 0; 61 + } 62 + 63 + static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba, 64 + struct ufshcd_lrb *lrbp) { } 83 65 84 66 static inline bool ufshcd_crypto_enable(struct ufs_hba *hba) 85 67 {
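ufshcd_crypto_clear_prdt() above uses memzero_explicit() because the PRDT may hold key material, and an ordinary memset of a buffer that is never read again can legally be elided by the compiler. A userspace analogue follows, assuming a volatile walk is an acceptable stand-in; glibc and the BSDs offer explicit_bzero() for the same purpose.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Volatile walk so the compiler cannot elide the clear of a buffer that
 * is never read again, the property memzero_explicit() provides. */
static void zeroize(void *p, size_t n)
{
    volatile unsigned char *v = p;

    while (n--)
        *v++ = 0;
}

int main(void)
{
    unsigned char prdt[64];

    memset(prdt, 0x5a, sizeof(prdt)); /* pretend this held key material */
    zeroize(prdt, sizeof(prdt));
    printf("first byte after zeroize: %u\n", prdt[0]);
    return 0;
}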
+1 -14
drivers/ufs/core/ufshcd-priv.h
··· 64 64 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, 65 65 struct cq_entry *cqe); 66 66 int ufshcd_mcq_init(struct ufs_hba *hba); 67 + void ufshcd_mcq_disable(struct ufs_hba *hba); 67 68 int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba); 68 69 int ufshcd_mcq_memory_alloc(struct ufs_hba *hba); 69 - void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba); 70 - void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds); 71 - u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i); 72 - void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i); 73 70 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, 74 71 struct request *req); 75 - unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba, 76 - struct ufs_hw_queue *hwq); 77 72 void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba, 78 73 struct ufs_hw_queue *hwq); 79 74 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd); ··· 246 251 { 247 252 if (hba->vops && hba->vops->mcq_config_resource) 248 253 return hba->vops->mcq_config_resource(hba); 249 - 250 - return -EOPNOTSUPP; 251 - } 252 - 253 - static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba) 254 - { 255 - if (hba->vops && hba->vops->get_hba_mac) 256 - return hba->vops->get_hba_mac(hba); 257 254 258 255 return -EOPNOTSUPP; 259 256 }
+104 -59
drivers/ufs/core/ufshcd.c
··· 102 102 /* Default RTC update every 10 seconds */ 103 103 #define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC) 104 104 105 + /* bMaxNumOfRTT is equal to two after device manufacturing */ 106 + #define DEFAULT_MAX_NUM_RTT 2 107 + 105 108 /* UFSHC 4.0 compliant HC support this mode. */ 106 109 static bool use_mcq_mode = true; 107 110 ··· 164 161 enum { 165 162 UFSHCD_MAX_CHANNEL = 0, 166 163 UFSHCD_MAX_ID = 1, 167 - UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED, 168 - UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED, 169 164 }; 170 165 171 166 static const char *const ufshcd_state_name[] = { ··· 453 452 454 453 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); 455 454 456 - if (is_mcq_enabled(hba)) { 455 + if (hba->mcq_enabled) { 457 456 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq); 458 457 459 458 hwq_id = hwq->id; ··· 1561 1560 ktime_to_us(ktime_sub(ktime_get(), start)), ret); 1562 1561 1563 1562 out: 1564 - if (sched_clk_scaling_suspend_work && !scale_up) 1563 + if (sched_clk_scaling_suspend_work && 1564 + (!scale_up || hba->clk_scaling.suspend_on_no_request)) 1565 1565 queue_work(hba->clk_scaling.workq, 1566 1566 &hba->clk_scaling.suspend_work); 1567 1567 ··· 2302 2300 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) 2303 2301 ufshcd_start_monitor(hba, lrbp); 2304 2302 2305 - if (is_mcq_enabled(hba)) { 2303 + if (hba->mcq_enabled) { 2306 2304 int utrd_size = sizeof(struct utp_transfer_req_desc); 2307 2305 struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr; 2308 2306 struct utp_transfer_req_desc *dest; ··· 2402 2400 hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; 2403 2401 2404 2402 /* nutrs and nutmrs are 0 based values */ 2405 - hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; 2403 + hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1; 2406 2404 hba->nutmrs = 2407 2405 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; 2408 2406 hba->reserved_slot = hba->nutrs - 1; 2407 + 2408 + hba->nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, hba->capabilities) + 1; 2409 2409 2410 2410 /* Read crypto capabilities */ 2411 2411 err = ufshcd_hba_init_crypto_capabilities(hba); ··· 2640 2636 2641 2637 ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd)); 2642 2638 2643 - return 0; 2639 + return ufshcd_crypto_fill_prdt(hba, lrbp); 2644 2640 } 2645 2641 2646 2642 /** ··· 3001 2997 goto out; 3002 2998 } 3003 2999 3004 - if (is_mcq_enabled(hba)) 3000 + if (hba->mcq_enabled) 3005 3001 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); 3006 3002 3007 3003 ufshcd_send_command(hba, tag, hwq); ··· 3060 3056 unsigned long flags; 3061 3057 int err; 3062 3058 3063 - if (is_mcq_enabled(hba)) { 3059 + if (hba->mcq_enabled) { 3064 3060 /* 3065 3061 * MCQ mode. Clean up the MCQ resources similar to 3066 3062 * what the ufshcd_utrl_clear() does for SDB mode. 
··· 3170 3166 __func__, lrbp->task_tag); 3171 3167 3172 3168 /* MCQ mode */ 3173 - if (is_mcq_enabled(hba)) { 3169 + if (hba->mcq_enabled) { 3174 3170 /* successfully cleared the command, retry if needed */ 3175 3171 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) 3176 3172 err = -EAGAIN; ··· 3992 3988 */ 3993 3989 static int ufshcd_dme_link_startup(struct ufs_hba *hba) 3994 3990 { 3995 - struct uic_command uic_cmd = {0}; 3991 + struct uic_command uic_cmd = { 3992 + .command = UIC_CMD_DME_LINK_STARTUP, 3993 + }; 3996 3994 int ret; 3997 - 3998 - uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; 3999 3995 4000 3996 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 4001 3997 if (ret) ··· 4014 4010 */ 4015 4011 static int ufshcd_dme_reset(struct ufs_hba *hba) 4016 4012 { 4017 - struct uic_command uic_cmd = {0}; 4013 + struct uic_command uic_cmd = { 4014 + .command = UIC_CMD_DME_RESET, 4015 + }; 4018 4016 int ret; 4019 - 4020 - uic_cmd.command = UIC_CMD_DME_RESET; 4021 4017 4022 4018 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 4023 4019 if (ret) ··· 4053 4049 */ 4054 4050 static int ufshcd_dme_enable(struct ufs_hba *hba) 4055 4051 { 4056 - struct uic_command uic_cmd = {0}; 4052 + struct uic_command uic_cmd = { 4053 + .command = UIC_CMD_DME_ENABLE, 4054 + }; 4057 4055 int ret; 4058 - 4059 - uic_cmd.command = UIC_CMD_DME_ENABLE; 4060 4056 4061 4057 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 4062 4058 if (ret) ··· 4110 4106 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, 4111 4107 u8 attr_set, u32 mib_val, u8 peer) 4112 4108 { 4113 - struct uic_command uic_cmd = {0}; 4109 + struct uic_command uic_cmd = { 4110 + .command = peer ? UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET, 4111 + .argument1 = attr_sel, 4112 + .argument2 = UIC_ARG_ATTR_TYPE(attr_set), 4113 + .argument3 = mib_val, 4114 + }; 4114 4115 static const char *const action[] = { 4115 4116 "dme-set", 4116 4117 "dme-peer-set" ··· 4123 4114 const char *set = action[!!peer]; 4124 4115 int ret; 4125 4116 int retries = UFS_UIC_COMMAND_RETRIES; 4126 - 4127 - uic_cmd.command = peer ? 4128 - UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; 4129 - uic_cmd.argument1 = attr_sel; 4130 - uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); 4131 - uic_cmd.argument3 = mib_val; 4132 4117 4133 4118 do { 4134 4119 /* for peer attributes we retry upon failure */ ··· 4153 4150 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, 4154 4151 u32 *mib_val, u8 peer) 4155 4152 { 4156 - struct uic_command uic_cmd = {0}; 4153 + struct uic_command uic_cmd = { 4154 + .command = peer ? UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET, 4155 + .argument1 = attr_sel, 4156 + }; 4157 4157 static const char *const action[] = { 4158 4158 "dme-get", 4159 4159 "dme-peer-get" ··· 4189 4183 goto out; 4190 4184 } 4191 4185 } 4192 - 4193 - uic_cmd.command = peer ? 
4194 - UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; 4195 - uic_cmd.argument1 = attr_sel; 4196 4186 4197 4187 do { 4198 4188 /* for peer attributes we retry upon failure */ ··· 4322 4320 */ 4323 4321 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) 4324 4322 { 4325 - struct uic_command uic_cmd = {0}; 4323 + struct uic_command uic_cmd = { 4324 + .command = UIC_CMD_DME_SET, 4325 + .argument1 = UIC_ARG_MIB(PA_PWRMODE), 4326 + .argument3 = mode, 4327 + }; 4326 4328 int ret; 4327 4329 4328 4330 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { ··· 4339 4333 } 4340 4334 } 4341 4335 4342 - uic_cmd.command = UIC_CMD_DME_SET; 4343 - uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); 4344 - uic_cmd.argument3 = mode; 4345 4336 ufshcd_hold(hba); 4346 4337 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 4347 4338 ufshcd_release(hba); ··· 4379 4376 4380 4377 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) 4381 4378 { 4382 - int ret; 4383 - struct uic_command uic_cmd = {0}; 4379 + struct uic_command uic_cmd = { 4380 + .command = UIC_CMD_DME_HIBER_ENTER, 4381 + }; 4384 4382 ktime_t start = ktime_get(); 4383 + int ret; 4385 4384 4386 4385 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE); 4387 4386 4388 - uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; 4389 4387 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 4390 4388 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter", 4391 4389 ktime_to_us(ktime_sub(ktime_get(), start)), ret); ··· 4404 4400 4405 4401 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) 4406 4402 { 4407 - struct uic_command uic_cmd = {0}; 4403 + struct uic_command uic_cmd = { 4404 + .command = UIC_CMD_DME_HIBER_EXIT, 4405 + }; 4408 4406 int ret; 4409 4407 ktime_t start = ktime_get(); 4410 4408 4411 4409 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE); 4412 4410 4413 - uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; 4414 4411 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 4415 4412 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit", 4416 4413 ktime_to_us(ktime_sub(ktime_get(), start)), ret); ··· 5481 5476 struct scsi_cmnd *cmd = lrbp->cmd; 5482 5477 5483 5478 scsi_dma_unmap(cmd); 5479 + ufshcd_crypto_clear_prdt(hba, lrbp); 5484 5480 ufshcd_release(hba); 5485 5481 ufshcd_clk_scaling_update_busy(hba); 5486 5482 } ··· 5564 5558 u32 tr_doorbell; 5565 5559 struct ufs_hw_queue *hwq; 5566 5560 5567 - if (is_mcq_enabled(hba)) { 5561 + if (hba->mcq_enabled) { 5568 5562 hwq = &hba->uhq[queue_num]; 5569 5563 5570 5564 return ufshcd_mcq_poll_cqe_lock(hba, hwq); ··· 6205 6199 /* Complete requests that have door-bell cleared */ 6206 6200 static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl) 6207 6201 { 6208 - if (is_mcq_enabled(hba)) 6202 + if (hba->mcq_enabled) 6209 6203 ufshcd_mcq_compl_pending_transfer(hba, force_compl); 6210 6204 else 6211 6205 ufshcd_transfer_req_compl(hba); ··· 6462 6456 *ret ? 
"failed" : "succeeded"); 6463 6457 6464 6458 /* Release cmd in MCQ mode if abort succeeds */ 6465 - if (is_mcq_enabled(hba) && (*ret == 0)) { 6459 + if (hba->mcq_enabled && (*ret == 0)) { 6466 6460 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd)); 6467 6461 if (!hwq) 6468 6462 return 0; ··· 7395 7389 goto out; 7396 7390 } 7397 7391 7398 - if (is_mcq_enabled(hba)) { 7392 + if (hba->mcq_enabled) { 7399 7393 for (pos = 0; pos < hba->nutrs; pos++) { 7400 7394 lrbp = &hba->lrb[pos]; 7401 7395 if (ufshcd_cmd_inflight(lrbp->cmd) && ··· 7491 7485 */ 7492 7486 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", 7493 7487 __func__, tag); 7494 - if (is_mcq_enabled(hba)) { 7488 + if (hba->mcq_enabled) { 7495 7489 /* MCQ mode */ 7496 7490 if (ufshcd_cmd_inflight(lrbp->cmd)) { 7497 7491 /* sleep for max. 200us same delay as in SDB mode */ ··· 7569 7563 7570 7564 ufshcd_hold(hba); 7571 7565 7572 - if (!is_mcq_enabled(hba)) { 7566 + if (!hba->mcq_enabled) { 7573 7567 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 7574 7568 if (!test_bit(tag, &hba->outstanding_reqs)) { 7575 7569 /* If command is already aborted/completed, return FAILED. */ ··· 7602 7596 } 7603 7597 hba->req_abort_count++; 7604 7598 7605 - if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) { 7599 + if (!hba->mcq_enabled && !(reg & (1 << tag))) { 7606 7600 /* only execute this code in single doorbell mode */ 7607 7601 dev_err(hba->dev, 7608 7602 "%s: cmd was completed, but without a notifying intr, tag = %d", ··· 7629 7623 goto release; 7630 7624 } 7631 7625 7632 - if (is_mcq_enabled(hba)) { 7626 + if (hba->mcq_enabled) { 7633 7627 /* MCQ mode. Branch off to handle abort for mcq mode */ 7634 7628 err = ufshcd_mcq_abort(cmd); 7635 7629 goto release; ··· 8131 8125 dev_info->b_ext_iid_en = ext_iid_en; 8132 8126 } 8133 8127 8128 + static void ufshcd_set_rtt(struct ufs_hba *hba) 8129 + { 8130 + struct ufs_dev_info *dev_info = &hba->dev_info; 8131 + u32 rtt = 0; 8132 + u32 dev_rtt = 0; 8133 + int host_rtt_cap = hba->vops && hba->vops->max_num_rtt ? 
8134 + hba->vops->max_num_rtt : hba->nortt; 8135 + 8136 + /* RTT override makes sense only for UFS-4.0 and above */ 8137 + if (dev_info->wspecversion < 0x400) 8138 + return; 8139 + 8140 + if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 8141 + QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &dev_rtt)) { 8142 + dev_err(hba->dev, "failed reading bMaxNumOfRTT\n"); 8143 + return; 8144 + } 8145 + 8146 + /* do not override if it was already written */ 8147 + if (dev_rtt != DEFAULT_MAX_NUM_RTT) 8148 + return; 8149 + 8150 + rtt = min_t(int, dev_info->rtt_cap, host_rtt_cap); 8151 + 8152 + if (rtt == dev_rtt) 8153 + return; 8154 + 8155 + if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, 8156 + QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt)) 8157 + dev_err(hba->dev, "failed writing bMaxNumOfRTT\n"); 8158 + } 8159 + 8134 8160 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, 8135 8161 const struct ufs_dev_quirk *fixups) 8136 8162 { ··· 8297 8259 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 | 8298 8260 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1]; 8299 8261 dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH]; 8262 + 8263 + dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP]; 8300 8264 8301 8265 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; 8302 8266 ··· 8552 8512 goto out; 8553 8513 } 8554 8514 8515 + ufshcd_set_rtt(hba); 8516 + 8555 8517 ufshcd_get_ref_clk_gating_wait(hba); 8556 8518 8557 8519 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, ··· 8684 8642 if (ret) 8685 8643 goto err; 8686 8644 8645 + hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; 8646 + hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED; 8647 + 8687 8648 return 0; 8688 8649 err: 8689 8650 hba->nutrs = old_nutrs; ··· 8707 8662 ufshcd_enable_intr(hba, intrs); 8708 8663 ufshcd_mcq_make_queues_operational(hba); 8709 8664 ufshcd_mcq_config_mac(hba, hba->nutrs); 8710 - 8711 - hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; 8712 - hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED; 8713 - 8714 - ufshcd_mcq_enable(hba); 8715 - hba->mcq_enabled = true; 8716 8665 8717 8666 dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n", 8718 8667 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT], ··· 8735 8696 ufshcd_set_link_active(hba); 8736 8697 8737 8698 /* Reconfigure MCQ upon reset */ 8738 - if (is_mcq_enabled(hba) && !init_dev_params) 8699 + if (hba->mcq_enabled && !init_dev_params) { 8739 8700 ufshcd_config_mcq(hba); 8701 + ufshcd_mcq_enable(hba); 8702 + } 8740 8703 8741 8704 /* Verify device initialization by sending NOP OUT UPIU */ 8742 8705 ret = ufshcd_verify_dev_init(hba); ··· 8759 8718 if (ret) 8760 8719 return ret; 8761 8720 if (is_mcq_supported(hba) && !hba->scsi_host_added) { 8721 + ufshcd_mcq_enable(hba); 8762 8722 ret = ufshcd_alloc_mcq(hba); 8763 8723 if (!ret) { 8764 8724 ufshcd_config_mcq(hba); 8765 8725 } else { 8766 8726 /* Continue with SDB mode */ 8727 + ufshcd_mcq_disable(hba); 8767 8728 use_mcq_mode = false; 8768 8729 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", 8769 8730 ret); ··· 8779 8736 } else if (is_mcq_supported(hba)) { 8780 8737 /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */ 8781 8738 ufshcd_config_mcq(hba); 8739 + ufshcd_mcq_enable(hba); 8782 8740 } 8783 8741 } 8784 8742 ··· 8965 8921 .eh_timed_out = ufshcd_eh_timed_out, 8966 8922 .this_id = -1, 8967 8923 .sg_tablesize = SG_ALL, 8968 - .cmd_per_lun = UFSHCD_CMD_PER_LUN, 8969 - .can_queue = UFSHCD_CAN_QUEUE, 
8970 8924 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX, 8971 8925 .max_sectors = SZ_1M / SECTOR_SIZE, 8972 8926 .max_host_blocked = 1, ··· 10221 10179 blk_mq_destroy_queue(hba->tmf_queue); 10222 10180 blk_put_queue(hba->tmf_queue); 10223 10181 blk_mq_free_tag_set(&hba->tmf_tag_set); 10224 - scsi_remove_host(hba->host); 10182 + if (hba->scsi_host_added) 10183 + scsi_remove_host(hba->host); 10225 10184 /* disable interrupts */ 10226 10185 ufshcd_disable_intr(hba, hba->intr_mask); 10227 10186 ufshcd_hba_stop(hba); ··· 10501 10458 dev_err(hba->dev, "scsi_add_host failed\n"); 10502 10459 goto out_disable; 10503 10460 } 10461 + hba->scsi_host_added = true; 10504 10462 } 10505 10463 10506 10464 hba->tmf_tag_set = (struct blk_mq_tag_set) { ··· 10584 10540 free_tmf_tag_set: 10585 10541 blk_mq_free_tag_set(&hba->tmf_tag_set); 10586 10542 out_remove_scsi_host: 10587 - scsi_remove_host(hba->host); 10543 + if (hba->scsi_host_added) 10544 + scsi_remove_host(hba->host); 10588 10545 out_disable: 10589 10546 hba->is_irq_enabled = false; 10590 10547 ufshcd_hba_exit(hba);
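ufshcd_set_rtt() above only writes bMaxNumOfRTT while the attribute still holds its factory default of two, and caps the new value by both bDeviceRTTCap and the host's NORTT (or a vendor override). The negotiation reduces to a guarded min(); a worked sketch with made-up numbers:

#include <linux/minmax.h>
#include <linux/types.h>

/* Returns the bMaxNumOfRTT value that would be left on the device. */
static u32 negotiate_rtt(u32 dev_rtt, u32 dev_rtt_cap, u32 host_rtt_cap)
{
	if (dev_rtt != 2)	/* already provisioned; do not override */
		return dev_rtt;
	return min(dev_rtt_cap, host_rtt_cap);
}

/*
 * negotiate_rtt(2, 32, 16) == 16   (host NORTT limits the device cap)
 * negotiate_rtt(9, 32, 16) == 9    (non-default value is preserved)
 */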
+234 -6
drivers/ufs/host/ufs-exynos.c
··· 8 8 * 9 9 */ 10 10 11 + #include <asm/unaligned.h> 12 + #include <crypto/aes.h> 13 + #include <linux/arm-smccc.h> 11 14 #include <linux/clk.h> 12 15 #include <linux/delay.h> 13 16 #include <linux/module.h> ··· 28 25 29 26 #include "ufs-exynos.h" 30 27 28 + #define DATA_UNIT_SIZE 4096 29 + 31 30 /* 32 31 * Exynos's Vendor specific registers for UFSHCI 33 32 */ 34 33 #define HCI_TXPRDT_ENTRY_SIZE 0x00 35 34 #define PRDT_PREFECT_EN BIT(31) 36 - #define PRDT_SET_SIZE(x) ((x) & 0x1F) 37 35 #define HCI_RXPRDT_ENTRY_SIZE 0x04 38 36 #define HCI_1US_TO_CNT_VAL 0x0C 39 37 #define CNT_VAL_1US_MASK 0x3FF ··· 1047 1043 exynos_ufs_fit_aggr_timeout(ufs); 1048 1044 1049 1045 hci_writel(ufs, 0xa, HCI_DATA_REORDER); 1050 - hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE); 1051 - hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE); 1046 + hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_TXPRDT_ENTRY_SIZE); 1047 + hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE); 1052 1048 hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); 1053 1049 hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); 1054 1050 hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN); ··· 1155 1151 hba->quirks = ufs->drv_data->quirks; 1156 1152 } 1157 1153 1154 + #ifdef CONFIG_SCSI_UFS_CRYPTO 1155 + 1156 + /* 1157 + * Support for Flash Memory Protector (FMP), which is the inline encryption 1158 + * hardware on Exynos and Exynos-based SoCs. The interface to this hardware is 1159 + * not compatible with the standard UFS crypto. It requires that encryption be 1160 + * configured in the PRDT using a nonstandard extension. 1161 + */ 1162 + 1163 + enum fmp_crypto_algo_mode { 1164 + FMP_BYPASS_MODE = 0, 1165 + FMP_ALGO_MODE_AES_CBC = 1, 1166 + FMP_ALGO_MODE_AES_XTS = 2, 1167 + }; 1168 + enum fmp_crypto_key_length { 1169 + FMP_KEYLEN_256BIT = 1, 1170 + }; 1171 + 1172 + /** 1173 + * struct fmp_sg_entry - nonstandard format of PRDT entries when FMP is enabled 1174 + * 1175 + * @base: The standard PRDT entry, but with nonstandard bitfields in the high 1176 + * bits of the 'size' field, i.e. the last 32-bit word. When these 1177 + * nonstandard bitfields are zero, the data segment won't be encrypted or 1178 + * decrypted. Otherwise they specify the algorithm and key length with 1179 + * which the data segment will be encrypted or decrypted. 
1180 + * @file_iv: The initialization vector (IV) with all bytes reversed 
1181 + * @file_enckey: The first half of the AES-XTS key with all bytes reversed 
1182 + * @file_twkey: The second half of the AES-XTS key with all bytes reversed 
1183 + * @disk_iv: Unused 
1184 + * @reserved: Unused 
1185 + */ 
1186 + struct fmp_sg_entry { 
1187 + struct ufshcd_sg_entry base; 
1188 + __be64 file_iv[2]; 
1189 + __be64 file_enckey[4]; 
1190 + __be64 file_twkey[4]; 
1191 + __be64 disk_iv[2]; 
1192 + __be64 reserved[2]; 
1193 + }; 
1194 + 
1195 + #define SMC_CMD_FMP_SECURITY \ 
1196 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \ 
1197 + ARM_SMCCC_OWNER_SIP, 0x1810) 
1198 + #define SMC_CMD_SMU \ 
1199 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \ 
1200 + ARM_SMCCC_OWNER_SIP, 0x1850) 
1201 + #define SMC_CMD_FMP_SMU_RESUME \ 
1202 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \ 
1203 + ARM_SMCCC_OWNER_SIP, 0x1860) 
1204 + #define SMU_EMBEDDED 0 
1205 + #define SMU_INIT 0 
1206 + #define CFG_DESCTYPE_3 3 
1207 + 
1208 + static void exynos_ufs_fmp_init(struct ufs_hba *hba, struct exynos_ufs *ufs) 
1209 + { 
1210 + struct blk_crypto_profile *profile = &hba->crypto_profile; 
1211 + struct arm_smccc_res res; 
1212 + int err; 
1213 + 
1214 + /* 
1215 + * Check for the standard crypto support bit, since it's available even 
1216 + * though the rest of the interface to FMP is nonstandard. 
1217 + * 
1218 + * This check should have the effect of preventing the driver from 
1219 + * trying to use FMP on old Exynos SoCs that don't have FMP. 
1220 + */ 
1221 + if (!(ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES) & 
1222 + MASK_CRYPTO_SUPPORT)) 
1223 + return; 
1224 + 
1225 + /* 
1226 + * The below sequence of SMC calls to enable FMP can be found in the 
1227 + * downstream driver source for gs101 and other Exynos-based SoCs. It 
1228 + * is the only way to enable FMP that works on SoCs such as gs101 that 
1229 + * don't make the FMP registers accessible to Linux. It probably works 
1230 + * on other Exynos-based SoCs too, and might even still be the only way 
1231 + * that works. But this hasn't been properly tested, and this code is 
1232 + * mutually exclusive with exynos_ufs_config_smu(). So for now only 
1233 + * enable FMP support on SoCs with EXYNOS_UFS_OPT_UFSPR_SECURE. 
1234 + */ 
1235 + if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE)) 
1236 + return; 
1237 + 
1238 + /* 
1239 + * This call (which sets DESCTYPE to 0x3 in the FMPSECURITY0 register) 
1240 + * is needed to make the hardware use the larger PRDT entry size. 
1241 + */ 
1242 + BUILD_BUG_ON(sizeof(struct fmp_sg_entry) != 128); 
1243 + arm_smccc_smc(SMC_CMD_FMP_SECURITY, 0, SMU_EMBEDDED, CFG_DESCTYPE_3, 
1244 + 0, 0, 0, 0, &res); 
1245 + if (res.a0) { 
1246 + dev_warn(hba->dev, 
1247 + "SMC_CMD_FMP_SECURITY failed on init: %ld. Disabling FMP support.\n", 
1248 + res.a0); 
1249 + return; 
1250 + } 
1251 + ufshcd_set_sg_entry_size(hba, sizeof(struct fmp_sg_entry)); 
1252 + 
1253 + /* 
1254 + * This is needed to initialize FMP. Without it, errors occur when 
1255 + * inline encryption is used. 
1256 + */ 
1257 + arm_smccc_smc(SMC_CMD_SMU, SMU_INIT, SMU_EMBEDDED, 0, 0, 0, 0, 0, &res); 
1258 + if (res.a0) { 
1259 + dev_err(hba->dev, 
1260 + "SMC_CMD_SMU(SMU_INIT) failed: %ld. Disabling FMP support.\n", 
1261 + res.a0); 
1262 + return; 
1263 + } 
1264 + 
1265 + /* Advertise crypto capabilities to the block layer. */ 
1266 + err = devm_blk_crypto_profile_init(hba->dev, profile, 0); 
1267 + if (err) { 
1268 + /* Only ENOMEM should be possible here. 
*/ 1269 + dev_err(hba->dev, "Failed to initialize crypto profile: %d\n", 1270 + err); 1271 + return; 1272 + } 1273 + profile->max_dun_bytes_supported = AES_BLOCK_SIZE; 1274 + profile->dev = hba->dev; 1275 + profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] = 1276 + DATA_UNIT_SIZE; 1277 + 1278 + /* Advertise crypto support to ufshcd-core. */ 1279 + hba->caps |= UFSHCD_CAP_CRYPTO; 1280 + 1281 + /* Advertise crypto quirks to ufshcd-core. */ 1282 + hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE | 1283 + UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE | 1284 + UFSHCD_QUIRK_KEYS_IN_PRDT; 1285 + 1286 + } 1287 + 1288 + static void exynos_ufs_fmp_resume(struct ufs_hba *hba) 1289 + { 1290 + struct arm_smccc_res res; 1291 + 1292 + arm_smccc_smc(SMC_CMD_FMP_SECURITY, 0, SMU_EMBEDDED, CFG_DESCTYPE_3, 1293 + 0, 0, 0, 0, &res); 1294 + if (res.a0) 1295 + dev_err(hba->dev, 1296 + "SMC_CMD_FMP_SECURITY failed on resume: %ld\n", res.a0); 1297 + 1298 + arm_smccc_smc(SMC_CMD_FMP_SMU_RESUME, 0, SMU_EMBEDDED, 0, 0, 0, 0, 0, 1299 + &res); 1300 + if (res.a0) 1301 + dev_err(hba->dev, 1302 + "SMC_CMD_FMP_SMU_RESUME failed: %ld\n", res.a0); 1303 + } 1304 + 1305 + static inline __be64 fmp_key_word(const u8 *key, int j) 1306 + { 1307 + return cpu_to_be64(get_unaligned_le64( 1308 + key + AES_KEYSIZE_256 - (j + 1) * sizeof(u64))); 1309 + } 1310 + 1311 + /* Fill the PRDT for a request according to the given encryption context. */ 1312 + static int exynos_ufs_fmp_fill_prdt(struct ufs_hba *hba, 1313 + const struct bio_crypt_ctx *crypt_ctx, 1314 + void *prdt, unsigned int num_segments) 1315 + { 1316 + struct fmp_sg_entry *fmp_prdt = prdt; 1317 + const u8 *enckey = crypt_ctx->bc_key->raw; 1318 + const u8 *twkey = enckey + AES_KEYSIZE_256; 1319 + u64 dun_lo = crypt_ctx->bc_dun[0]; 1320 + u64 dun_hi = crypt_ctx->bc_dun[1]; 1321 + unsigned int i; 1322 + 1323 + /* If FMP wasn't enabled, we shouldn't get any encrypted requests. */ 1324 + if (WARN_ON_ONCE(!(hba->caps & UFSHCD_CAP_CRYPTO))) 1325 + return -EIO; 1326 + 1327 + /* Configure FMP on each segment of the request. */ 1328 + for (i = 0; i < num_segments; i++) { 1329 + struct fmp_sg_entry *prd = &fmp_prdt[i]; 1330 + int j; 1331 + 1332 + /* Each segment must be exactly one data unit. */ 1333 + if (prd->base.size != cpu_to_le32(DATA_UNIT_SIZE - 1)) { 1334 + dev_err(hba->dev, 1335 + "data segment is misaligned for FMP\n"); 1336 + return -EIO; 1337 + } 1338 + 1339 + /* Set the algorithm and key length. */ 1340 + prd->base.size |= cpu_to_le32((FMP_ALGO_MODE_AES_XTS << 28) | 1341 + (FMP_KEYLEN_256BIT << 26)); 1342 + 1343 + /* Set the IV. */ 1344 + prd->file_iv[0] = cpu_to_be64(dun_hi); 1345 + prd->file_iv[1] = cpu_to_be64(dun_lo); 1346 + 1347 + /* Set the key. */ 1348 + for (j = 0; j < AES_KEYSIZE_256 / sizeof(u64); j++) { 1349 + prd->file_enckey[j] = fmp_key_word(enckey, j); 1350 + prd->file_twkey[j] = fmp_key_word(twkey, j); 1351 + } 1352 + 1353 + /* Increment the data unit number. 
*/ 1354 + dun_lo++; 1355 + if (dun_lo == 0) 1356 + dun_hi++; 1357 + } 1358 + return 0; 1359 + } 1360 + 1361 + #else /* CONFIG_SCSI_UFS_CRYPTO */ 1362 + 1363 + static void exynos_ufs_fmp_init(struct ufs_hba *hba, struct exynos_ufs *ufs) 1364 + { 1365 + } 1366 + 1367 + static void exynos_ufs_fmp_resume(struct ufs_hba *hba) 1368 + { 1369 + } 1370 + 1371 + #define exynos_ufs_fmp_fill_prdt NULL 1372 + 1373 + #endif /* !CONFIG_SCSI_UFS_CRYPTO */ 1374 + 1158 1375 static int exynos_ufs_init(struct ufs_hba *hba) 1159 1376 { 1160 1377 struct device *dev = hba->dev; ··· 1423 1198 1424 1199 exynos_ufs_priv_init(hba, ufs); 1425 1200 1201 + exynos_ufs_fmp_init(hba, ufs); 1202 + 1426 1203 if (ufs->drv_data->drv_init) { 1427 1204 ret = ufs->drv_data->drv_init(dev, ufs); 1428 1205 if (ret) { ··· 1440 1213 if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE)) 1441 1214 exynos_ufs_config_smu(ufs); 1442 1215 1443 - hba->host->dma_alignment = SZ_4K - 1; 1216 + hba->host->dma_alignment = DATA_UNIT_SIZE - 1; 1444 1217 return 0; 1445 1218 1446 1219 out: ··· 1559 1332 * (ufshcd_async_scan()). Note: this callback may also be called 1560 1333 * from other functions than ufshcd_init(). 1561 1334 */ 1562 - hba->host->max_segment_size = SZ_4K; 1335 + hba->host->max_segment_size = DATA_UNIT_SIZE; 1563 1336 1564 1337 if (ufs->drv_data->pre_hce_enable) { 1565 1338 ret = ufs->drv_data->pre_hce_enable(ufs); ··· 1659 1432 phy_power_on(ufs->phy); 1660 1433 1661 1434 exynos_ufs_config_smu(ufs); 1662 - 1435 + exynos_ufs_fmp_resume(hba); 1663 1436 return 0; 1664 1437 } 1665 1438 ··· 1925 1698 .hibern8_notify = exynos_ufs_hibern8_notify, 1926 1699 .suspend = exynos_ufs_suspend, 1927 1700 .resume = exynos_ufs_resume, 1701 + .fill_crypto_prdt = exynos_ufs_fmp_fill_prdt, 1928 1702 }; 1929 1703 1930 1704 static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
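fmp_key_word() above walks the 256-bit key from its last 64-bit word to its first, loading each word little-endian and storing it big-endian; the combined effect is a full byte reversal of each 32-byte key half, matching the "all bytes reversed" layout of struct fmp_sg_entry. An equivalent, more literal sketch of the transformation:

#include <linux/types.h>

/* Byte-for-byte equivalent of the word-wise fmp_key_word() loop. */
static void fmp_reverse_key_half(u8 dst[32], const u8 src[32])
{
	int i;

	for (i = 0; i < 32; i++)
		dst[i] = src[31 - i];
}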
+4 -3
drivers/ufs/host/ufs-mediatek.c
··· 693 693 struct ufs_mtk_host *host = ufshcd_get_variant(hba); 694 694 u32 irq, i; 695 695 696 - if (!is_mcq_enabled(hba)) 696 + if (!hba->mcq_enabled) 697 697 return; 698 698 699 699 if (host->mcq_nr_intr == 0) ··· 711 711 struct ufs_mtk_host *host = ufshcd_get_variant(hba); 712 712 u32 irq, i; 713 713 714 - if (!is_mcq_enabled(hba)) 714 + if (!hba->mcq_enabled) 715 715 return; 716 716 717 717 if (host->mcq_nr_intr == 0) ··· 1308 1308 if (err) 1309 1309 return err; 1310 1310 1311 - if (is_mcq_enabled(hba)) { 1311 + if (hba->mcq_enabled) { 1312 1312 ufs_mtk_config_mcq(hba, false); 1313 1313 ufshcd_mcq_make_queues_operational(hba); 1314 1314 ufshcd_mcq_config_mac(hba, hba->nutrs); ··· 1785 1785 */ 1786 1786 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = { 1787 1787 .name = "mediatek.ufshci", 1788 + .max_num_rtt = MTK_MAX_NUM_RTT, 1788 1789 .init = ufs_mtk_init, 1789 1790 .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version, 1790 1791 .setup_clocks = ufs_mtk_setup_clocks,
+3
drivers/ufs/host/ufs-mediatek.h
··· 189 189 /* MTK delay of autosuspend: 500 ms */ 190 190 #define MTK_RPM_AUTOSUSPEND_DELAY_MS 500 191 191 192 + /* MTK RTT support number */ 193 + #define MTK_MAX_NUM_RTT 2 194 + 192 195 #endif /* !_UFS_MEDIATEK_H */
+3
drivers/ufs/host/ufs-qcom.c
··· 1548 1548 p->timer = DEVFREQ_TIMER_DELAYED; 1549 1549 d->upthreshold = 70; 1550 1550 d->downdifferential = 5; 1551 + 1552 + hba->clk_scaling.suspend_on_no_request = true; 1551 1553 } 1552 1554 #else 1553 1555 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, ··· 1885 1883 }; 1886 1884 module_platform_driver(ufs_qcom_pltform); 1887 1885 1886 + MODULE_DESCRIPTION("Qualcomm UFS host controller driver"); 1888 1887 MODULE_LICENSE("GPL v2");
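The new clk_scaling.suspend_on_no_request flag that ufs-qcom sets here feeds the updated condition in the ufshcd.c hunk above: the scaling-suspend work may now be queued even on a scale-up, provided the host opted in. Reduced to a predicate, the merged condition is (sketch only):

#include <linux/types.h>

/* True when the clock-scaling suspend work should be queued. */
static bool should_queue_scaling_suspend(bool sched_suspend_work,
					 bool scale_up,
					 bool suspend_on_no_request)
{
	return sched_suspend_work &&
	       (!scale_up || suspend_on_no_request);
}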
+48 -1
drivers/ufs/host/ufshcd-pci.c
··· 20 20 #include <linux/acpi.h> 21 21 #include <linux/gpio/consumer.h> 22 22 23 + #define MAX_SUPP_MAC 64 24 + 23 25 struct ufs_host { 24 26 void (*late_init)(struct ufs_hba *hba); 25 27 }; ··· 448 446 return ufs_intel_common_init(hba); 449 447 } 450 448 449 + static int ufs_qemu_get_hba_mac(struct ufs_hba *hba) 450 + { 451 + return MAX_SUPP_MAC; 452 + } 453 + 454 + static int ufs_qemu_mcq_config_resource(struct ufs_hba *hba) 455 + { 456 + hba->mcq_base = hba->mmio_base + ufshcd_mcq_queue_cfg_addr(hba); 457 + 458 + return 0; 459 + } 460 + 461 + static int ufs_qemu_op_runtime_config(struct ufs_hba *hba) 462 + { 463 + struct ufshcd_mcq_opr_info_t *opr; 464 + int i; 465 + 466 + u32 sqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQDAO, 0)); 467 + u32 sqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQISAO, 0)); 468 + u32 cqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQDAO, 0)); 469 + u32 cqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQISAO, 0)); 470 + 471 + hba->mcq_opr[OPR_SQD].offset = sqdao; 472 + hba->mcq_opr[OPR_SQIS].offset = sqisao; 473 + hba->mcq_opr[OPR_CQD].offset = cqdao; 474 + hba->mcq_opr[OPR_CQIS].offset = cqisao; 475 + 476 + for (i = 0; i < OPR_MAX; i++) { 477 + opr = &hba->mcq_opr[i]; 478 + opr->stride = 48; 479 + opr->base = hba->mmio_base + opr->offset; 480 + } 481 + 482 + return 0; 483 + } 484 + 485 + static struct ufs_hba_variant_ops ufs_qemu_hba_vops = { 486 + .name = "qemu-pci", 487 + .get_hba_mac = ufs_qemu_get_hba_mac, 488 + .mcq_config_resource = ufs_qemu_mcq_config_resource, 489 + .op_runtime_config = ufs_qemu_op_runtime_config, 490 + }; 491 + 451 492 static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = { 452 493 .name = "intel-pci", 453 494 .init = ufs_intel_common_init, ··· 636 591 }; 637 592 638 593 static const struct pci_device_id ufshcd_pci_tbl[] = { 639 - { PCI_VENDOR_ID_REDHAT, 0x0013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 594 + { PCI_VENDOR_ID_REDHAT, 0x0013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 595 + (kernel_ulong_t)&ufs_qemu_hba_vops }, 640 596 { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 641 597 { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, 642 598 { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops }, ··· 648 602 { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, 649 603 { PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, 650 604 { PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, 605 + { PCI_VDEVICE(INTEL, 0xE447), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, 651 606 { } /* terminate list */ 652 607 }; 653 608
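The QEMU vops above describe the MCQ register layout to the core: per-queue config registers sit at a fixed 0x40 stride (ufshcd_mcq_cfg_offset()), while each operation-and-runtime region uses the base/stride pair filled in by ufs_qemu_op_runtime_config(). A sketch of how a queue's register group address falls out of those numbers, mirroring the style of the core's helper (names assumed, not the exact core code):

/* Address of operation/runtime register group @n for hardware queue @i. */
static void __iomem *example_opr_base(struct ufs_hba *hba,
				      enum ufshcd_mcq_opr n, int i)
{
	struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];

	return opr->base + opr->stride * i;	/* stride is 48 for QEMU */
}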
+2 -1
include/uapi/scsi/scsi_bsg_mpi3mr.h
··· 296 296 * multiple hdb entries. 297 297 * 298 298 * @num_hdb_types: Number of host diag buffer types supported 299 + * @element_trigger_format: Element trigger format 299 300 * @rsvd1: Reserved 300 301 * @rsvd2: Reserved 301 302 * @rsvd3: Reserved ··· 304 303 */ 305 304 struct mpi3mr_bsg_in_hdb_status { 306 305 __u8 num_hdb_types; 307 - __u8 rsvd1; 306 + __u8 element_trigger_format; 308 307 __u16 rsvd2; 309 308 __u32 rsvd3; 310 309 struct mpi3mr_hdb_entry entry[1];
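Renaming rsvd1 to element_trigger_format is the usual way to extend a UAPI struct: a formerly reserved byte gains meaning while the size and offsets of every other field stay fixed, so existing binaries keep working. A compile-time check of that invariant could look like this (hypothetical, not part of the patch):

#include <stddef.h>
#include <scsi/scsi_bsg_mpi3mr.h>

/* Layout must not change when a reserved field is repurposed. */
_Static_assert(offsetof(struct mpi3mr_bsg_in_hdb_status,
			element_trigger_format) == 1,
	       "element_trigger_format must reuse the old rsvd1 slot");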
+2
include/ufs/ufs.h
··· 592 592 enum ufs_rtc_time rtc_type; 593 593 time64_t rtc_time_baseline; 594 594 u32 rtc_update_period; 595 + 596 + u8 rtt_cap; /* bDeviceRTTCap */ 595 597 }; 596 598 597 599 /*
+49 -5
include/ufs/ufshcd.h
··· 73 73 * @done: UIC command completion 74 74 */ 75 75 struct uic_command { 76 - u32 command; 77 - u32 argument1; 76 + const u32 command; 77 + const u32 argument1; 78 78 u32 argument2; 79 79 u32 argument3; 80 80 int cmd_active; ··· 295 295 /** 296 296 * struct ufs_hba_variant_ops - variant specific callbacks 297 297 * @name: variant name 298 + * @max_num_rtt: maximum RTT supported by the host 298 299 * @init: called when the driver is initialized 299 300 * @exit: called to cleanup everything done in init 300 301 * @get_ufs_hci_version: called to get UFS HCI version ··· 322 321 * @device_reset: called to issue a reset pulse on the UFS device 323 322 * @config_scaling_param: called to configure clock scaling parameters 324 323 * @program_key: program or evict an inline encryption key 324 + * @fill_crypto_prdt: initialize crypto-related fields in the PRDT 325 325 * @event_notify: called to notify important events 326 326 * @reinit_notify: called to notify reinit of UFSHCD during max gear switch 327 327 * @mcq_config_resource: called to configure MCQ platform resources 328 - * @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode 328 + * @get_hba_mac: reports maximum number of outstanding commands supported by 329 + * the controller. Should be implemented for UFSHCI 4.0 or later 330 + * controllers that are not compliant with the UFSHCI 4.0 specification. 329 331 * @op_runtime_config: called to config Operation and runtime regs Pointers 330 332 * @get_outstanding_cqs: called to get outstanding completion queues 331 333 * @config_esi: called to config Event Specific Interrupt ··· 336 332 */ 337 333 struct ufs_hba_variant_ops { 338 334 const char *name; 335 + int max_num_rtt; 339 336 int (*init)(struct ufs_hba *); 340 337 void (*exit)(struct ufs_hba *); 341 338 u32 (*get_ufs_hci_version)(struct ufs_hba *); ··· 370 365 struct devfreq_simple_ondemand_data *data); 371 366 int (*program_key)(struct ufs_hba *hba, 372 367 const union ufs_crypto_cfg_entry *cfg, int slot); 368 + int (*fill_crypto_prdt)(struct ufs_hba *hba, 369 + const struct bio_crypt_ctx *crypt_ctx, 370 + void *prdt, unsigned int num_segments); 373 371 void (*event_notify)(struct ufs_hba *hba, 374 372 enum ufs_event_type evt, void *data); 375 373 void (*reinit_notify)(struct ufs_hba *); ··· 465 457 bool is_initialized; 466 458 bool is_busy_started; 467 459 bool is_suspended; 460 + bool suspend_on_no_request; 468 461 }; 469 462 470 463 #define UFS_EVENT_HIST_LENGTH 8 ··· 652 643 * thus need this quirk to skip related flow. 653 644 */ 654 645 UFSHCD_QUIRK_MCQ_BROKEN_RTC = 1 << 21, 646 + 647 + /* 648 + * This quirk needs to be enabled if the host controller supports inline 649 + * encryption but it needs to initialize the crypto capabilities in a 650 + * nonstandard way and/or needs to override blk_crypto_ll_ops. If 651 + * enabled, the standard code won't initialize the blk_crypto_profile; 652 + * ufs_hba_variant_ops::init() must do it instead. 653 + */ 654 + UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE = 1 << 22, 655 + 656 + /* 657 + * This quirk needs to be enabled if the host controller supports inline 658 + * encryption but does not support the CRYPTO_GENERAL_ENABLE bit, i.e. 659 + * host controller initialization fails if that bit is set. 
660 + */ 661 + UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE = 1 << 23, 662 + 663 + /* 664 + * This quirk needs to be enabled if the host controller driver copies 665 + * cryptographic keys into the PRDT in order to send them to hardware, 666 + * and therefore the PRDT should be zeroized after each request (as per 667 + * the standard best practice for managing keys). 668 + */ 669 + UFSHCD_QUIRK_KEYS_IN_PRDT = 1 << 24, 655 670 }; 656 671 657 672 enum ufshcd_caps { ··· 852 819 * @capabilities: UFS Controller Capabilities 853 820 * @mcq_capabilities: UFS Multi Circular Queue capabilities 854 821 * @nutrs: Transfer Request Queue depth supported by controller 822 + * @nortt - Max outstanding RTTs supported by controller 855 823 * @nutmrs: Task Management Queue depth supported by controller 856 824 * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock. 857 825 * @ufs_version: UFS Version to which controller complies ··· 991 957 992 958 u32 capabilities; 993 959 int nutrs; 960 + int nortt; 994 961 u32 mcq_capabilities; 995 962 int nutmrs; 996 963 u32 reserved_slot; ··· 1161 1126 struct mutex sq_mutex; 1162 1127 }; 1163 1128 1164 - static inline bool is_mcq_enabled(struct ufs_hba *hba) 1129 + #define MCQ_QCFG_SIZE 0x40 1130 + 1131 + static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba, 1132 + enum ufshcd_mcq_opr opr, int idx) 1165 1133 { 1166 - return hba->mcq_enabled; 1134 + return hba->mcq_opr[opr].offset + hba->mcq_opr[opr].stride * idx; 1135 + } 1136 + 1137 + static inline unsigned int ufshcd_mcq_cfg_offset(unsigned int reg, int idx) 1138 + { 1139 + return reg + MCQ_QCFG_SIZE * idx; 1167 1140 } 1168 1141 1169 1142 #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE ··· 1304 1261 void ufshcd_hba_stop(struct ufs_hba *hba); 1305 1262 void ufshcd_schedule_eh_work(struct ufs_hba *hba); 1306 1263 void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds); 1264 + unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba); 1307 1265 u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i); 1308 1266 void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i); 1309 1267 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
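Marking command and argument1 in struct uic_command const pairs with the ufshcd.c conversions earlier in this diff: every call site now builds the command with a designated initializer, and the compiler rejects any later assignment to the write-once fields. In miniature (example_cmd is illustrative):

#include <linux/types.h>

struct example_cmd {
	const u32 command;	/* write-once by design */
	u32 argument2;		/* still mutable */
};

static void demo(void)
{
	struct example_cmd cmd = {
		.command = 0x16,	/* must be set at initialization */
	};

	cmd.argument2 = 1;	/* fine */
	/* cmd.command = 0x17;	   would now fail to compile */
}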
+3 -1
include/ufs/ufshci.h
··· 67 67 68 68 /* Controller capability masks */ 69 69 enum { 70 - MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F, 70 + MASK_TRANSFER_REQUESTS_SLOTS_SDB = 0x0000001F, 71 + MASK_TRANSFER_REQUESTS_SLOTS_MCQ = 0x000000FF, 72 + MASK_NUMBER_OUTSTANDING_RTT = 0x0000FF00, 71 73 MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000, 72 74 MASK_EHSLUTRD_SUPPORTED = 0x00400000, 73 75 MASK_AUTO_HIBERN8_SUPPORT = 0x00800000,
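The split of the slot mask into _SDB and _MCQ variants plus the new NORTT field all decode out of the single controller-capabilities register, using the same +1 convention as the existing 0-based fields. A worked decode with an assumed register value:

#include <linux/bitfield.h>

static void decode_caps(u32 caps /* e.g. 0x01071234, assumed for illustration */)
{
	/* 0x34 & 0x1F = 20, +1 -> 21 SDB transfer request slots */
	int nutrs = (caps & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;

	/* (0x1234 & 0xFF00) >> 8 = 0x12 = 18, +1 -> 19 outstanding RTTs */
	int nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, caps) + 1;

	(void)nutrs;
	(void)nortt;
}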