Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
"All changes in drivers (well technically SES is enclosure services,
but its change is minor). The biggest is the write combining change in
lpfc followed by the additional NULL checks in mpi3mr"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
scsi: ufs: core: Fix shift out of bounds when MAXQ=32
scsi: ufs: core: Move link recovery for hibern8 exit failure to wl_resume
scsi: ufs: core: Fix possible NULL pointer dereference in ufshcd_add_command_trace()
scsi: snic: MAINTAINERS: Update snic maintainers
scsi: snic: Remove unused linkstatus
scsi: pm8001: Fix use-after-free in pm8001_queue_command()
scsi: mpi3mr: Add NULL checks when resetting request and reply queues
scsi: ufs: core: Reset urgent_bkops_lvl to allow runtime PM power mode
scsi: ses: Fix devices attaching to different hosts
scsi: ufs: core: Fix RPMB region size detection for UFS 2.2
scsi: storvsc: Fix scheduling while atomic on PREEMPT_RT
scsi: lpfc: Properly set WC for DPP mapping

+96 -49
+1
MAINTAINERS
··· 6212 6212 6213 6213 CISCO SCSI HBA DRIVER 6214 6214 M: Karan Tilak Kumar <kartilak@cisco.com> 6215 + M: Narsimhulu Musini <nmusini@cisco.com> 6215 6216 M: Sesidhar Baddela <sebaddel@cisco.com> 6216 6217 L: linux-scsi@vger.kernel.org 6217 6218 S: Supported
+2
drivers/scsi/lpfc/lpfc_init.c
··· 12025 12025 iounmap(phba->sli4_hba.conf_regs_memmap_p); 12026 12026 if (phba->sli4_hba.dpp_regs_memmap_p) 12027 12027 iounmap(phba->sli4_hba.dpp_regs_memmap_p); 12028 + if (phba->sli4_hba.dpp_regs_memmap_wc_p) 12029 + iounmap(phba->sli4_hba.dpp_regs_memmap_wc_p); 12028 12030 break; 12029 12031 case LPFC_SLI_INTF_IF_TYPE_1: 12030 12032 break;
+30 -6
drivers/scsi/lpfc/lpfc_sli.c
··· 15977 15977 return NULL; 15978 15978 } 15979 15979 15980 + static __maybe_unused void __iomem * 15981 + lpfc_dpp_wc_map(struct lpfc_hba *phba, uint8_t dpp_barset) 15982 + { 15983 + 15984 + /* DPP region is supposed to cover 64-bit BAR2 */ 15985 + if (dpp_barset != WQ_PCI_BAR_4_AND_5) { 15986 + lpfc_log_msg(phba, KERN_WARNING, LOG_INIT, 15987 + "3273 dpp_barset x%x != WQ_PCI_BAR_4_AND_5\n", 15988 + dpp_barset); 15989 + return NULL; 15990 + } 15991 + 15992 + if (!phba->sli4_hba.dpp_regs_memmap_wc_p) { 15993 + void __iomem *dpp_map; 15994 + 15995 + dpp_map = ioremap_wc(phba->pci_bar2_map, 15996 + pci_resource_len(phba->pcidev, 15997 + PCI_64BIT_BAR4)); 15998 + 15999 + if (dpp_map) 16000 + phba->sli4_hba.dpp_regs_memmap_wc_p = dpp_map; 16001 + } 16002 + 16003 + return phba->sli4_hba.dpp_regs_memmap_wc_p; 16004 + } 16005 + 15980 16006 /** 15981 16007 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs 15982 16008 * @phba: HBA structure that EQs are on. ··· 16966 16940 uint8_t dpp_barset; 16967 16941 uint32_t dpp_offset; 16968 16942 uint8_t wq_create_version; 16969 - #ifdef CONFIG_X86 16970 - unsigned long pg_addr; 16971 - #endif 16972 16943 16973 16944 /* sanity check on queue memory */ 16974 16945 if (!wq || !cq) ··· 17151 17128 17152 17129 #ifdef CONFIG_X86 17153 17130 /* Enable combined writes for DPP aperture */ 17154 - pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; 17155 - rc = set_memory_wc(pg_addr, 1); 17156 - if (rc) { 17131 + bar_memmap_p = lpfc_dpp_wc_map(phba, dpp_barset); 17132 + if (!bar_memmap_p) { 17157 17133 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17158 17134 "3272 Cannot setup Combined " 17159 17135 "Write on WQ[%d] - disable DPP\n", 17160 17136 wq->queue_id); 17161 17137 phba->cfg_enable_dpp = 0; 17138 + } else { 17139 + wq->dpp_regaddr = bar_memmap_p + dpp_offset; 17162 17140 } 17163 17141 #else 17164 17142 phba->cfg_enable_dpp = 0;
+3
drivers/scsi/lpfc/lpfc_sli4.h
··· 785 785 void __iomem *dpp_regs_memmap_p; /* Kernel memory mapped address for 786 786 * dpp registers 787 787 */ 788 + void __iomem *dpp_regs_memmap_wc_p;/* Kernel memory mapped address for 789 + * dpp registers with write combining 790 + */ 788 791 union { 789 792 struct { 790 793 /* IF Type 0, BAR 0 PCI cfg space reg mem map */
+18 -14
drivers/scsi/mpi3mr/mpi3mr_fw.c
··· 4807 4807 } 4808 4808 4809 4809 for (i = 0; i < mrioc->num_queues; i++) { 4810 - mrioc->op_reply_qinfo[i].qid = 0; 4811 - mrioc->op_reply_qinfo[i].ci = 0; 4812 - mrioc->op_reply_qinfo[i].num_replies = 0; 4813 - mrioc->op_reply_qinfo[i].ephase = 0; 4814 - atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0); 4815 - atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0); 4816 - mpi3mr_memset_op_reply_q_buffers(mrioc, i); 4810 + if (mrioc->op_reply_qinfo) { 4811 + mrioc->op_reply_qinfo[i].qid = 0; 4812 + mrioc->op_reply_qinfo[i].ci = 0; 4813 + mrioc->op_reply_qinfo[i].num_replies = 0; 4814 + mrioc->op_reply_qinfo[i].ephase = 0; 4815 + atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0); 4816 + atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0); 4817 + mpi3mr_memset_op_reply_q_buffers(mrioc, i); 4818 + } 4817 4819 4818 - mrioc->req_qinfo[i].ci = 0; 4819 - mrioc->req_qinfo[i].pi = 0; 4820 - mrioc->req_qinfo[i].num_requests = 0; 4821 - mrioc->req_qinfo[i].qid = 0; 4822 - mrioc->req_qinfo[i].reply_qid = 0; 4823 - spin_lock_init(&mrioc->req_qinfo[i].q_lock); 4824 - mpi3mr_memset_op_req_q_buffers(mrioc, i); 4820 + if (mrioc->req_qinfo) { 4821 + mrioc->req_qinfo[i].ci = 0; 4822 + mrioc->req_qinfo[i].pi = 0; 4823 + mrioc->req_qinfo[i].num_requests = 0; 4824 + mrioc->req_qinfo[i].qid = 0; 4825 + mrioc->req_qinfo[i].reply_qid = 0; 4826 + spin_lock_init(&mrioc->req_qinfo[i].q_lock); 4827 + mpi3mr_memset_op_req_q_buffers(mrioc, i); 4828 + } 4825 4829 } 4826 4830 4827 4831 atomic_set(&mrioc->pend_large_data_sz, 0);
+3 -2
drivers/scsi/pm8001/pm8001_sas.c
··· 525 525 } else { 526 526 task->task_done(task); 527 527 } 528 - rc = -ENODEV; 529 - goto err_out; 528 + spin_unlock_irqrestore(&pm8001_ha->lock, flags); 529 + pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device gone\n"); 530 + return 0; 530 531 } 531 532 532 533 ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
+2 -3
drivers/scsi/ses.c
··· 528 528 }; 529 529 530 530 static int ses_enclosure_find_by_addr(struct enclosure_device *edev, 531 - void *data) 531 + struct efd *efd) 532 532 { 533 - struct efd *efd = data; 534 533 int i; 535 534 struct ses_component *scomp; 536 535 ··· 682 683 if (efd.addr) { 683 684 efd.dev = &sdev->sdev_gendev; 684 685 685 - enclosure_for_each_device(ses_enclosure_find_by_addr, &efd); 686 + ses_enclosure_find_by_addr(edev, &efd); 686 687 } 687 688 } 688 689
-9
drivers/scsi/snic/vnic_dev.c
··· 42 42 struct vnic_devcmd_notify *notify; 43 43 struct vnic_devcmd_notify notify_copy; 44 44 dma_addr_t notify_pa; 45 - u32 *linkstatus; 46 - dma_addr_t linkstatus_pa; 47 45 struct vnic_stats *stats; 48 46 dma_addr_t stats_pa; 49 47 struct vnic_devcmd_fw_info *fw_info; ··· 648 650 649 651 int svnic_dev_link_status(struct vnic_dev *vdev) 650 652 { 651 - if (vdev->linkstatus) 652 - return *vdev->linkstatus; 653 653 654 654 if (!vnic_dev_notify_ready(vdev)) 655 655 return 0; ··· 682 686 sizeof(struct vnic_devcmd_notify), 683 687 vdev->notify, 684 688 vdev->notify_pa); 685 - if (vdev->linkstatus) 686 - dma_free_coherent(&vdev->pdev->dev, 687 - sizeof(u32), 688 - vdev->linkstatus, 689 - vdev->linkstatus_pa); 690 689 if (vdev->stats) 691 690 dma_free_coherent(&vdev->pdev->dev, 692 691 sizeof(struct vnic_stats),
+3 -2
drivers/scsi/storvsc_drv.c
··· 1856 1856 cmd_request->payload_sz = payload_sz; 1857 1857 1858 1858 /* Invokes the vsc to start an IO */ 1859 - ret = storvsc_do_io(dev, cmd_request, get_cpu()); 1860 - put_cpu(); 1859 + migrate_disable(); 1860 + ret = storvsc_do_io(dev, cmd_request, smp_processor_id()); 1861 + migrate_enable(); 1861 1862 1862 1863 if (ret) 1863 1864 scsi_dma_unmap(scmnd);
+34 -13
drivers/ufs/core/ufshcd.c
··· 24 24 #include <linux/pm_opp.h> 25 25 #include <linux/regulator/consumer.h> 26 26 #include <linux/sched/clock.h> 27 + #include <linux/sizes.h> 27 28 #include <linux/iopoll.h> 28 29 #include <scsi/scsi_cmnd.h> 29 30 #include <scsi/scsi_dbg.h> ··· 518 517 519 518 if (hba->mcq_enabled) { 520 519 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq); 521 - 522 - hwq_id = hwq->id; 520 + if (hwq) 521 + hwq_id = hwq->id; 523 522 } else { 524 523 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 525 524 } ··· 4390 4389 spin_unlock_irqrestore(hba->host->host_lock, flags); 4391 4390 mutex_unlock(&hba->uic_cmd_mutex); 4392 4391 4393 - /* 4394 - * If the h8 exit fails during the runtime resume process, it becomes 4395 - * stuck and cannot be recovered through the error handler. To fix 4396 - * this, use link recovery instead of the error handler. 4397 - */ 4398 - if (ret && hba->pm_op_in_progress) 4399 - ret = ufshcd_link_recovery(hba); 4400 - 4401 4392 return ret; 4402 4393 } ··· 5242 5249 hba->dev_info.rpmb_region_size[1] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION1_SIZE]; 5243 5250 hba->dev_info.rpmb_region_size[2] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION2_SIZE]; 5244 5251 hba->dev_info.rpmb_region_size[3] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION3_SIZE]; 5252 + 5253 + if (hba->dev_info.wspecversion <= 0x0220) { 5254 + /* 5255 + * These older spec chips have only one RPMB region, 5256 + * sized between 128 kB minimum and 16 MB maximum. 5257 + * No per region size fields are provided (respective 5258 + * REGIONX_SIZE fields always contain zeros), so get 5259 + * it from the logical block count and size fields for 5260 + * compatibility 5261 + * 5262 + * (See JESD220C-2_2 Section 14.1.4.6 5263 + * RPMB Unit Descriptor, offset 13h, 4 bytes) 5264 + */ 5265 + hba->dev_info.rpmb_region_size[0] = 5266 + (get_unaligned_be64(desc_buf 5267 + + RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT) 5268 + << desc_buf[RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE]) 5269 + / SZ_128K; 5270 + } 5245 5271 } 5246 5272 5247 5273 ··· 5975 5963 5976 5964 hba->auto_bkops_enabled = false; 5977 5965 trace_ufshcd_auto_bkops_state(hba, "Disabled"); 5966 + hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; 5978 5967 hba->is_urgent_bkops_lvl_checked = false; 5979 5968 out: 5980 5969 return err; ··· 6079 6066 * impacted or critical. Handle these device by determining their urgent 6080 6067 * bkops status at runtime. 6081 6068 */ 6082 - if (curr_status < BKOPS_STATUS_PERF_IMPACT) { 6069 + if ((curr_status > BKOPS_STATUS_NO_OP) && (curr_status < BKOPS_STATUS_PERF_IMPACT)) { 6083 6070 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", 6084 6071 __func__, curr_status); 6085 6072 /* update the current status as the urgent bkops level */ ··· 7110 7097 7111 7098 ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs); 7112 7099 if (ret) 7113 - outstanding_cqs = (1U << hba->nr_hw_queues) - 1; 7100 + outstanding_cqs = (1ULL << hba->nr_hw_queues) - 1; 7114 7101 7115 7102 /* Exclude the poll queues */ 7116 7103 nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; ··· 10192 10179 } else { 10193 10180 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", 10194 10181 __func__, ret); 10195 - goto vendor_suspend; 10182 + /* 10183 + * If the h8 exit fails during the runtime resume 10184 + * process, it becomes stuck and cannot be recovered 10185 + * through the error handler. To fix this, use link 10186 + * recovery instead of the error handler. 10187 + */ 10188 + ret = ufshcd_link_recovery(hba); 10189 + if (ret) 10190 + goto vendor_suspend; 10196 10191 } 10197 10192 } else if (ufshcd_is_link_off(hba)) { 10198 10193 /*