Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
"The only core fix is in doc; all the others are in drivers, with the
biggest impacts in libsas being the rollback on error handling and in
ufs coming from a couple of error handling fixes, one causing a crash
if it's activated before scanning and the other fixing W-LUN
resumption"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
scsi: ufs: qcom: Fix confusing cleanup.h syntax
scsi: libsas: Add rollback handling when an error occurs
scsi: device_handler: Return error pointer in scsi_dh_attached_handler_name()
scsi: ufs: core: Fix a deadlock in the frequency scaling code
scsi: ufs: core: Fix an error handler crash
scsi: Revert "scsi: libsas: Fix exp-attached device scan after probe failure scanned in again after probe failed"
scsi: ufs: core: Fix RPMB link error by reversing Kconfig dependencies
scsi: qla4xxx: Use time conversion macros
scsi: qla2xxx: Enable/disable IRQD_NO_BALANCING during reset
scsi: ipr: Enable/disable IRQD_NO_BALANCING during reset
scsi: imm: Fix use-after-free bug caused by unfinished delayed work
scsi: target: sbp: Remove KMSG_COMPONENT macro
scsi: core: Correct documentation for scsi_device_quiesce()
scsi: mpi3mr: Prevent duplicate SAS/SATA device entries in channel 1
scsi: target: Reset t_task_cdb pointer in error case
scsi: ufs: core: Fix EH failure after W-LUN resume error

+13
drivers/md/dm-mpath.c
···
 
 	q = bdev_get_queue(p->path.dev->bdev);
 	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
+	if (IS_ERR(attached_handler_name)) {
+		if (PTR_ERR(attached_handler_name) == -ENODEV) {
+			if (m->hw_handler_name) {
+				DMERR("hardware handlers are only allowed for SCSI devices");
+				kfree(m->hw_handler_name);
+				m->hw_handler_name = NULL;
+			}
+			attached_handler_name = NULL;
+		} else {
+			r = PTR_ERR(attached_handler_name);
+			goto bad;
+		}
+	}
 	if (attached_handler_name || m->hw_handler_name) {
 		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
 		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
-1
drivers/misc/Kconfig
···
 
 config RPMB
 	tristate "RPMB partition interface"
-	depends on MMC || SCSI_UFSHCD
 	help
 	  Unified RPMB unit interface for RPMB capable devices such as eMMC and
 	  UFS. Provides interface for in-kernel security controllers to access
+1
drivers/scsi/imm.c
···
 	imm_struct *dev;
 	list_for_each_entry(dev, &imm_hosts, list) {
 		if (dev->dev->port == pb) {
+			disable_delayed_work_sync(&dev->imm_tq);
 			list_del_init(&dev->list);
 			scsi_remove_host(dev->host);
 			scsi_host_put(dev->host);
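The one-line addition makes sure the driver's delayed work can no longer run once the host and the structure embedding the work are being torn down. A minimal sketch of that pattern, with illustrative names rather than the imm driver's own structures:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work poll_work;	/* re-queues itself while attached */
};

static void my_dev_detach(struct my_dev *dev)
{
	/*
	 * disable_delayed_work_sync() cancels the work, waits for any
	 * running instance to finish, and leaves the item disabled so
	 * later queueing attempts become no-ops; only then is it safe
	 * to free the structure that embeds it.
	 */
	disable_delayed_work_sync(&dev->poll_work);

	kfree(dev);	/* nothing can reference dev->poll_work any more */
}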
+27 -1
drivers/scsi/ipr.c
···
 #include <linux/hdreg.h>
 #include <linux/reboot.h>
 #include <linux/stringify.h>
+#include <linux/irq.h>
 #include <asm/io.h>
-#include <asm/irq.h>
 #include <asm/processor.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
···
 }
 
 /**
+ * ipr_set_affinity_nobalance
+ * @ioa_cfg:	ipr_ioa_cfg struct for an ipr device
+ * @flag:	bool
+ *		true: enable "IRQ_NO_BALANCING" bit for msix interrupt
+ *		false: disable "IRQ_NO_BALANCING" bit for msix interrupt
+ * Description: This function will be called to disable/enable
+ *		"IRQ_NO_BALANCING" to avoid irqbalance daemon
+ *		kicking in during adapter reset.
+ **/
+static void ipr_set_affinity_nobalance(struct ipr_ioa_cfg *ioa_cfg, bool flag)
+{
+	int irq, i;
+
+	for (i = 0; i < ioa_cfg->nvectors; i++) {
+		irq = pci_irq_vector(ioa_cfg->pdev, i);
+
+		if (flag)
+			irq_set_status_flags(irq, IRQ_NO_BALANCING);
+		else
+			irq_clear_status_flags(irq, IRQ_NO_BALANCING);
+	}
+}
+
+/**
  * ipr_reset_restore_cfg_space - Restore PCI config space.
  * @ipr_cmd:	ipr command struct
  *
···
 		return IPR_RC_JOB_CONTINUE;
 	}
 
+	ipr_set_affinity_nobalance(ioa_cfg, false);
 	ipr_fail_all_ops(ioa_cfg);
 
 	if (ioa_cfg->sis64) {
···
 	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
 
 	if (rc == PCIBIOS_SUCCESSFUL) {
+		ipr_set_affinity_nobalance(ioa_cfg, true);
 		ipr_cmd->job_step = ipr_reset_bist_done;
 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
 		rc = IPR_RC_JOB_RETURN;
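This hunk and the matching qla2xxx change below add the same kind of helper. As a condensed, illustrative sketch of the idea (the function name and the nvec parameter are placeholders, not driver code), the point is simply to walk the device's MSI-X vectors and toggle the balancing exclusion flag:

#include <linux/irq.h>
#include <linux/pci.h>

static void pin_msix_vectors(struct pci_dev *pdev, int nvec, bool pin)
{
	int i;

	for (i = 0; i < nvec; i++) {
		int irq = pci_irq_vector(pdev, i);

		/* IRQ_NO_BALANCING keeps the irqbalance daemon off this vector */
		if (pin)
			irq_set_status_flags(irq, IRQ_NO_BALANCING);
		else
			irq_clear_status_flags(irq, IRQ_NO_BALANCING);
	}
}

In the hunks above and below, each driver brackets its reset path with the two calls so that affinity changes cannot land in the middle of the reset.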
+1
drivers/scsi/libsas/sas_init.c
···
 Undo_ports:
 	sas_unregister_ports(sas_ha);
 Undo_phys:
+	sas_unregister_phys(sas_ha);
 
 	return error;
 }
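For orientation, a condensed sketch of the unwind ladder this label sits in; example_register_ha() and example_later_setup() are placeholders (the real sas_register_ha() does more between the steps), and the point of the series is that Undo_phys now has something to undo via the sas_unregister_phys() helper added in the sas_phy.c hunk below:

/* stands in for the registration steps that follow in the real code */
static int example_later_setup(struct sas_ha_struct *sas_ha) { return 0; }

static int example_register_ha(struct sas_ha_struct *sas_ha)
{
	int error;

	error = sas_register_phys(sas_ha);
	if (error)
		return error;

	error = sas_register_ports(sas_ha);
	if (error)
		goto Undo_phys;

	error = example_later_setup(sas_ha);
	if (error)
		goto Undo_ports;

	return 0;

Undo_ports:
	sas_unregister_ports(sas_ha);
Undo_phys:
	sas_unregister_phys(sas_ha);

	return error;
}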
+1 -14
drivers/scsi/libsas/sas_internal.h
···
 void sas_scsi_recover_host(struct Scsi_Host *shost);
 
 int sas_register_phys(struct sas_ha_struct *sas_ha);
+void sas_unregister_phys(struct sas_ha_struct *sas_ha);
 
 struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, gfp_t gfp_flags);
 void sas_free_event(struct asd_sas_event *event);
···
 		 func, dev->parent ? "exp-attached" :
 		 "direct-attached",
 		 SAS_ADDR(dev->sas_addr), err);
-
-	/*
-	 * If the device probe failed, the expander phy attached address
-	 * needs to be reset so that the phy will not be treated as flutter
-	 * in the next revalidation
-	 */
-	if (dev->parent && !dev_is_expander(dev->dev_type)) {
-		struct sas_phy *phy = dev->phy;
-		struct domain_device *parent = dev->parent;
-		struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number];
-
-		memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
-	}
-
 	sas_unregister_dev(dev->port, dev);
 }
+30 -3
drivers/scsi/libsas/sas_phy.c
···
 int sas_register_phys(struct sas_ha_struct *sas_ha)
 {
 	int i;
+	int err;
 
 	/* Now register the phys. */
 	for (i = 0; i < sas_ha->num_phys; i++) {
···
 		phy->frame_rcvd_size = 0;
 
 		phy->phy = sas_phy_alloc(&sas_ha->shost->shost_gendev, i);
-		if (!phy->phy)
-			return -ENOMEM;
+		if (!phy->phy) {
+			err = -ENOMEM;
+			goto rollback;
+		}
 
 		phy->phy->identify.initiator_port_protocols =
 			phy->iproto;
···
 		phy->phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
 		phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
 
-		sas_phy_add(phy->phy);
+		err = sas_phy_add(phy->phy);
+		if (err) {
+			sas_phy_free(phy->phy);
+			goto rollback;
+		}
 	}
 
 	return 0;
+
+rollback:
+	for (i-- ; i >= 0 ; i--) {
+		struct asd_sas_phy *phy = sas_ha->sas_phy[i];
+
+		sas_phy_delete(phy->phy);
+		sas_phy_free(phy->phy);
+	}
+	return err;
+}
+
+void sas_unregister_phys(struct sas_ha_struct *sas_ha)
+{
+	int i;
+
+	for (i = 0 ; i < sas_ha->num_phys ; i++) {
+		struct asd_sas_phy *phy = sas_ha->sas_phy[i];
+
+		sas_phy_delete(phy->phy);
+		sas_phy_free(phy->phy);
+	}
 }
 
 const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
+2 -2
drivers/scsi/mpi3mr/mpi3mr.h
···
 extern int prot_mask;
 extern atomic64_t event_counter;
 
-#define MPI3MR_DRIVER_VERSION	"8.15.0.5.50"
-#define MPI3MR_DRIVER_RELDATE	"12-August-2025"
+#define MPI3MR_DRIVER_VERSION	"8.15.0.5.51"
+#define MPI3MR_DRIVER_RELDATE	"18-November-2025"
 
 #define MPI3MR_DRIVER_NAME	"mpi3mr"
 #define MPI3MR_DRIVER_LICENSE	"GPL"
+3 -1
drivers/scsi/mpi3mr/mpi3mr_os.c
···
 	if (is_added == true)
 		tgtdev->io_throttle_enabled =
 		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
+	if (!mrioc->sas_transport_enabled)
+		tgtdev->non_stl = 1;
 
 	switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
 	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
···
 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
 	if (starget->channel == mrioc->scsi_device_channel) {
 		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
-		if (tgt_dev && !tgt_dev->is_hidden) {
+		if (tgt_dev && !tgt_dev->is_hidden && tgt_dev->non_stl) {
 			scsi_tgt_priv_data->starget = starget;
 			scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
 			scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
+30
drivers/scsi/qla2xxx/qla_os.c
···
 #include <linux/crash_dump.h>
 #include <linux/trace_events.h>
 #include <linux/trace.h>
+#include <linux/irq.h>
 
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
···
 }
 
 
+/**
+ * qla2xxx_set_affinity_nobalance
+ * @pdev:	pci_dev struct for a qla2xxx device
+ * @flag:	bool
+ *		true: enable "IRQ_NO_BALANCING" bit for msix interrupt
+ *		false: disable "IRQ_NO_BALANCING" bit for msix interrupt
+ * Description: This function will be called to disable/enable
+ *		"IRQ_NO_BALANCING" to avoid irqbalance daemon
+ *		kicking in during adapter reset.
+ **/
+
+static void qla2xxx_set_affinity_nobalance(struct pci_dev *pdev, bool flag)
+{
+	int irq, i;
+
+	for (i = 0; i < QLA_BASE_VECTORS; i++) {
+		irq = pci_irq_vector(pdev, i);
+
+		if (flag)
+			irq_set_status_flags(irq, IRQ_NO_BALANCING);
+		else
+			irq_clear_status_flags(irq, IRQ_NO_BALANCING);
+	}
+}
+
 static pci_ers_result_t
 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 {
···
 		ret = PCI_ERS_RESULT_NEED_RESET;
 		goto out;
 	}
+
+	qla2xxx_set_affinity_nobalance(pdev, false);
 
 	switch (state) {
 	case pci_channel_io_normal:
···
 exit_slot_reset:
 	ql_dbg(ql_dbg_aer, base_vha, 0x900e,
 	    "Slot Reset returning %x.\n", ret);
+
+	qla2xxx_set_affinity_nobalance(pdev, true);
 
 	return ret;
 }
+1 -1
drivers/scsi/qla4xxx/ql4_nx.c
···
 		    (val == PHAN_INITIALIZE_ACK))
 			return 0;
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(500);
+		schedule_timeout(msecs_to_jiffies(500));
 
 	} while (--retries);
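schedule_timeout() counts jiffies, and the length of a jiffy depends on CONFIG_HZ: a bare 500 is 500 ms only with HZ=1000, but five seconds with HZ=100. A small illustrative sketch of HZ-independent delays (function names are placeholders):

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static void wait_half_second(void)
{
	/* ~500 ms no matter what HZ is configured */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(500));
}

static void wait_half_second_simpler(void)
{
	/* msleep() does the jiffies conversion internally */
	msleep(500);
}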
+5 -3
drivers/scsi/scsi_dh.c
···
  *	that may have a device handler attached
  * @gfp - the GFP mask used in the kmalloc() call when allocating memory
  *
- * Returns name of attached handler, NULL if no handler is attached.
+ * Returns name of attached handler, NULL if no handler is attached, or
+ * an error pointer if an error occurred.
  * Caller must take care to free the returned string.
  */
 const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
···
 
 	sdev = scsi_device_from_queue(q);
 	if (!sdev)
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	if (sdev->handler)
-		handler_name = kstrdup(sdev->handler->name, gfp);
+		handler_name = kstrdup(sdev->handler->name, gfp) ? :
+			ERR_PTR(-ENOMEM);
 	put_device(&sdev->sdev_gendev);
 	return handler_name;
 }
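The dm-mpath hunk above is the real in-tree caller of this new convention; as a condensed sketch (report_handler() is a placeholder name), a caller now has to distinguish three outcomes: a name to kfree(), NULL for "no handler attached", or an error pointer:

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <scsi/scsi_dh.h>

static int report_handler(struct request_queue *q)
{
	const char *name = scsi_dh_attached_handler_name(q, GFP_KERNEL);

	if (IS_ERR(name))	/* -ENODEV: not a SCSI queue, -ENOMEM: kstrdup() failed */
		return PTR_ERR(name);
	if (!name)		/* SCSI device, but no handler attached */
		return 0;

	pr_info("attached handler: %s\n", name);
	kfree(name);
	return 0;
}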
+1 -1
drivers/scsi/scsi_lib.c
···
  *
  *	Must be called with user context, may sleep.
  *
- *	Returns zero if unsuccessful or an error if not.
+ *	Returns zero if successful or an error if not.
  */
 int
 scsi_device_quiesce(struct scsi_device *sdev)
+1 -2
drivers/target/sbp/sbp_target.c
···
  * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
  */
 
-#define KMSG_COMPONENT "sbp_target"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "sbp_target: " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
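KMSG_COMPONENT is a convention used mainly by s390 code; defining pr_fmt() directly, before the first include, is the usual way to give every pr_*() call in a file a common prefix. A tiny illustrative sketch:

#define pr_fmt(fmt) "sbp_target: " fmt

#include <linux/printk.h>

static void greet(void)
{
	pr_info("hello\n");	/* logged as "sbp_target: hello" */
}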
+1
drivers/target/target_core_transport.c
···
 	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
 		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp);
 		if (!cmd->t_task_cdb) {
+			cmd->t_task_cdb = &cmd->__t_task_cdb[0];
 			pr_err("Unable to allocate cmd->t_task_cdb"
 				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
 				scsi_command_size(cdb),
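The reset matters because the command keeps one pointer that may refer either to an embedded array or to a separately allocated buffer, and later cleanup decides what to free by comparing the two. A minimal sketch of that pattern with illustrative names (not the target core's own structures):

#include <linux/slab.h>
#include <linux/string.h>

struct cmd_buf {
	unsigned char *cdb;		/* points at __cdb or a heap buffer */
	unsigned char __cdb[32];	/* inline storage for the common case */
};

static int cmd_buf_set(struct cmd_buf *c, const unsigned char *cdb, size_t len)
{
	if (len > sizeof(c->__cdb)) {
		c->cdb = kzalloc(len, GFP_KERNEL);
		if (!c->cdb) {
			c->cdb = &c->__cdb[0];	/* keep the pointer valid for cleanup */
			return -ENOMEM;
		}
	} else {
		c->cdb = &c->__cdb[0];
	}
	memcpy(c->cdb, cdb, len);
	return 0;
}

static void cmd_buf_release(struct cmd_buf *c)
{
	if (c->cdb != &c->__cdb[0])	/* only free what was kmalloc'd */
		kfree(c->cdb);
}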
+1
drivers/ufs/Kconfig
···
 menuconfig SCSI_UFSHCD
 	tristate "Universal Flash Storage Controller"
 	depends on SCSI && SCSI_DMA
+	depends on RPMB || !RPMB
 	select PM_DEVFREQ
 	select DEVFREQ_GOV_SIMPLE_ONDEMAND
 	select NLS
+47 -25
drivers/ufs/core/ufshcd.c
···
 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
 {
 	up_write(&hba->clk_scaling_lock);
+	mutex_unlock(&hba->wb_mutex);
+	blk_mq_unquiesce_tagset(&hba->host->tag_set);
+	mutex_unlock(&hba->host->scan_mutex);
 
 	/* Enable Write Booster if current gear requires it else disable it */
 	if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
 		ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear);
 
-	mutex_unlock(&hba->wb_mutex);
-
-	blk_mq_unquiesce_tagset(&hba->host->tag_set);
-	mutex_unlock(&hba->host->scan_mutex);
 	ufshcd_release(hba);
 }
···
 
 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 {
+	/*
+	 * A WLUN resume failure could potentially lead to the HBA being
+	 * runtime suspended, so take an extra reference on hba->dev.
+	 */
+	pm_runtime_get_sync(hba->dev);
 	ufshcd_rpm_get_sync(hba);
 	if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
 	    hba->is_sys_suspended) {
···
 	if (ufshcd_is_clkscaling_supported(hba))
 		ufshcd_clk_scaling_suspend(hba, false);
 	ufshcd_rpm_put(hba);
+	pm_runtime_put(hba->dev);
 }
 
 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
···
 #ifdef CONFIG_PM
 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
 {
+	struct scsi_target *starget = hba->ufs_device_wlun->sdev_target;
 	struct Scsi_Host *shost = hba->host;
 	struct scsi_device *sdev;
 	struct request_queue *q;
-	int ret;
+	bool resume_sdev_queues = false;
 
 	hba->is_sys_suspended = false;
-	/*
-	 * Set RPM status of wlun device to RPM_ACTIVE,
-	 * this also clears its runtime error.
-	 */
-	ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
 
-	/* hba device might have a runtime error otherwise */
-	if (ret)
-		ret = pm_runtime_set_active(hba->dev);
+	/*
+	 * Ensure the parent's error status is cleared before proceeding
+	 * to the child, as the parent must be active to activate the child.
+	 */
+	if (hba->dev->power.runtime_error) {
+		/* hba->dev has no functional parent, thus simply set RPM_ACTIVE */
+		pm_runtime_set_active(hba->dev);
+		resume_sdev_queues = true;
+	}
+
+	if (hba->ufs_device_wlun->sdev_gendev.power.runtime_error) {
+		/*
+		 * starget, the parent of the wlun, might be suspended if the
+		 * wlun resume failed. Make sure the parent is resumed before
+		 * setting the child (wlun) active.
+		 */
+		pm_runtime_get_sync(&starget->dev);
+		pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
+		pm_runtime_put_sync(&starget->dev);
+		resume_sdev_queues = true;
+	}
+
 	/*
 	 * If wlun device had runtime error, we also need to resume those
 	 * consumer scsi devices in case any of them has failed to be
 	 * resumed due to supplier runtime resume failure. This is to unblock
 	 * blk_queue_enter in case there are bios waiting inside it.
 	 */
-	if (!ret) {
+	if (resume_sdev_queues) {
 		shost_for_each_device(sdev, shost) {
 			q = sdev->request_queue;
 			if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
···
 			hba->saved_uic_err, hba->force_reset,
 			ufshcd_is_link_broken(hba) ? "; link is broken" : "");
 
-	/*
-	 * Use ufshcd_rpm_get_noresume() here to safely perform link recovery
-	 * even if an error occurs during runtime suspend or runtime resume.
-	 * This avoids potential deadlocks that could happen if we tried to
-	 * resume the device while a PM operation is already in progress.
-	 */
-	ufshcd_rpm_get_noresume(hba);
-	if (hba->pm_op_in_progress) {
-		ufshcd_link_recovery(hba);
-		ufshcd_rpm_put(hba);
-		return;
-	}
-	ufshcd_rpm_put(hba);
+	if (hba->ufs_device_wlun) {
+		/*
+		 * Use ufshcd_rpm_get_noresume() here to safely perform link
+		 * recovery even if an error occurs during runtime suspend or
+		 * runtime resume. This avoids potential deadlocks that could
+		 * happen if we tried to resume the device while a PM operation
+		 * is already in progress.
+		 */
+		ufshcd_rpm_get_noresume(hba);
+		if (hba->pm_op_in_progress) {
+			ufshcd_link_recovery(hba);
+			ufshcd_rpm_put(hba);
+			return;
+		}
+		ufshcd_rpm_put(hba);
+	}
 
 	down(&hba->host_sem);
 	spin_lock_irqsave(hba->host->host_lock, flags);
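A condensed sketch (not the ufshcd code itself) of the runtime-PM ordering the recovery hunk relies on: pm_runtime_set_active() on a child fails while its parent is suspended, so the parent is resumed around the call. clear_child_rpm_error() and dev are placeholders; in the hunk, dev corresponds to the W-LUN's SCSI device.

#include <linux/pm_runtime.h>

static void clear_child_rpm_error(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!dev->power.runtime_error)
		return;				/* nothing to recover */

	if (parent)
		pm_runtime_get_sync(parent);	/* parent must be RPM_ACTIVE */
	pm_runtime_set_active(dev);		/* also clears dev's runtime error */
	if (parent)
		pm_runtime_put_sync(parent);
}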
+2 -4
drivers/ufs/host/ufs-qcom.c
···
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	int i, j, nminor = 0, testbus_len = 0;
-	u32 *testbus __free(kfree) = NULL;
 	char *prefix;
 
-	testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+	u32 *testbus __free(kfree) = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
 	if (!testbus)
 		return;
···
 static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
 			      const char *prefix, void __iomem *base)
 {
-	u32 *regs __free(kfree) = NULL;
 	size_t pos;
 
 	if (offset % 4 != 0 || len % 4 != 0)
 		return -EINVAL;
 
-	regs = kzalloc(len, GFP_ATOMIC);
+	u32 *regs __free(kfree) = kzalloc(len, GFP_ATOMIC);
 	if (!regs)
 		return -ENOMEM;
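The "confusing syntax" in the subject line is the split declaration: a pointer marked __free(kfree) was first set to NULL and only assigned later. The cleanup.h idiom is to declare the variable at the point it is initialized, so ownership and scope always coincide. A small illustrative sketch (sum_words() is a made-up example, not driver code):

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static int sum_words(const u32 *src, size_t n, u64 *out)
{
	u64 total = 0;
	size_t i;

	/* freed with kfree() automatically when it goes out of scope */
	u32 *buf __free(kfree) = kmalloc_array(n, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, src, n * sizeof(*buf));
	for (i = 0; i < n; i++)
		total += buf[i];

	*out = total;
	return 0;	/* no explicit kfree() needed on any return path */
}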