Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
"All fixes in drivers. The largest diffstat in ufs is caused by the doc
update with the next being the qcom null pointer deref fix"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
scsi: ufs: ufs-qcom: Fix ESI null pointer dereference
scsi: ufs: core: Rename ufshcd_wait_for_doorbell_clr()
scsi: ufs: core: Fix the return value documentation
scsi: ufs: core: Remove WARN_ON_ONCE() call from ufshcd_uic_cmd_compl()
scsi: ufs: core: Fix IRQ lock inversion for the SCSI host lock
scsi: qla4xxx: Prevent a potential error pointer dereference
scsi: ufs: ufs-pci: Add support for Intel Wildcat Lake
scsi: fnic: Remove a useless struct mempool forward declaration

+62 -58
-2
drivers/scsi/fnic/fnic.h
··· 323 323 FNIC_IN_ETH_TRANS_FC_MODE, 324 324 }; 325 325 326 - struct mempool; 327 - 328 326 enum fnic_role_e { 329 327 FNIC_ROLE_FCP_INITIATOR = 0, 330 328 };
+2
drivers/scsi/qla4xxx/ql4_os.c
··· 6606 6606 6607 6607 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); 6608 6608 vfree(dst_addr); 6609 + if (IS_ERR(ep)) 6610 + return NULL; 6609 6611 return ep; 6610 6612 } 6611 6613
+44 -32
drivers/ufs/core/ufshcd.c
··· 1303 1303 * 1304 1304 * Return: 0 upon success; -EBUSY upon timeout. 1305 1305 */ 1306 - static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, 1306 + static int ufshcd_wait_for_pending_cmds(struct ufs_hba *hba, 1307 1307 u64 wait_timeout_us) 1308 1308 { 1309 1309 int ret = 0; ··· 1431 1431 down_write(&hba->clk_scaling_lock); 1432 1432 1433 1433 if (!hba->clk_scaling.is_allowed || 1434 - ufshcd_wait_for_doorbell_clr(hba, timeout_us)) { 1434 + ufshcd_wait_for_pending_cmds(hba, timeout_us)) { 1435 1435 ret = -EBUSY; 1436 1436 up_write(&hba->clk_scaling_lock); 1437 1437 mutex_unlock(&hba->wb_mutex); ··· 3199 3199 } 3200 3200 3201 3201 /* 3202 - * Return: 0 upon success; < 0 upon failure. 3202 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 3203 + * < 0 if another error occurred. 3203 3204 */ 3204 3205 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, 3205 3206 struct ufshcd_lrb *lrbp, int max_timeout) ··· 3276 3275 } 3277 3276 } 3278 3277 3279 - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); 3280 3278 return err; 3281 3279 } 3282 3280 ··· 3294 3294 } 3295 3295 3296 3296 /* 3297 - * Return: 0 upon success; < 0 upon failure. 3297 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 3298 + * < 0 if another error occurred. 3298 3299 */ 3299 3300 static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, 3300 3301 const u32 tag, int timeout) ··· 3318 3317 * @cmd_type: specifies the type (NOP, Query...) 3319 3318 * @timeout: timeout in milliseconds 3320 3319 * 3321 - * Return: 0 upon success; < 0 upon failure. 3320 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 3321 + * < 0 if another error occurred. 3322 3322 * 3323 3323 * NOTE: Since there is only one available tag for device management commands, 3324 3324 * it is expected you hold the hba->dev_cmd.lock mutex. ··· 3365 3363 (*request)->upiu_req.selector = selector; 3366 3364 } 3367 3365 3366 + /* 3367 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 3368 + * < 0 if another error occurred. 3369 + */ 3368 3370 static int ufshcd_query_flag_retry(struct ufs_hba *hba, 3369 3371 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res) 3370 3372 { ··· 3389 3383 dev_err(hba->dev, 3390 3384 "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n", 3391 3385 __func__, opcode, idn, ret, retries); 3392 - WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); 3393 3386 return ret; 3394 3387 } 3395 3388 ··· 3400 3395 * @index: flag index to access 3401 3396 * @flag_res: the flag value after the query request completes 3402 3397 * 3403 - * Return: 0 for success; < 0 upon failure. 3398 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 3399 + * < 0 if another error occurred. 3404 3400 */ 3405 3401 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, 3406 3402 enum flag_idn idn, u8 index, bool *flag_res) ··· 3457 3451 3458 3452 out_unlock: 3459 3453 ufshcd_dev_man_unlock(hba); 3460 - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); 3461 3454 return err; 3462 3455 } 3463 3456 ··· 3469 3464 * @selector: selector field 3470 3465 * @attr_val: the attribute value after the query request completes 3471 3466 * 3472 - * Return: 0 upon success; < 0 upon failure. 3473 - */ 3467 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 3468 + * < 0 if another error occurred. 3469 + */ 3474 3470 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, 3475 3471 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) 3476 3472 { ··· 3519 3513 3520 3514 out_unlock: 3521 3515 ufshcd_dev_man_unlock(hba); 3522 - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); 3523 3516 return err; 3524 3517 } 3525 3518 ··· 3533 3528 * @attr_val: the attribute value after the query request 3534 3529 * completes 3535 3530 * 3536 - * Return: 0 for success; < 0 upon failure. 3537 - */ 3531 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 3532 + * < 0 if another error occurred. 3533 + */ 3538 3534 int ufshcd_query_attr_retry(struct ufs_hba *hba, 3539 3535 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, 3540 3536 u32 *attr_val) ··· 3557 3551 dev_err(hba->dev, 3558 3552 "%s: query attribute, idn %d, failed with error %d after %d retries\n", 3559 3553 __func__, idn, ret, QUERY_REQ_RETRIES); 3560 - WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); 3561 3554 return ret; 3562 3555 } 3563 3556 3564 3557 /* 3565 - * Return: 0 if successful; < 0 upon failure. 3558 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 3559 + * < 0 if another error occurred. 3566 3560 */ 3567 3561 static int __ufshcd_query_descriptor(struct ufs_hba *hba, 3568 3562 enum query_opcode opcode, enum desc_idn idn, u8 index, ··· 3621 3615 out_unlock: 3622 3616 hba->dev_cmd.query.descriptor = NULL; 3623 3617 ufshcd_dev_man_unlock(hba); 3624 - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); 3625 3618 return err; 3626 3619 } 3627 3620 ··· 3637 3632 * The buf_len parameter will contain, on return, the length parameter 3638 3633 * received on the response. 3639 3634 * 3640 - * Return: 0 for success; < 0 upon failure. 3635 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 3636 + * < 0 if another error occurred. 3641 3637 */ 3642 3638 int ufshcd_query_descriptor_retry(struct ufs_hba *hba, 3643 3639 enum query_opcode opcode, ··· 3656 3650 break; 3657 3651 } 3658 3652 3659 - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); 3660 3653 return err; 3661 3654 } 3662 3655 ··· 3668 3663 * @param_read_buf: pointer to buffer where parameter would be read 3669 3664 * @param_size: sizeof(param_read_buf) 3670 3665 * 3671 - * Return: 0 in case of success; < 0 upon failure. 3666 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 3667 + * < 0 if another error occurred. 3672 3668 */ 3673 3669 int ufshcd_read_desc_param(struct ufs_hba *hba, 3674 3670 enum desc_idn desc_id, ··· 3736 3730 out: 3737 3731 if (is_kmalloc) 3738 3732 kfree(desc_buf); 3739 - WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); 3740 3733 return ret; 3741 3734 } 3742 3735 ··· 4786 4781 * 4787 4782 * Set fDeviceInit flag and poll until device toggles it. 4788 4783 * 4789 - * Return: 0 upon success; < 0 upon failure. 4784 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 4785 + * < 0 if another error occurred. 4790 4786 */ 4791 4787 static int ufshcd_complete_dev_init(struct ufs_hba *hba) 4792 4788 { ··· 5141 5135 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT 5142 5136 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. 5143 5137 * 5144 - * Return: 0 upon success; < 0 upon failure. 5138 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 5139 + * < 0 if another error occurred. 5145 5140 */ 5146 5141 static int ufshcd_verify_dev_init(struct ufs_hba *hba) 5147 5142 { ··· 5566 5559 irqreturn_t retval = IRQ_NONE; 5567 5560 struct uic_command *cmd; 5568 5561 5569 - spin_lock(hba->host->host_lock); 5562 + guard(spinlock_irqsave)(hba->host->host_lock); 5570 5563 cmd = hba->active_uic_cmd; 5571 - if (WARN_ON_ONCE(!cmd)) 5564 + if (!cmd) 5572 5565 goto unlock; 5573 5566 5574 5567 if (ufshcd_is_auto_hibern8_error(hba, intr_status)) ··· 5593 5586 ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP); 5594 5587 5595 5588 unlock: 5596 - spin_unlock(hba->host->host_lock); 5597 - 5598 5589 return retval; 5599 5590 } 5600 5591 ··· 5874 5869 * as the device is allowed to manage its own way of handling background 5875 5870 * operations. 5876 5871 * 5877 - * Return: zero on success, non-zero on failure. 5872 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 5873 + * < 0 if another error occurred. 5878 5874 */ 5879 5875 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) 5880 5876 { ··· 5914 5908 * host is idle so that BKOPS are managed effectively without any negative 5915 5909 * impacts. 5916 5910 * 5917 - * Return: zero on success, non-zero on failure. 5911 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 5912 + * < 0 if another error occurred. 5918 5913 */ 5919 5914 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) 5920 5915 { ··· 6065 6058 __func__, err); 6066 6059 } 6067 6060 6061 + /* 6062 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 6063 + * < 0 if another error occurred. 6064 + */ 6068 6065 int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id) 6069 6066 { 6070 6067 struct utp_upiu_query_v4_0 *upiu_resp; ··· 6931 6920 bool queue_eh_work = false; 6932 6921 irqreturn_t retval = IRQ_NONE; 6933 6922 6934 - spin_lock(hba->host->host_lock); 6923 + guard(spinlock_irqsave)(hba->host->host_lock); 6935 6924 hba->errors |= UFSHCD_ERROR_MASK & intr_status; 6936 6925 6937 6926 if (hba->errors & INT_FATAL_ERRORS) { ··· 6990 6979 */ 6991 6980 hba->errors = 0; 6992 6981 hba->uic_error = 0; 6993 - spin_unlock(hba->host->host_lock); 6982 + 6994 6983 return retval; 6995 6984 } 6996 6985 ··· 7465 7454 * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation 7466 7455 * @dir: DMA direction 7467 7456 * 7468 - * Return: zero on success, non-zero on failure. 7457 + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 7458 + * < 0 if another error occurred. 7469 7459 */ 7470 7460 int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu, 7471 7461 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
+15 -24
drivers/ufs/host/ufs-qcom.c
··· 2070 2070 return IRQ_HANDLED; 2071 2071 } 2072 2072 2073 - static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi) 2074 - { 2075 - for (struct ufs_qcom_irq *q = uqi; q->irq; q++) 2076 - devm_free_irq(q->hba->dev, q->irq, q->hba); 2077 - 2078 - platform_device_msi_free_irqs_all(uqi->hba->dev); 2079 - devm_kfree(uqi->hba->dev, uqi); 2080 - } 2081 - 2082 - DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T)) 2083 - 2084 2073 static int ufs_qcom_config_esi(struct ufs_hba *hba) 2085 2074 { 2086 2075 struct ufs_qcom_host *host = ufshcd_get_variant(hba); ··· 2084 2095 */ 2085 2096 nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; 2086 2097 2087 - struct ufs_qcom_irq *qi __free(ufs_qcom_irq) = 2088 - devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL); 2089 - if (!qi) 2090 - return -ENOMEM; 2091 - /* Preset so __free() has a pointer to hba in all error paths */ 2092 - qi[0].hba = hba; 2093 - 2094 2098 ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs, 2095 2099 ufs_qcom_write_msi_msg); 2096 2100 if (ret) { 2097 - dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret); 2098 - return ret; 2101 + dev_warn(hba->dev, "Platform MSI not supported or failed, continuing without ESI\n"); 2102 + return ret; /* Continue without ESI */ 2103 + } 2104 + 2105 + struct ufs_qcom_irq *qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL); 2106 + 2107 + if (!qi) { 2108 + platform_device_msi_free_irqs_all(hba->dev); 2109 + return -ENOMEM; 2099 2110 } 2100 2111 2101 2112 for (int idx = 0; idx < nr_irqs; idx++) { ··· 2106 2117 ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler, 2107 2118 IRQF_SHARED, "qcom-mcq-esi", qi + idx); 2108 2119 if (ret) { 2109 - dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n", 2120 - dev_err(hba->dev, "%s: Failed to request IRQ for %d, err = %d\n", 2110 2121 __func__, qi[idx].irq, ret); 2111 - qi[idx].irq = 0; 2122 + /* Free previously allocated IRQs */ 2123 + for (int j = 0; j < idx; j++) 2124 + devm_free_irq(hba->dev, qi[j].irq, qi + j); 2125 + platform_device_msi_free_irqs_all(hba->dev); 2126 + devm_kfree(hba->dev, qi); 2112 2127 return ret; 2113 2128 } 2114 2129 } 2115 - 2116 - retain_and_null_ptr(qi); 2117 2130 2118 2131 if (host->hw_ver.major >= 6) { 2119 2132 ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+1
drivers/ufs/host/ufshcd-pci.c
··· 630 630 { PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, 631 631 { PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, 632 632 { PCI_VDEVICE(INTEL, 0xE447), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, 633 + { PCI_VDEVICE(INTEL, 0x4D47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, 633 634 { } /* terminate list */ 634 635 }; 635 636