Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch '6.6/scsi-staging' into 6.6/scsi-fixes

Pull in staged fixes for 6.6.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

+63 -55
+1 -1
drivers/scsi/megaraid/megaraid_sas.h
···
     u32 support_morethan256jbod;    /* FW support for more than 256 PD/JBOD */
     bool use_seqnum_jbod_fp;        /* Added for PD sequence */
     bool smp_affinity_enable;
-    spinlock_t crashdump_lock;
+    struct mutex crashdump_lock;

     struct megasas_register_set __iomem *reg_set;
     u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
+9 -12
drivers/scsi/megaraid/megaraid_sas_base.c
···
     struct megasas_instance *instance =
         (struct megasas_instance *) shost->hostdata;
     int val = 0;
-    unsigned long flags;

     if (kstrtoint(buf, 0, &val) != 0)
         return -EINVAL;

-    spin_lock_irqsave(&instance->crashdump_lock, flags);
+    mutex_lock(&instance->crashdump_lock);
     instance->fw_crash_buffer_offset = val;
-    spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+    mutex_unlock(&instance->crashdump_lock);
     return strlen(buf);
 }

···
     unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
     unsigned long chunk_left_bytes;
     unsigned long src_addr;
-    unsigned long flags;
     u32 buff_offset;

-    spin_lock_irqsave(&instance->crashdump_lock, flags);
+    mutex_lock(&instance->crashdump_lock);
     buff_offset = instance->fw_crash_buffer_offset;
     if (!instance->crash_dump_buf ||
         !((instance->fw_crash_state == AVAILABLE) ||
           (instance->fw_crash_state == COPYING))) {
         dev_err(&instance->pdev->dev,
             "Firmware crash dump is not available\n");
-        spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+        mutex_unlock(&instance->crashdump_lock);
         return -EINVAL;
     }

     if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
         dev_err(&instance->pdev->dev,
             "Firmware crash dump offset is out of range\n");
-        spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+        mutex_unlock(&instance->crashdump_lock);
         return 0;
     }

···
     src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
         (buff_offset % dmachunk);
     memcpy(buf, (void *)src_addr, size);
-    spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+    mutex_unlock(&instance->crashdump_lock);

     return size;
 }

···
     struct megasas_instance *instance =
         (struct megasas_instance *) shost->hostdata;
     int val = 0;
-    unsigned long flags;

     if (kstrtoint(buf, 0, &val) != 0)
         return -EINVAL;

···
     instance->fw_crash_state = val;

     if ((val == COPIED) || (val == COPY_ERROR)) {
-        spin_lock_irqsave(&instance->crashdump_lock, flags);
+        mutex_lock(&instance->crashdump_lock);
         megasas_free_host_crash_buffer(instance);
-        spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+        mutex_unlock(&instance->crashdump_lock);
         if (val == COPY_ERROR)
             dev_info(&instance->pdev->dev, "application failed to "
                 "copy Firmware crash dump\n");

···
     init_waitqueue_head(&instance->int_cmd_wait_q);
     init_waitqueue_head(&instance->abort_cmd_wait_q);

-    spin_lock_init(&instance->crashdump_lock);
+    mutex_init(&instance->crashdump_lock);
     spin_lock_init(&instance->mfi_pool_lock);
     spin_lock_init(&instance->hba_lock);
     spin_lock_init(&instance->stream_lock);
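The crashdump_lock conversion above only involves sysfs show/store handlers, which run in process context, and the read path copies up to a DMA chunk of crash-dump data while holding the lock. A sleeping mutex is therefore sufficient and avoids keeping a spinlock held (with interrupts off) across a large memcpy(). A minimal sketch of the pattern, using hypothetical structure and function names rather than the driver's own:

#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical context: data touched only from process context (sysfs
 * handlers) can be guarded by a mutex instead of an IRQ-safe spinlock. */
struct crash_ctx {
    struct mutex lock;      /* was: spinlock_t + spin_lock_irqsave() */
    void *buf;
    size_t size;
};

static ssize_t crash_ctx_read(struct crash_ctx *ctx, char *out, size_t len)
{
    ssize_t ret = -EINVAL;

    mutex_lock(&ctx->lock);             /* may sleep; fine in process context */
    if (ctx->buf && len <= ctx->size) {
        memcpy(out, ctx->buf, len);     /* potentially large copy, IRQs stay enabled */
        ret = len;
    }
    mutex_unlock(&ctx->lock);
    return ret;
}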
+2 -2
drivers/scsi/ppa.c
···
     case PPA_EPP_8:
         epp_reset(ppb);
         w_ctr(ppb, 0x4);
-        if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x01))
+        if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03))
             outsl(ppb + 4, buffer, len >> 2);
-        else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x03))
+        else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01))
             outsw(ppb + 4, buffer, len >> 1);
         else
             outsb(ppb + 4, buffer, len);
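The ppa.c fix swaps two alignment masks that were backwards: a 32-bit outsl() burst needs both the buffer address and the length to be multiples of 4 (low two bits clear, mask 0x03), while a 16-bit outsw() burst only needs multiples of 2 (mask 0x01). A standalone sketch of the corrected test, with hypothetical helper names:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* OR-ing the address and the length lets one mask test both at once. */
static bool ok_for_32bit_io(const void *buf, size_t len)
{
    return (((uintptr_t)buf | len) & 0x03) == 0;    /* 4-byte aligned */
}

static bool ok_for_16bit_io(const void *buf, size_t len)
{
    return (((uintptr_t)buf | len) & 0x01) == 0;    /* 2-byte aligned */
}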
+8 -2
drivers/scsi/qedf/qedf_io.c
···
         goto drop_rdata_kref;
     }

+    spin_lock_irqsave(&fcport->rport_lock, flags);
     if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
         test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
         test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
···
             "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
             io_req->xid, io_req->sc_cmd);
         rc = 1;
+        spin_unlock_irqrestore(&fcport->rport_lock, flags);
         goto drop_rdata_kref;
     }
+
+    /* Set the command type to abort */
+    io_req->cmd_type = QEDF_ABTS;
+    spin_unlock_irqrestore(&fcport->rport_lock, flags);

     kref_get(&io_req->refcount);
···
     qedf->control_requests++;
     qedf->packet_aborts++;

-    /* Set the command type to abort */
-    io_req->cmd_type = QEDF_ABTS;
     io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

     set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
···
         refcount, fcport, fcport->rdata->ids.port_id);

     /* Cleanup cmds re-use the same TID as the original I/O */
+    spin_lock_irqsave(&fcport->rport_lock, flags);
     io_req->cmd_type = QEDF_CLEANUP;
+    spin_unlock_irqrestore(&fcport->rport_lock, flags);
     io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

     init_completion(&io_req->cleanup_done);
+6 -1
drivers/scsi/qedf/qedf_main.c
···
     struct qedf_ioreq *io_req;
     struct qedf_rport *fcport;
     u32 comp_type;
+    u8 io_comp_type;
+    unsigned long flags;

     comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
         FCOE_CQE_CQE_TYPE_MASK;
···
         return;
     }

+    spin_lock_irqsave(&fcport->rport_lock, flags);
+    io_comp_type = io_req->cmd_type;
+    spin_unlock_irqrestore(&fcport->rport_lock, flags);

     switch (comp_type) {
     case FCOE_GOOD_COMPLETION_CQE_TYPE:
         atomic_inc(&fcport->free_sqes);
-        switch (io_req->cmd_type) {
+        switch (io_comp_type) {
         case QEDF_SCSI_CMD:
             qedf_scsi_completion(qedf, cqe, io_req);
             break;
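Both qedf hunks close the same race: the abort/cleanup path rewrites io_req->cmd_type while the completion handler is dispatching on it. Taking fcport->rport_lock around the write, and snapshotting the field into a local under the same lock on the completion side, gives the dispatch one consistent value. A rough sketch of the pattern with hypothetical types, not the driver's actual structures:

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_rport { spinlock_t rport_lock; };
struct example_io_req { u8 cmd_type; };

/* Writer: the abort path changes the command type under the lock. */
static void example_mark_abort(struct example_rport *p, struct example_io_req *io,
                               u8 abort_type)
{
    unsigned long flags;

    spin_lock_irqsave(&p->rport_lock, flags);
    io->cmd_type = abort_type;
    spin_unlock_irqrestore(&p->rport_lock, flags);
}

/* Reader: the completion path samples the type once, then acts on the copy. */
static u8 example_snapshot_cmd_type(struct example_rport *p, struct example_io_req *io)
{
    unsigned long flags;
    u8 type;

    spin_lock_irqsave(&p->rport_lock, flags);
    type = io->cmd_type;
    spin_unlock_irqrestore(&p->rport_lock, flags);
    return type;
}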
+3 -3
drivers/scsi/qla2xxx/qla_dfs.c
···

     sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
     fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
-    if (!fp->dfs_rport_dir)
+    if (IS_ERR(fp->dfs_rport_dir))
         return;
     if (NVME_TARGET(vha->hw, fp))
         debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
···
     if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
         ha->tgt.dfs_naqp = debugfs_create_file("naqp",
             0400, ha->dfs_dir, vha, &dfs_naqp_ops);
-        if (!ha->tgt.dfs_naqp) {
+        if (IS_ERR(ha->tgt.dfs_naqp)) {
             ql_log(ql_log_warn, vha, 0xd011,
                 "Unable to create debugFS naqp node.\n");
             goto out;
         }
     }
     vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
-    if (!vha->dfs_rport_root) {
+    if (IS_ERR(vha->dfs_rport_root)) {
         ql_log(ql_log_warn, vha, 0xd012,
             "Unable to create debugFS rports node.\n");
         goto out;
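The qla_dfs.c checks being replaced were dead code: debugfs_create_dir() and debugfs_create_file() report failure with ERR_PTR() values (for example when debugfs is disabled), not NULL, so a !ptr test never fires. A small sketch of the corrected check, with a made-up node name:

#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *example_create_dir(struct dentry *parent)
{
    struct dentry *dir = debugfs_create_dir("example", parent);

    /* Failure comes back as an ERR_PTR(), e.g. ERR_PTR(-ENODEV). */
    if (IS_ERR(dir))
        return NULL;    /* caller treats the node as absent */

    return dir;
}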
+1 -1
drivers/scsi/qla2xxx/qla_inline.h
···
 static inline struct qla_qpair *
 qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
 {
-    int cpuid = smp_processor_id();
+    int cpuid = raw_smp_processor_id();

     if (qpair->cpuid != cpuid &&
         ha->qp_cpu_map[cpuid]) {
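smp_processor_id() warns (with CONFIG_DEBUG_PREEMPT) when called from preemptible context, because the task can migrate right after the read. raw_smp_processor_id() is the appropriate call when the value is only a placement hint and an occasionally stale answer is harmless, as in the qpair selection above. A minimal sketch with a hypothetical helper name:

#include <linux/smp.h>

/* Returns a CPU number used purely as a hint; the caller must tolerate the
 * task having migrated by the time the hint is consumed. */
static inline int example_cpu_hint(void)
{
    return raw_smp_processor_id();
}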
+3 -3
drivers/scsi/qla2xxx/qla_isr.c
···
     if (!ha->flags.fw_started)
         return;

-    if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
+    if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) {
         rsp->qpair->rcv_intr = 1;

         if (!rsp->qpair->cpu_mapped)
···
     }
     ha = qpair->hw;

-    queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
+    queue_work(ha->wq, &qpair->q_work);

     return IRQ_HANDLED;
 }
···
     wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
     spin_unlock_irqrestore(&ha->hardware_lock, flags);

-    queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
+    queue_work(ha->wq, &qpair->q_work);

     return IRQ_HANDLED;
 }
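The queue_work_on(smp_processor_id(), ...) calls in this and the following two qla2xxx files both trip the same preemption debug check and needlessly pin the work item to whichever CPU happened to take the interrupt. queue_work() lets the workqueue pick an eligible CPU (it still prefers the local one), which is all these completion paths need. A sketch with hypothetical names:

#include <linux/workqueue.h>

/* Preferred: let the workqueue place the item. */
static void example_kick(struct workqueue_struct *wq, struct work_struct *work)
{
    queue_work(wq, work);
    /* instead of: queue_work_on(smp_processor_id(), wq, work); */
}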
+5 -5
drivers/scsi/qla2xxx/qla_nvme.c
···
     nvme->u.nvme.dl = 0;
     nvme->u.nvme.timeout_sec = 0;
     nvme->u.nvme.cmd_dma = fd_resp->rspdma;
-    nvme->u.nvme.cmd_len = fd_resp->rsplen;
+    nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen);
     nvme->u.nvme.rsp_len = 0;
     nvme->u.nvme.rsp_dma = 0;
     nvme->u.nvme.exchange_address = uctx->exchange_address;
     nvme->u.nvme.nport_handle = uctx->nport_handle;
     nvme->u.nvme.ox_id = uctx->ox_id;
     dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
-        le32_to_cpu(fd_resp->rsplen), DMA_TO_DEVICE);
+        fd_resp->rsplen, DMA_TO_DEVICE);

     ql_dbg(ql_dbg_unsol, vha, 0x2122,
         "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
···
     nvme->u.nvme.desc = fd;
     nvme->u.nvme.dir = 0;
     nvme->u.nvme.dl = 0;
-    nvme->u.nvme.cmd_len = fd->rqstlen;
-    nvme->u.nvme.rsp_len = fd->rsplen;
+    nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen);
+    nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen);
     nvme->u.nvme.rsp_dma = fd->rspdma;
     nvme->u.nvme.timeout_sec = fd->timeout;
     nvme->u.nvme.cmd_dma = fd->rqstdma;
     dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
-        le32_to_cpu(fd->rqstlen), DMA_TO_DEVICE);
+        fd->rqstlen, DMA_TO_DEVICE);

     rval = qla2x00_start_sp(sp);
     if (rval != QLA_SUCCESS) {
+1 -2
drivers/scsi/qla2xxx/qla_target.c
···
         queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
     } else if (ha->msix_count) {
         if (cmd->atio.u.isp24.fcp_cmnd.rddata)
-            queue_work_on(smp_processor_id(), qla_tgt_wq,
-                &cmd->work);
+            queue_work(qla_tgt_wq, &cmd->work);
         else
             queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
                 &cmd->work);
+2 -2
drivers/scsi/qla2xxx/tcm_qla2xxx.c
···
     cmd->trc_flags |= TRC_CMD_DONE;

     INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
-    queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+    queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }

 /*
···
     cmd->trc_flags |= TRC_DATA_IN;
     cmd->cmd_in_wq = 1;
     INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
-    queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+    queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }

 static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
+12 -12
drivers/target/target_core_configfs.c
···
     /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
     unsigned char buf[INQUIRY_VENDOR_LEN + 2];
     char *stripped = NULL;
-    size_t len;
+    ssize_t len;
     ssize_t ret;

-    len = strlcpy(buf, page, sizeof(buf));
-    if (len < sizeof(buf)) {
+    len = strscpy(buf, page, sizeof(buf));
+    if (len > 0) {
         /* Strip any newline added from userspace. */
         stripped = strstrip(buf);
         len = strlen(stripped);
     }
-    if (len > INQUIRY_VENDOR_LEN) {
+    if (len < 0 || len > INQUIRY_VENDOR_LEN) {
         pr_err("Emulated T10 Vendor Identification exceeds"
             " INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
             "\n");
···
     /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
     unsigned char buf[INQUIRY_MODEL_LEN + 2];
     char *stripped = NULL;
-    size_t len;
+    ssize_t len;
     ssize_t ret;

-    len = strlcpy(buf, page, sizeof(buf));
-    if (len < sizeof(buf)) {
+    len = strscpy(buf, page, sizeof(buf));
+    if (len > 0) {
         /* Strip any newline added from userspace. */
         stripped = strstrip(buf);
         len = strlen(stripped);
     }
-    if (len > INQUIRY_MODEL_LEN) {
+    if (len < 0 || len > INQUIRY_MODEL_LEN) {
         pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: "
             __stringify(INQUIRY_MODEL_LEN)
             "\n");
···
     /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
     unsigned char buf[INQUIRY_REVISION_LEN + 2];
     char *stripped = NULL;
-    size_t len;
+    ssize_t len;
     ssize_t ret;

-    len = strlcpy(buf, page, sizeof(buf));
-    if (len < sizeof(buf)) {
+    len = strscpy(buf, page, sizeof(buf));
+    if (len > 0) {
         /* Strip any newline added from userspace. */
         stripped = strstrip(buf);
         len = strlen(stripped);
     }
-    if (len > INQUIRY_REVISION_LEN) {
+    if (len < 0 || len > INQUIRY_REVISION_LEN) {
         pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
             __stringify(INQUIRY_REVISION_LEN)
             "\n");
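The configfs hunks rely on strscpy()'s return convention: it returns the number of bytes copied (excluding the NUL) or -E2BIG when the source had to be truncated, and it never reads past the destination size, whereas the old strlcpy() returned strlen(source). Declaring len as ssize_t and testing len < 0 is what makes the overflow check work. A condensed sketch of the pattern, with hypothetical names:

#include <linux/string.h>
#include <linux/errno.h>

/* Copy a user-supplied string into a fixed INQUIRY-style field. */
static ssize_t example_copy_field(char *dst, size_t dst_sz,
                                  const char *src, ssize_t field_max)
{
    ssize_t len = strscpy(dst, src, dst_sz);

    if (len < 0 || len > field_max)    /* truncated, or too long for the field */
        return -EOVERFLOW;

    return len;
}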
+7 -6
drivers/ufs/core/ufshcd.c
···
 #include <linux/module.h>
 #include <linux/regulator/consumer.h>
 #include <linux/sched/clock.h>
+#include <linux/iopoll.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_driver.h>
···
  */
 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
 {
-    return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
+    u32 val;
+    int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
+                    500, UIC_CMD_TIMEOUT * 1000, false, hba,
+                    REG_CONTROLLER_STATUS);
+    return ret == 0 ? true : false;
 }

 /**
···
                       bool completion)
 {
     lockdep_assert_held(&hba->uic_cmd_mutex);
-    lockdep_assert_held(hba->host->host_lock);

     if (!ufshcd_ready_for_uic_cmd(hba)) {
         dev_err(hba->dev,
···
 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
     int ret;
-    unsigned long flags;

     if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
         return 0;
···
     mutex_lock(&hba->uic_cmd_mutex);
     ufshcd_add_delay_before_dme_cmd(hba);

-    spin_lock_irqsave(hba->host->host_lock, flags);
     ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
-    spin_unlock_irqrestore(hba->host->host_lock, flags);
     if (!ret)
         ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

···
         wmb();
         reenable_intr = true;
     }
-    ret = __ufshcd_send_uic_cmd(hba, cmd, false);
     spin_unlock_irqrestore(hba->host->host_lock, flags);
+    ret = __ufshcd_send_uic_cmd(hba, cmd, false);
     if (ret) {
         dev_err(hba->dev,
             "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
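The ufshcd change stops treating "controller not ready" as an instant failure and instead polls the status register, and it moves UIC command submission out from under host_lock, which is what makes a sleeping poll usable on that path. For reference, a self-contained sketch of the polling macro's shape, with made-up register names and accessor:

#include <linux/iopoll.h>
#include <linux/io.h>

#define EXAMPLE_STATUS_REG    0x30        /* hypothetical register offset */
#define EXAMPLE_READY_BIT     BIT(3)      /* hypothetical ready flag */

struct example_hba { void __iomem *mmio; };

static u32 example_readl(struct example_hba *hba, unsigned int reg)
{
    return readl(hba->mmio + reg);
}

/* read_poll_timeout(op, val, cond, sleep_us, timeout_us, sleep_before_read,
 * args...) re-reads op(args...) into val until cond holds or timeout_us
 * expires; it returns 0 on success and -ETIMEDOUT otherwise. */
static bool example_controller_ready(struct example_hba *hba)
{
    u32 val;

    return read_poll_timeout(example_readl, val, val & EXAMPLE_READY_BIT,
                             500, 1000 * 1000, false, hba,
                             EXAMPLE_STATUS_REG) == 0;
}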
+3 -3
include/linux/nvme-fc-driver.h
···
 struct nvmefc_ls_req {
     void            *rqstaddr;
     dma_addr_t      rqstdma;
-    __le32          rqstlen;
+    u32             rqstlen;
     void            *rspaddr;
     dma_addr_t      rspdma;
-    __le32          rsplen;
+    u32             rsplen;
     u32             timeout;

     void            *private;
···
 struct nvmefc_ls_rsp {
     void            *rspbuf;
     dma_addr_t      rspdma;
-    __le32          rsplen;
+    u16             rsplen;

     void (*done)(struct nvmefc_ls_rsp *rsp);
     void            *nvme_fc_private;    /* LLDD is not to access !! */
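The header change makes rqstlen/rsplen plain CPU-endian integers, which is what the qla_nvme.c hunk above depends on: the LLD converts with cpu_to_le32() only when storing into its firmware-visible __le32 field, and passes the raw length to dma_sync_single_for_device(). A sketch of that convention with hypothetical structure names:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Firmware-visible descriptor: fixed little-endian layout. */
struct example_fw_cmd {
    __le32 cmd_len;
};

/* The API-visible length is CPU-endian; convert exactly once, at the boundary. */
static void example_fill_cmd(struct example_fw_cmd *cmd, u32 rsplen)
{
    cmd->cmd_len = cpu_to_le32(rsplen);
}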