Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
"This series is all the stragglers that didn't quite make the first
merge window pull. It's mostly minor updates and bug fixes of merge
window code"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
scsi: nsp_cs: Check of ioremap return value
scsi: ufs: ufs-mediatek: Fix error checking in ufs_mtk_init_va09_pwr_ctrl()
scsi: ufs: Modify Tactive time setting conditions
scsi: efct: Remove useless DMA-32 fallback configuration
scsi: message: fusion: mptctl: Use dma_alloc_coherent()
scsi: message: fusion: mptsas: Use dma_alloc_coherent()
scsi: message: fusion: Use dma_alloc_coherent() in mptsas_exp_repmanufacture_info()
scsi: message: fusion: mptbase: Use dma_alloc_coherent()
scsi: message: fusion: Use dma_alloc_coherent() in mpt_alloc_fw_memory()
scsi: message: fusion: Remove usage of the deprecated "pci-dma-compat.h" API
scsi: megaraid: Avoid mismatched storage type sizes
scsi: hisi_sas: Remove unused variable and check in hisi_sas_send_ata_reset_each_phy()
scsi: aic79xx: Remove redundant error variable
scsi: pm80xx: Port reset timeout error handling correction
scsi: mpi3mr: Fix formatting problems in some kernel-doc comments
scsi: mpi3mr: Fix some spelling mistakes
scsi: mpt3sas: Update persistent trigger pages from sysfs interface
scsi: core: Fix scsi_mode_select() interface
scsi: aacraid: Fix spelling of "its"
scsi: qedf: Fix potential dereference of NULL pointer
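
Most of the diff below comes from the five "scsi: message: fusion" commits, which move mptbase, mptctl, mptlan and mptsas off the deprecated "pci-dma-compat.h" wrappers and onto the generic DMA API. A minimal sketch of the coherent-allocation substitution, using hypothetical helpers rather than driver code:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Placeholder helpers illustrating the conversion, not fusion code. */
    static void *example_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t *dma)
    {
            /* Was: pci_alloc_consistent(pdev, sz, dma). The old wrapper
             * hard-coded GFP_ATOMIC, so call sites known to sleep can be
             * relaxed to GFP_KERNEL during the conversion.
             */
            return dma_alloc_coherent(&pdev->dev, sz, dma, GFP_KERNEL);
    }

    static void example_free(struct pci_dev *pdev, size_t sz, void *buf,
                             dma_addr_t dma)
    {
            /* Was: pci_free_consistent(pdev, sz, buf, dma) */
            dma_free_coherent(&pdev->dev, sz, buf, dma);
    }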

+386 -280
+84 -65
drivers/message/fusion/mptbase.c
··· 300 if (!hdr.ExtPageLength) 301 goto out; 302 303 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 304 - &dma_handle); 305 if (!buffer) 306 goto out; 307 ··· 316 rc = 1; 317 318 out_free_consistent: 319 - pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 320 - buffer, dma_handle); 321 out: 322 return rc; 323 } ··· 1661 const uint64_t required_mask = dma_get_required_mask 1662 (&pdev->dev); 1663 if (required_mask > DMA_BIT_MASK(32) 1664 - && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) 1665 - && !pci_set_consistent_dma_mask(pdev, 1666 - DMA_BIT_MASK(64))) { 1667 ioc->dma_mask = DMA_BIT_MASK(64); 1668 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 1669 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", 1670 ioc->name)); 1671 - } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) 1672 - && !pci_set_consistent_dma_mask(pdev, 1673 - DMA_BIT_MASK(32))) { 1674 ioc->dma_mask = DMA_BIT_MASK(32); 1675 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 1676 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", ··· 1679 goto out_pci_release_region; 1680 } 1681 } else { 1682 - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) 1683 - && !pci_set_consistent_dma_mask(pdev, 1684 - DMA_BIT_MASK(32))) { 1685 ioc->dma_mask = DMA_BIT_MASK(32); 1686 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 1687 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", ··· 2766 2767 if (ioc->spi_data.pIocPg4 != NULL) { 2768 sz = ioc->spi_data.IocPg4Sz; 2769 - pci_free_consistent(ioc->pcidev, sz, 2770 - ioc->spi_data.pIocPg4, 2771 - ioc->spi_data.IocPg4_dma); 2772 ioc->spi_data.pIocPg4 = NULL; 2773 ioc->alloc_total -= sz; 2774 } ··· 3512 rc = 0; 3513 goto out; 3514 } 3515 - ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma); 3516 if (!ioc->cached_fw) { 3517 printk(MYIOC_s_ERR_FMT "Unable to allocate memory for the cached firmware image!\n", 3518 ioc->name); ··· 3546 sz = ioc->facts.FWImageSize; 3547 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n", 3548 ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz)); 3549 - pci_free_consistent(ioc->pcidev, sz, ioc->cached_fw, ioc->cached_fw_dma); 3550 ioc->alloc_total -= sz; 3551 ioc->cached_fw = NULL; 3552 } ··· 4446 */ 4447 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 && 4448 ioc->dma_mask > DMA_BIT_MASK(35)) { 4449 - if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32)) 4450 - && !pci_set_consistent_dma_mask(ioc->pcidev, 4451 - DMA_BIT_MASK(32))) { 4452 dma_mask = DMA_BIT_MASK(35); 4453 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4454 "setting 35 bit addressing for " ··· 4455 ioc->name)); 4456 } else { 4457 /*Reseting DMA mask to 64 bit*/ 4458 - pci_set_dma_mask(ioc->pcidev, 4459 - DMA_BIT_MASK(64)); 4460 - pci_set_consistent_dma_mask(ioc->pcidev, 4461 - DMA_BIT_MASK(64)); 4462 4463 printk(MYIOC_s_ERR_FMT 4464 "failed setting 35 bit addressing for " ··· 4593 alloc_dma += ioc->reply_sz; 4594 } 4595 4596 - if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev, 4597 - ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev, 4598 ioc->dma_mask)) 4599 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4600 "restoring 64 bit addressing\n", ioc->name)); ··· 4618 ioc->sense_buf_pool = NULL; 4619 } 4620 4621 - if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev, 4622 - DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev, 4623 DMA_BIT_MASK(64))) 4624 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4625 "restoring 64 bit addressing\n", ioc->name)); ··· 4966 4967 if (hdr.PageLength > 0) 
{ 4968 data_sz = hdr.PageLength * 4; 4969 - ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma); 4970 rc = -ENOMEM; 4971 if (ppage0_alloc) { 4972 memset((u8 *)ppage0_alloc, 0, data_sz); ··· 4981 4982 } 4983 4984 - pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma); 4985 4986 /* FIXME! 4987 * Normalize endianness of structure data, ··· 5014 5015 data_sz = hdr.PageLength * 4; 5016 rc = -ENOMEM; 5017 - ppage1_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma); 5018 if (ppage1_alloc) { 5019 memset((u8 *)ppage1_alloc, 0, data_sz); 5020 cfg.physAddr = page1_dma; ··· 5027 memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz); 5028 } 5029 5030 - pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage1_alloc, page1_dma); 5031 5032 /* FIXME! 5033 * Normalize endianness of structure data, ··· 5317 /* Read the config page */ 5318 data_sz = hdr.PageLength * 4; 5319 rc = -ENOMEM; 5320 - ppage_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma); 5321 if (ppage_alloc) { 5322 memset((u8 *)ppage_alloc, 0, data_sz); 5323 cfg.physAddr = page_dma; ··· 5328 if ((rc = mpt_config(ioc, &cfg)) == 0) 5329 ioc->biosVersion = le32_to_cpu(ppage_alloc->BiosVersion); 5330 5331 - pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage_alloc, page_dma); 5332 } 5333 5334 return rc; ··· 5404 return -EFAULT; 5405 5406 if (header.PageLength > 0) { 5407 - pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma); 5408 if (pbuf) { 5409 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 5410 cfg.physAddr = buf_dma; ··· 5462 } 5463 } 5464 if (pbuf) { 5465 - pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma); 5466 } 5467 } 5468 } ··· 5486 if (header.PageLength > 0) { 5487 /* Allocate memory and read SCSI Port Page 2 5488 */ 5489 - pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma); 5490 if (pbuf) { 5491 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM; 5492 cfg.physAddr = buf_dma; ··· 5553 } 5554 } 5555 5556 - pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma); 5557 } 5558 } 5559 ··· 5671 if (!hdr.PageLength) 5672 goto out; 5673 5674 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, 5675 - &dma_handle); 5676 5677 if (!buffer) 5678 goto out; ··· 5719 5720 out: 5721 if (buffer) 5722 - pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, 5723 - dma_handle); 5724 } 5725 5726 /** ··· 5764 goto out; 5765 } 5766 5767 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, 5768 - &dma_handle); 5769 5770 if (!buffer) { 5771 rc = -ENOMEM; ··· 5788 out: 5789 5790 if (buffer) 5791 - pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, 5792 - dma_handle); 5793 5794 return rc; 5795 } ··· 5831 goto out; 5832 } 5833 5834 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, 5835 - &dma_handle); 5836 5837 if (!buffer) { 5838 rc = 0; ··· 5852 out: 5853 5854 if (buffer) 5855 - pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, 5856 - dma_handle); 5857 5858 return rc; 5859 } ··· 5903 goto out; 5904 } 5905 5906 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, 5907 - &dma_handle); 5908 5909 if (!buffer) { 5910 rc = -ENOMEM; ··· 5941 out: 5942 5943 if (buffer) 5944 - pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, 5945 - dma_handle); 5946 5947 return rc; 5948 } ··· 5998 return -EFAULT; 5999 6000 iocpage2sz = header.PageLength * 4; 6001 - pIoc2 = pci_alloc_consistent(ioc->pcidev, iocpage2sz, &ioc2_dma); 6002 if (!pIoc2) 6003 return 
-ENOMEM; 6004 ··· 6024 pIoc2->RaidVolume[i].VolumeID); 6025 6026 out: 6027 - pci_free_consistent(ioc->pcidev, iocpage2sz, pIoc2, ioc2_dma); 6028 6029 return rc; 6030 } ··· 6066 /* Read Header good, alloc memory 6067 */ 6068 iocpage3sz = header.PageLength * 4; 6069 - pIoc3 = pci_alloc_consistent(ioc->pcidev, iocpage3sz, &ioc3_dma); 6070 if (!pIoc3) 6071 return 0; 6072 ··· 6084 } 6085 } 6086 6087 - pci_free_consistent(ioc->pcidev, iocpage3sz, pIoc3, ioc3_dma); 6088 6089 return 0; 6090 } ··· 6118 6119 if ( (pIoc4 = ioc->spi_data.pIocPg4) == NULL ) { 6120 iocpage4sz = (header.PageLength + 4) * 4; /* Allow 4 additional SEP's */ 6121 - pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma); 6122 if (!pIoc4) 6123 return; 6124 ioc->alloc_total += iocpage4sz; ··· 6137 ioc->spi_data.IocPg4_dma = ioc4_dma; 6138 ioc->spi_data.IocPg4Sz = iocpage4sz; 6139 } else { 6140 - pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma); 6141 ioc->spi_data.pIocPg4 = NULL; 6142 ioc->alloc_total -= iocpage4sz; 6143 } ··· 6175 /* Read Header good, alloc memory 6176 */ 6177 iocpage1sz = header.PageLength * 4; 6178 - pIoc1 = pci_alloc_consistent(ioc->pcidev, iocpage1sz, &ioc1_dma); 6179 if (!pIoc1) 6180 return; 6181 ··· 6227 } 6228 } 6229 6230 - pci_free_consistent(ioc->pcidev, iocpage1sz, pIoc1, ioc1_dma); 6231 6232 return; 6233 } ··· 6256 goto out; 6257 6258 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 6259 - pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma); 6260 if (!pbuf) 6261 goto out; 6262 ··· 6273 out: 6274 6275 if (pbuf) 6276 - pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma); 6277 } 6278 6279 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
··· 300 if (!hdr.ExtPageLength) 301 goto out; 302 303 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, 304 + &dma_handle, GFP_KERNEL); 305 if (!buffer) 306 goto out; 307 ··· 316 rc = 1; 317 318 out_free_consistent: 319 + dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer, 320 + dma_handle); 321 out: 322 return rc; 323 } ··· 1661 const uint64_t required_mask = dma_get_required_mask 1662 (&pdev->dev); 1663 if (required_mask > DMA_BIT_MASK(32) 1664 + && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) 1665 + && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 1666 ioc->dma_mask = DMA_BIT_MASK(64); 1667 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 1668 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", 1669 ioc->name)); 1670 + } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) 1671 + && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { 1672 ioc->dma_mask = DMA_BIT_MASK(32); 1673 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 1674 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", ··· 1681 goto out_pci_release_region; 1682 } 1683 } else { 1684 + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) 1685 + && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { 1686 ioc->dma_mask = DMA_BIT_MASK(32); 1687 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 1688 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", ··· 2769 2770 if (ioc->spi_data.pIocPg4 != NULL) { 2771 sz = ioc->spi_data.IocPg4Sz; 2772 + dma_free_coherent(&ioc->pcidev->dev, sz, 2773 + ioc->spi_data.pIocPg4, 2774 + ioc->spi_data.IocPg4_dma); 2775 ioc->spi_data.pIocPg4 = NULL; 2776 ioc->alloc_total -= sz; 2777 } ··· 3515 rc = 0; 3516 goto out; 3517 } 3518 + ioc->cached_fw = dma_alloc_coherent(&ioc->pcidev->dev, size, 3519 + &ioc->cached_fw_dma, GFP_ATOMIC); 3520 if (!ioc->cached_fw) { 3521 printk(MYIOC_s_ERR_FMT "Unable to allocate memory for the cached firmware image!\n", 3522 ioc->name); ··· 3548 sz = ioc->facts.FWImageSize; 3549 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n", 3550 ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz)); 3551 + dma_free_coherent(&ioc->pcidev->dev, sz, ioc->cached_fw, 3552 + ioc->cached_fw_dma); 3553 ioc->alloc_total -= sz; 3554 ioc->cached_fw = NULL; 3555 } ··· 4447 */ 4448 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 && 4449 ioc->dma_mask > DMA_BIT_MASK(35)) { 4450 + if (!dma_set_mask(&ioc->pcidev->dev, DMA_BIT_MASK(32)) 4451 + && !dma_set_coherent_mask(&ioc->pcidev->dev, DMA_BIT_MASK(32))) { 4452 dma_mask = DMA_BIT_MASK(35); 4453 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4454 "setting 35 bit addressing for " ··· 4457 ioc->name)); 4458 } else { 4459 /*Reseting DMA mask to 64 bit*/ 4460 + dma_set_mask(&ioc->pcidev->dev, 4461 + DMA_BIT_MASK(64)); 4462 + dma_set_coherent_mask(&ioc->pcidev->dev, 4463 + DMA_BIT_MASK(64)); 4464 4465 printk(MYIOC_s_ERR_FMT 4466 "failed setting 35 bit addressing for " ··· 4595 alloc_dma += ioc->reply_sz; 4596 } 4597 4598 + if (dma_mask == DMA_BIT_MASK(35) && !dma_set_mask(&ioc->pcidev->dev, 4599 + ioc->dma_mask) && !dma_set_coherent_mask(&ioc->pcidev->dev, 4600 ioc->dma_mask)) 4601 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4602 "restoring 64 bit addressing\n", ioc->name)); ··· 4620 ioc->sense_buf_pool = NULL; 4621 } 4622 4623 + if (dma_mask == DMA_BIT_MASK(35) && !dma_set_mask(&ioc->pcidev->dev, 4624 + DMA_BIT_MASK(64)) && !dma_set_coherent_mask(&ioc->pcidev->dev, 4625 DMA_BIT_MASK(64))) 4626 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4627 "restoring 64 bit addressing\n", 
ioc->name)); ··· 4968 4969 if (hdr.PageLength > 0) { 4970 data_sz = hdr.PageLength * 4; 4971 + ppage0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz, 4972 + &page0_dma, GFP_KERNEL); 4973 rc = -ENOMEM; 4974 if (ppage0_alloc) { 4975 memset((u8 *)ppage0_alloc, 0, data_sz); ··· 4982 4983 } 4984 4985 + dma_free_coherent(&ioc->pcidev->dev, data_sz, 4986 + (u8 *)ppage0_alloc, page0_dma); 4987 4988 /* FIXME! 4989 * Normalize endianness of structure data, ··· 5014 5015 data_sz = hdr.PageLength * 4; 5016 rc = -ENOMEM; 5017 + ppage1_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz, 5018 + &page1_dma, GFP_KERNEL); 5019 if (ppage1_alloc) { 5020 memset((u8 *)ppage1_alloc, 0, data_sz); 5021 cfg.physAddr = page1_dma; ··· 5026 memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz); 5027 } 5028 5029 + dma_free_coherent(&ioc->pcidev->dev, data_sz, 5030 + (u8 *)ppage1_alloc, page1_dma); 5031 5032 /* FIXME! 5033 * Normalize endianness of structure data, ··· 5315 /* Read the config page */ 5316 data_sz = hdr.PageLength * 4; 5317 rc = -ENOMEM; 5318 + ppage_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz, 5319 + &page_dma, GFP_KERNEL); 5320 if (ppage_alloc) { 5321 memset((u8 *)ppage_alloc, 0, data_sz); 5322 cfg.physAddr = page_dma; ··· 5325 if ((rc = mpt_config(ioc, &cfg)) == 0) 5326 ioc->biosVersion = le32_to_cpu(ppage_alloc->BiosVersion); 5327 5328 + dma_free_coherent(&ioc->pcidev->dev, data_sz, 5329 + (u8 *)ppage_alloc, page_dma); 5330 } 5331 5332 return rc; ··· 5400 return -EFAULT; 5401 5402 if (header.PageLength > 0) { 5403 + pbuf = dma_alloc_coherent(&ioc->pcidev->dev, 5404 + header.PageLength * 4, &buf_dma, 5405 + GFP_KERNEL); 5406 if (pbuf) { 5407 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 5408 cfg.physAddr = buf_dma; ··· 5456 } 5457 } 5458 if (pbuf) { 5459 + dma_free_coherent(&ioc->pcidev->dev, 5460 + header.PageLength * 4, pbuf, 5461 + buf_dma); 5462 } 5463 } 5464 } ··· 5478 if (header.PageLength > 0) { 5479 /* Allocate memory and read SCSI Port Page 2 5480 */ 5481 + pbuf = dma_alloc_coherent(&ioc->pcidev->dev, 5482 + header.PageLength * 4, &buf_dma, 5483 + GFP_KERNEL); 5484 if (pbuf) { 5485 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM; 5486 cfg.physAddr = buf_dma; ··· 5543 } 5544 } 5545 5546 + dma_free_coherent(&ioc->pcidev->dev, 5547 + header.PageLength * 4, pbuf, 5548 + buf_dma); 5549 } 5550 } 5551 ··· 5659 if (!hdr.PageLength) 5660 goto out; 5661 5662 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, 5663 + &dma_handle, GFP_KERNEL); 5664 5665 if (!buffer) 5666 goto out; ··· 5707 5708 out: 5709 if (buffer) 5710 + dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, 5711 + buffer, dma_handle); 5712 } 5713 5714 /** ··· 5752 goto out; 5753 } 5754 5755 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, 5756 + &dma_handle, GFP_KERNEL); 5757 5758 if (!buffer) { 5759 rc = -ENOMEM; ··· 5776 out: 5777 5778 if (buffer) 5779 + dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, 5780 + buffer, dma_handle); 5781 5782 return rc; 5783 } ··· 5819 goto out; 5820 } 5821 5822 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, 5823 + &dma_handle, GFP_KERNEL); 5824 5825 if (!buffer) { 5826 rc = 0; ··· 5840 out: 5841 5842 if (buffer) 5843 + dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, 5844 + buffer, dma_handle); 5845 5846 return rc; 5847 } ··· 5891 goto out; 5892 } 5893 5894 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, 5895 + &dma_handle, GFP_KERNEL); 5896 5897 if (!buffer) { 5898 rc = -ENOMEM; ··· 
5929 out: 5930 5931 if (buffer) 5932 + dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, 5933 + buffer, dma_handle); 5934 5935 return rc; 5936 } ··· 5986 return -EFAULT; 5987 5988 iocpage2sz = header.PageLength * 4; 5989 + pIoc2 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage2sz, &ioc2_dma, 5990 + GFP_KERNEL); 5991 if (!pIoc2) 5992 return -ENOMEM; 5993 ··· 6011 pIoc2->RaidVolume[i].VolumeID); 6012 6013 out: 6014 + dma_free_coherent(&ioc->pcidev->dev, iocpage2sz, pIoc2, ioc2_dma); 6015 6016 return rc; 6017 } ··· 6053 /* Read Header good, alloc memory 6054 */ 6055 iocpage3sz = header.PageLength * 4; 6056 + pIoc3 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage3sz, &ioc3_dma, 6057 + GFP_KERNEL); 6058 if (!pIoc3) 6059 return 0; 6060 ··· 6070 } 6071 } 6072 6073 + dma_free_coherent(&ioc->pcidev->dev, iocpage3sz, pIoc3, ioc3_dma); 6074 6075 return 0; 6076 } ··· 6104 6105 if ( (pIoc4 = ioc->spi_data.pIocPg4) == NULL ) { 6106 iocpage4sz = (header.PageLength + 4) * 4; /* Allow 4 additional SEP's */ 6107 + pIoc4 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage4sz, 6108 + &ioc4_dma, GFP_KERNEL); 6109 if (!pIoc4) 6110 return; 6111 ioc->alloc_total += iocpage4sz; ··· 6122 ioc->spi_data.IocPg4_dma = ioc4_dma; 6123 ioc->spi_data.IocPg4Sz = iocpage4sz; 6124 } else { 6125 + dma_free_coherent(&ioc->pcidev->dev, iocpage4sz, pIoc4, 6126 + ioc4_dma); 6127 ioc->spi_data.pIocPg4 = NULL; 6128 ioc->alloc_total -= iocpage4sz; 6129 } ··· 6159 /* Read Header good, alloc memory 6160 */ 6161 iocpage1sz = header.PageLength * 4; 6162 + pIoc1 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage1sz, &ioc1_dma, 6163 + GFP_KERNEL); 6164 if (!pIoc1) 6165 return; 6166 ··· 6210 } 6211 } 6212 6213 + dma_free_coherent(&ioc->pcidev->dev, iocpage1sz, pIoc1, ioc1_dma); 6214 6215 return; 6216 } ··· 6239 goto out; 6240 6241 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 6242 + pbuf = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, 6243 + &buf_dma, GFP_KERNEL); 6244 if (!pbuf) 6245 goto out; 6246 ··· 6255 out: 6256 6257 if (pbuf) 6258 + dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, pbuf, 6259 + buf_dma); 6260 } 6261 6262 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
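
mptbase converts the DMA mask setup the same way: pci_set_dma_mask() becomes dma_set_mask(&pdev->dev, ...) and pci_set_consistent_dma_mask() becomes dma_set_coherent_mask(&pdev->dev, ...). The driver keeps the two calls separate to preserve its 64/35/32-bit staging; where no such staging is needed, the combined helper is equivalent (a sketch, not driver code):

    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
            rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

Note that mpt_alloc_fw_memory() is converted with GFP_ATOMIC, keeping the allocation context the old wrapper implied rather than auditing every caller for sleepability.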
+49 -33
drivers/message/fusion/mptctl.c
··· 1041 * copying the data in this array into the correct place in the 1042 * request and chain buffers. 1043 */ 1044 - sglbuf = pci_alloc_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf_dma); 1045 if (sglbuf == NULL) 1046 goto free_and_fail; 1047 1048 if (sgdir & 0x04000000) 1049 - dir = PCI_DMA_TODEVICE; 1050 else 1051 - dir = PCI_DMA_FROMDEVICE; 1052 1053 /* At start: 1054 * sgl = sglbuf = point to beginning of sg buffer ··· 1063 while (bytes_allocd < bytes) { 1064 this_alloc = min(alloc_sz, bytes-bytes_allocd); 1065 buflist[buflist_ent].len = this_alloc; 1066 - buflist[buflist_ent].kptr = pci_alloc_consistent(ioc->pcidev, 1067 - this_alloc, 1068 - &pa); 1069 if (buflist[buflist_ent].kptr == NULL) { 1070 alloc_sz = alloc_sz / 2; 1071 if (alloc_sz == 0) { ··· 1081 1082 bytes_allocd += this_alloc; 1083 sgl->FlagsLength = (0x10000000|sgdir|this_alloc); 1084 - dma_addr = pci_map_single(ioc->pcidev, 1085 - buflist[buflist_ent].kptr, this_alloc, dir); 1086 sgl->Address = dma_addr; 1087 1088 fragcnt++; ··· 1142 kptr = buflist[i].kptr; 1143 len = buflist[i].len; 1144 1145 - pci_free_consistent(ioc->pcidev, len, kptr, dma_addr); 1146 } 1147 - pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf, *sglbuf_dma); 1148 } 1149 kfree(buflist); 1150 return NULL; ··· 1166 int n = 0; 1167 1168 if (sg->FlagsLength & 0x04000000) 1169 - dir = PCI_DMA_TODEVICE; 1170 else 1171 - dir = PCI_DMA_FROMDEVICE; 1172 1173 nib = (sg->FlagsLength & 0xF0000000) >> 28; 1174 while (! (nib & 0x4)) { /* eob */ ··· 1183 dma_addr = sg->Address; 1184 kptr = bl->kptr; 1185 len = bl->len; 1186 - pci_unmap_single(ioc->pcidev, dma_addr, len, dir); 1187 - pci_free_consistent(ioc->pcidev, len, kptr, dma_addr); 1188 n++; 1189 } 1190 sg++; ··· 1203 dma_addr = sg->Address; 1204 kptr = bl->kptr; 1205 len = bl->len; 1206 - pci_unmap_single(ioc->pcidev, dma_addr, len, dir); 1207 - pci_free_consistent(ioc->pcidev, len, kptr, dma_addr); 1208 n++; 1209 } 1210 1211 - pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sgl, sgl_dma); 1212 kfree(buflist); 1213 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: Free'd 1 SGL buf + %d kbufs!\n", 1214 ioc->name, n)); ··· 2106 } 2107 flagsLength |= karg.dataOutSize; 2108 bufOut.len = karg.dataOutSize; 2109 - bufOut.kptr = pci_alloc_consistent( 2110 - ioc->pcidev, bufOut.len, &dma_addr_out); 2111 2112 if (bufOut.kptr == NULL) { 2113 rc = -ENOMEM; ··· 2141 flagsLength |= karg.dataInSize; 2142 2143 bufIn.len = karg.dataInSize; 2144 - bufIn.kptr = pci_alloc_consistent(ioc->pcidev, 2145 - bufIn.len, &dma_addr_in); 2146 2147 if (bufIn.kptr == NULL) { 2148 rc = -ENOMEM; ··· 2291 /* Free the allocated memory. 
2292 */ 2293 if (bufOut.kptr != NULL) { 2294 - pci_free_consistent(ioc->pcidev, 2295 - bufOut.len, (void *) bufOut.kptr, dma_addr_out); 2296 } 2297 2298 if (bufIn.kptr != NULL) { 2299 - pci_free_consistent(ioc->pcidev, 2300 - bufIn.len, (void *) bufIn.kptr, dma_addr_in); 2301 } 2302 2303 /* mf is null if command issued successfully ··· 2403 /* Issue the second config page request */ 2404 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 2405 2406 - pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma); 2407 if (pbuf) { 2408 cfg.physAddr = buf_dma; 2409 if (mpt_config(ioc, &cfg) == 0) { ··· 2415 pdata->BoardTracerNumber, 24); 2416 } 2417 } 2418 - pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma); 2419 pbuf = NULL; 2420 } 2421 } ··· 2482 else 2483 IstwiRWRequest->DeviceAddr = 0xB0; 2484 2485 - pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); 2486 if (!pbuf) 2487 goto out; 2488 ioc->add_sge((char *)&IstwiRWRequest->SGL, ··· 2531 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); 2532 2533 if (pbuf) 2534 - pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); 2535 2536 /* Copy the data from kernel memory to user memory 2537 */ ··· 2597 /* Get the data transfer speeds 2598 */ 2599 data_sz = ioc->spi_data.sdp0length * 4; 2600 - pg0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma); 2601 if (pg0_alloc) { 2602 hdr.PageVersion = ioc->spi_data.sdp0version; 2603 hdr.PageLength = data_sz; ··· 2636 karg.negotiated_speed = HP_DEV_SPEED_ASYNC; 2637 } 2638 2639 - pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg0_alloc, page_dma); 2640 } 2641 2642 /* Set defaults ··· 2663 /* Issue the second config page request */ 2664 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 2665 data_sz = (int) cfg.cfghdr.hdr->PageLength * 4; 2666 - pg3_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma); 2667 if (pg3_alloc) { 2668 cfg.physAddr = page_dma; 2669 cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id; ··· 2673 karg.phase_errors = (u32) le16_to_cpu(pg3_alloc->PhaseErrorCount); 2674 karg.parity_errors = (u32) le16_to_cpu(pg3_alloc->ParityErrorCount); 2675 } 2676 - pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg3_alloc, page_dma); 2677 } 2678 } 2679 hd = shost_priv(ioc->sh);
··· 1041 * copying the data in this array into the correct place in the 1042 * request and chain buffers. 1043 */ 1044 + sglbuf = dma_alloc_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES, 1045 + sglbuf_dma, GFP_KERNEL); 1046 if (sglbuf == NULL) 1047 goto free_and_fail; 1048 1049 if (sgdir & 0x04000000) 1050 + dir = DMA_TO_DEVICE; 1051 else 1052 + dir = DMA_FROM_DEVICE; 1053 1054 /* At start: 1055 * sgl = sglbuf = point to beginning of sg buffer ··· 1062 while (bytes_allocd < bytes) { 1063 this_alloc = min(alloc_sz, bytes-bytes_allocd); 1064 buflist[buflist_ent].len = this_alloc; 1065 + buflist[buflist_ent].kptr = dma_alloc_coherent(&ioc->pcidev->dev, 1066 + this_alloc, 1067 + &pa, GFP_KERNEL); 1068 if (buflist[buflist_ent].kptr == NULL) { 1069 alloc_sz = alloc_sz / 2; 1070 if (alloc_sz == 0) { ··· 1080 1081 bytes_allocd += this_alloc; 1082 sgl->FlagsLength = (0x10000000|sgdir|this_alloc); 1083 + dma_addr = dma_map_single(&ioc->pcidev->dev, 1084 + buflist[buflist_ent].kptr, 1085 + this_alloc, dir); 1086 sgl->Address = dma_addr; 1087 1088 fragcnt++; ··· 1140 kptr = buflist[i].kptr; 1141 len = buflist[i].len; 1142 1143 + dma_free_coherent(&ioc->pcidev->dev, len, kptr, 1144 + dma_addr); 1145 } 1146 + dma_free_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES, sglbuf, 1147 + *sglbuf_dma); 1148 } 1149 kfree(buflist); 1150 return NULL; ··· 1162 int n = 0; 1163 1164 if (sg->FlagsLength & 0x04000000) 1165 + dir = DMA_TO_DEVICE; 1166 else 1167 + dir = DMA_FROM_DEVICE; 1168 1169 nib = (sg->FlagsLength & 0xF0000000) >> 28; 1170 while (! (nib & 0x4)) { /* eob */ ··· 1179 dma_addr = sg->Address; 1180 kptr = bl->kptr; 1181 len = bl->len; 1182 + dma_unmap_single(&ioc->pcidev->dev, dma_addr, len, 1183 + dir); 1184 + dma_free_coherent(&ioc->pcidev->dev, len, kptr, 1185 + dma_addr); 1186 n++; 1187 } 1188 sg++; ··· 1197 dma_addr = sg->Address; 1198 kptr = bl->kptr; 1199 len = bl->len; 1200 + dma_unmap_single(&ioc->pcidev->dev, dma_addr, len, dir); 1201 + dma_free_coherent(&ioc->pcidev->dev, len, kptr, dma_addr); 1202 n++; 1203 } 1204 1205 + dma_free_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES, sgl, sgl_dma); 1206 kfree(buflist); 1207 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: Free'd 1 SGL buf + %d kbufs!\n", 1208 ioc->name, n)); ··· 2100 } 2101 flagsLength |= karg.dataOutSize; 2102 bufOut.len = karg.dataOutSize; 2103 + bufOut.kptr = dma_alloc_coherent(&ioc->pcidev->dev, 2104 + bufOut.len, 2105 + &dma_addr_out, GFP_KERNEL); 2106 2107 if (bufOut.kptr == NULL) { 2108 rc = -ENOMEM; ··· 2134 flagsLength |= karg.dataInSize; 2135 2136 bufIn.len = karg.dataInSize; 2137 + bufIn.kptr = dma_alloc_coherent(&ioc->pcidev->dev, 2138 + bufIn.len, 2139 + &dma_addr_in, GFP_KERNEL); 2140 2141 if (bufIn.kptr == NULL) { 2142 rc = -ENOMEM; ··· 2283 /* Free the allocated memory. 
2284 */ 2285 if (bufOut.kptr != NULL) { 2286 + dma_free_coherent(&ioc->pcidev->dev, bufOut.len, 2287 + (void *)bufOut.kptr, dma_addr_out); 2288 } 2289 2290 if (bufIn.kptr != NULL) { 2291 + dma_free_coherent(&ioc->pcidev->dev, bufIn.len, 2292 + (void *)bufIn.kptr, dma_addr_in); 2293 } 2294 2295 /* mf is null if command issued successfully ··· 2395 /* Issue the second config page request */ 2396 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 2397 2398 + pbuf = dma_alloc_coherent(&ioc->pcidev->dev, 2399 + hdr.PageLength * 4, 2400 + &buf_dma, GFP_KERNEL); 2401 if (pbuf) { 2402 cfg.physAddr = buf_dma; 2403 if (mpt_config(ioc, &cfg) == 0) { ··· 2405 pdata->BoardTracerNumber, 24); 2406 } 2407 } 2408 + dma_free_coherent(&ioc->pcidev->dev, 2409 + hdr.PageLength * 4, pbuf, 2410 + buf_dma); 2411 pbuf = NULL; 2412 } 2413 } ··· 2470 else 2471 IstwiRWRequest->DeviceAddr = 0xB0; 2472 2473 + pbuf = dma_alloc_coherent(&ioc->pcidev->dev, 4, &buf_dma, GFP_KERNEL); 2474 if (!pbuf) 2475 goto out; 2476 ioc->add_sge((char *)&IstwiRWRequest->SGL, ··· 2519 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); 2520 2521 if (pbuf) 2522 + dma_free_coherent(&ioc->pcidev->dev, 4, pbuf, buf_dma); 2523 2524 /* Copy the data from kernel memory to user memory 2525 */ ··· 2585 /* Get the data transfer speeds 2586 */ 2587 data_sz = ioc->spi_data.sdp0length * 4; 2588 + pg0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz, &page_dma, 2589 + GFP_KERNEL); 2590 if (pg0_alloc) { 2591 hdr.PageVersion = ioc->spi_data.sdp0version; 2592 hdr.PageLength = data_sz; ··· 2623 karg.negotiated_speed = HP_DEV_SPEED_ASYNC; 2624 } 2625 2626 + dma_free_coherent(&ioc->pcidev->dev, data_sz, (u8 *)pg0_alloc, 2627 + page_dma); 2628 } 2629 2630 /* Set defaults ··· 2649 /* Issue the second config page request */ 2650 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 2651 data_sz = (int) cfg.cfghdr.hdr->PageLength * 4; 2652 + pg3_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz, 2653 + &page_dma, GFP_KERNEL); 2654 if (pg3_alloc) { 2655 cfg.physAddr = page_dma; 2656 cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id; ··· 2658 karg.phase_errors = (u32) le16_to_cpu(pg3_alloc->PhaseErrorCount); 2659 karg.parity_errors = (u32) le16_to_cpu(pg3_alloc->ParityErrorCount); 2660 } 2661 + dma_free_coherent(&ioc->pcidev->dev, data_sz, 2662 + (u8 *)pg3_alloc, page_dma); 2663 } 2664 } 2665 hd = shost_priv(ioc->sh);
+48 -42
drivers/message/fusion/mptlan.c
··· 516 if (priv->RcvCtl[i].skb != NULL) { 517 /**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x " 518 /**/ "is still out\n", i)); 519 - pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma, 520 - priv->RcvCtl[i].len, 521 - PCI_DMA_FROMDEVICE); 522 dev_kfree_skb(priv->RcvCtl[i].skb); 523 } 524 } ··· 528 529 for (i = 0; i < priv->tx_max_out; i++) { 530 if (priv->SendCtl[i].skb != NULL) { 531 - pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma, 532 - priv->SendCtl[i].len, 533 - PCI_DMA_TODEVICE); 534 dev_kfree_skb(priv->SendCtl[i].skb); 535 } 536 } ··· 582 __func__, sent)); 583 584 priv->SendCtl[ctx].skb = NULL; 585 - pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma, 586 - priv->SendCtl[ctx].len, PCI_DMA_TODEVICE); 587 dev_kfree_skb_irq(sent); 588 589 spin_lock_irqsave(&priv->txfidx_lock, flags); ··· 648 __func__, sent)); 649 650 priv->SendCtl[ctx].skb = NULL; 651 - pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma, 652 - priv->SendCtl[ctx].len, PCI_DMA_TODEVICE); 653 dev_kfree_skb_irq(sent); 654 655 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx; ··· 721 skb_reset_mac_header(skb); 722 skb_pull(skb, 12); 723 724 - dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len, 725 - PCI_DMA_TODEVICE); 726 727 priv->SendCtl[ctx].skb = skb; 728 priv->SendCtl[ctx].dma = dma; ··· 869 return -ENOMEM; 870 } 871 872 - pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, 873 - priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); 874 875 skb_copy_from_linear_data(old_skb, skb_put(skb, len), len); 876 877 - pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, 878 - priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); 879 goto out; 880 } 881 ··· 887 888 priv->RcvCtl[ctx].skb = NULL; 889 890 - pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, 891 - priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); 892 893 out: 894 spin_lock_irqsave(&priv->rxfidx_lock, flags); ··· 932 // dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n")); 933 934 priv->RcvCtl[ctx].skb = NULL; 935 - pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, 936 - priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); 937 dev_kfree_skb_any(skb); 938 939 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; ··· 1033 // IOC_AND_NETDEV_NAMES_s_s(dev), 1034 // i, l)); 1035 1036 - pci_dma_sync_single_for_cpu(mpt_dev->pcidev, 1037 - priv->RcvCtl[ctx].dma, 1038 - priv->RcvCtl[ctx].len, 1039 - PCI_DMA_FROMDEVICE); 1040 skb_copy_from_linear_data(old_skb, skb_put(skb, l), l); 1041 1042 - pci_dma_sync_single_for_device(mpt_dev->pcidev, 1043 - priv->RcvCtl[ctx].dma, 1044 - priv->RcvCtl[ctx].len, 1045 - PCI_DMA_FROMDEVICE); 1046 1047 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; 1048 szrem -= l; ··· 1061 return -ENOMEM; 1062 } 1063 1064 - pci_dma_sync_single_for_cpu(mpt_dev->pcidev, 1065 - priv->RcvCtl[ctx].dma, 1066 - priv->RcvCtl[ctx].len, 1067 - PCI_DMA_FROMDEVICE); 1068 1069 skb_copy_from_linear_data(old_skb, skb_put(skb, len), len); 1070 1071 - pci_dma_sync_single_for_device(mpt_dev->pcidev, 1072 - priv->RcvCtl[ctx].dma, 1073 - priv->RcvCtl[ctx].len, 1074 - PCI_DMA_FROMDEVICE); 1075 1076 spin_lock_irqsave(&priv->rxfidx_lock, flags); 1077 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; ··· 1082 1083 priv->RcvCtl[ctx].skb = NULL; 1084 1085 - pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, 1086 - priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); 1087 priv->RcvCtl[ctx].dma = 0; 1088 1089 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; ··· 1204 1205 skb = priv->RcvCtl[ctx].skb; 1206 if (skb && (priv->RcvCtl[ctx].len != len)) { 
1207 - pci_unmap_single(mpt_dev->pcidev, 1208 priv->RcvCtl[ctx].dma, 1209 priv->RcvCtl[ctx].len, 1210 - PCI_DMA_FROMDEVICE); 1211 dev_kfree_skb(priv->RcvCtl[ctx].skb); 1212 skb = priv->RcvCtl[ctx].skb = NULL; 1213 } ··· 1223 break; 1224 } 1225 1226 - dma = pci_map_single(mpt_dev->pcidev, skb->data, 1227 - len, PCI_DMA_FROMDEVICE); 1228 1229 priv->RcvCtl[ctx].skb = skb; 1230 priv->RcvCtl[ctx].dma = dma;
··· 516 if (priv->RcvCtl[i].skb != NULL) { 517 /**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x " 518 /**/ "is still out\n", i)); 519 + dma_unmap_single(&mpt_dev->pcidev->dev, 520 + priv->RcvCtl[i].dma, 521 + priv->RcvCtl[i].len, DMA_FROM_DEVICE); 522 dev_kfree_skb(priv->RcvCtl[i].skb); 523 } 524 } ··· 528 529 for (i = 0; i < priv->tx_max_out; i++) { 530 if (priv->SendCtl[i].skb != NULL) { 531 + dma_unmap_single(&mpt_dev->pcidev->dev, 532 + priv->SendCtl[i].dma, 533 + priv->SendCtl[i].len, DMA_TO_DEVICE); 534 dev_kfree_skb(priv->SendCtl[i].skb); 535 } 536 } ··· 582 __func__, sent)); 583 584 priv->SendCtl[ctx].skb = NULL; 585 + dma_unmap_single(&mpt_dev->pcidev->dev, priv->SendCtl[ctx].dma, 586 + priv->SendCtl[ctx].len, DMA_TO_DEVICE); 587 dev_kfree_skb_irq(sent); 588 589 spin_lock_irqsave(&priv->txfidx_lock, flags); ··· 648 __func__, sent)); 649 650 priv->SendCtl[ctx].skb = NULL; 651 + dma_unmap_single(&mpt_dev->pcidev->dev, 652 + priv->SendCtl[ctx].dma, 653 + priv->SendCtl[ctx].len, DMA_TO_DEVICE); 654 dev_kfree_skb_irq(sent); 655 656 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx; ··· 720 skb_reset_mac_header(skb); 721 skb_pull(skb, 12); 722 723 + dma = dma_map_single(&mpt_dev->pcidev->dev, skb->data, skb->len, 724 + DMA_TO_DEVICE); 725 726 priv->SendCtl[ctx].skb = skb; 727 priv->SendCtl[ctx].dma = dma; ··· 868 return -ENOMEM; 869 } 870 871 + dma_sync_single_for_cpu(&mpt_dev->pcidev->dev, 872 + priv->RcvCtl[ctx].dma, 873 + priv->RcvCtl[ctx].len, 874 + DMA_FROM_DEVICE); 875 876 skb_copy_from_linear_data(old_skb, skb_put(skb, len), len); 877 878 + dma_sync_single_for_device(&mpt_dev->pcidev->dev, 879 + priv->RcvCtl[ctx].dma, 880 + priv->RcvCtl[ctx].len, 881 + DMA_FROM_DEVICE); 882 goto out; 883 } 884 ··· 882 883 priv->RcvCtl[ctx].skb = NULL; 884 885 + dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma, 886 + priv->RcvCtl[ctx].len, DMA_FROM_DEVICE); 887 888 out: 889 spin_lock_irqsave(&priv->rxfidx_lock, flags); ··· 927 // dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n")); 928 929 priv->RcvCtl[ctx].skb = NULL; 930 + dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma, 931 + priv->RcvCtl[ctx].len, DMA_FROM_DEVICE); 932 dev_kfree_skb_any(skb); 933 934 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; ··· 1028 // IOC_AND_NETDEV_NAMES_s_s(dev), 1029 // i, l)); 1030 1031 + dma_sync_single_for_cpu(&mpt_dev->pcidev->dev, 1032 + priv->RcvCtl[ctx].dma, 1033 + priv->RcvCtl[ctx].len, 1034 + DMA_FROM_DEVICE); 1035 skb_copy_from_linear_data(old_skb, skb_put(skb, l), l); 1036 1037 + dma_sync_single_for_device(&mpt_dev->pcidev->dev, 1038 + priv->RcvCtl[ctx].dma, 1039 + priv->RcvCtl[ctx].len, 1040 + DMA_FROM_DEVICE); 1041 1042 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; 1043 szrem -= l; ··· 1056 return -ENOMEM; 1057 } 1058 1059 + dma_sync_single_for_cpu(&mpt_dev->pcidev->dev, 1060 + priv->RcvCtl[ctx].dma, 1061 + priv->RcvCtl[ctx].len, 1062 + DMA_FROM_DEVICE); 1063 1064 skb_copy_from_linear_data(old_skb, skb_put(skb, len), len); 1065 1066 + dma_sync_single_for_device(&mpt_dev->pcidev->dev, 1067 + priv->RcvCtl[ctx].dma, 1068 + priv->RcvCtl[ctx].len, 1069 + DMA_FROM_DEVICE); 1070 1071 spin_lock_irqsave(&priv->rxfidx_lock, flags); 1072 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; ··· 1077 1078 priv->RcvCtl[ctx].skb = NULL; 1079 1080 + dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma, 1081 + priv->RcvCtl[ctx].len, DMA_FROM_DEVICE); 1082 priv->RcvCtl[ctx].dma = 0; 1083 1084 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; ··· 1199 1200 skb = priv->RcvCtl[ctx].skb; 
1201 if (skb && (priv->RcvCtl[ctx].len != len)) { 1202 + dma_unmap_single(&mpt_dev->pcidev->dev, 1203 priv->RcvCtl[ctx].dma, 1204 priv->RcvCtl[ctx].len, 1205 + DMA_FROM_DEVICE); 1206 dev_kfree_skb(priv->RcvCtl[ctx].skb); 1207 skb = priv->RcvCtl[ctx].skb = NULL; 1208 } ··· 1218 break; 1219 } 1220 1221 + dma = dma_map_single(&mpt_dev->pcidev->dev, 1222 + skb->data, len, 1223 + DMA_FROM_DEVICE); 1224 1225 priv->RcvCtl[ctx].skb = skb; 1226 priv->RcvCtl[ctx].dma = dma;
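
mptlan maps skb data with streaming mappings rather than coherent buffers, so its conversion is mostly direction flags and sync calls: PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE become DMA_TO_DEVICE/DMA_FROM_DEVICE, and pci_dma_sync_single_for_{cpu,device}() become dma_sync_single_for_{cpu,device}(). The receive-side lifecycle, sketched with placeholder names (the dma_mapping_error() check is shown as current practice; this legacy driver does not add one):

    #include <linux/pci.h>
    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    static int example_rx(struct pci_dev *pdev, struct sk_buff *skb, size_t len)
    {
            dma_addr_t dma;

            dma = dma_map_single(&pdev->dev, skb->data, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(&pdev->dev, dma))
                    return -ENOMEM;

            /* ... device DMAs a frame into the buffer ... */

            dma_sync_single_for_cpu(&pdev->dev, dma, len, DMA_FROM_DEVICE);
            /* the CPU may now read skb->data, e.g. copy the frame out */
            dma_sync_single_for_device(&pdev->dev, dma, len, DMA_FROM_DEVICE);

            /* ... and when the buffer is retired: */
            dma_unmap_single(&pdev->dev, dma, len, DMA_FROM_DEVICE);
            return 0;
    }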
+48 -46
drivers/message/fusion/mptsas.c
··· 702 if (!hdr.PageLength) 703 goto out; 704 705 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, 706 - &dma_handle); 707 708 if (!buffer) 709 goto out; ··· 769 770 out: 771 if (buffer) 772 - pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, 773 - dma_handle); 774 } 775 776 /** ··· 1399 goto out; 1400 } 1401 1402 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 1403 - &dma_handle); 1404 if (!buffer) { 1405 error = -ENOMEM; 1406 goto out; ··· 1426 enclosure->sep_channel = buffer->SEPBus; 1427 1428 out_free_consistent: 1429 - pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 1430 - buffer, dma_handle); 1431 out: 1432 return error; 1433 } ··· 2058 if (!hdr.ExtPageLength) 2059 return -ENXIO; 2060 2061 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2062 - &dma_handle); 2063 if (!buffer) 2064 return -ENOMEM; 2065 ··· 2081 le32_to_cpu(buffer->PhyResetProblemCount); 2082 2083 out_free_consistent: 2084 - pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2085 - buffer, dma_handle); 2086 return error; 2087 } 2088 ··· 2301 << MPI_SGE_FLAGS_SHIFT; 2302 2303 if (!dma_map_sg(&ioc->pcidev->dev, job->request_payload.sg_list, 2304 - 1, PCI_DMA_BIDIRECTIONAL)) 2305 goto put_mf; 2306 2307 flagsLength |= (sg_dma_len(job->request_payload.sg_list) - 4); ··· 2318 flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; 2319 2320 if (!dma_map_sg(&ioc->pcidev->dev, job->reply_payload.sg_list, 2321 - 1, PCI_DMA_BIDIRECTIONAL)) 2322 goto unmap_out; 2323 flagsLength |= sg_dma_len(job->reply_payload.sg_list) + 4; 2324 ioc->add_sge(psge, flagsLength, ··· 2356 2357 unmap_in: 2358 dma_unmap_sg(&ioc->pcidev->dev, job->reply_payload.sg_list, 1, 2359 - PCI_DMA_BIDIRECTIONAL); 2360 unmap_out: 2361 dma_unmap_sg(&ioc->pcidev->dev, job->request_payload.sg_list, 1, 2362 - PCI_DMA_BIDIRECTIONAL); 2363 put_mf: 2364 if (mf) 2365 mpt_free_msg_frame(ioc, mf); ··· 2412 goto out; 2413 } 2414 2415 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2416 - &dma_handle); 2417 if (!buffer) { 2418 error = -ENOMEM; 2419 goto out; ··· 2452 } 2453 2454 out_free_consistent: 2455 - pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2456 - buffer, dma_handle); 2457 out: 2458 return error; 2459 } ··· 2487 goto out; 2488 } 2489 2490 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2491 - &dma_handle); 2492 if (!buffer) { 2493 error = -ENOMEM; 2494 goto out; ··· 2509 device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK; 2510 2511 out_free_consistent: 2512 - pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2513 - buffer, dma_handle); 2514 out: 2515 return error; 2516 } ··· 2551 goto out; 2552 } 2553 2554 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2555 - &dma_handle); 2556 if (!buffer) { 2557 error = -ENOMEM; 2558 goto out; ··· 2573 phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle); 2574 2575 out_free_consistent: 2576 - pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2577 - buffer, dma_handle); 2578 out: 2579 return error; 2580 } ··· 2614 goto out; 2615 } 2616 2617 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2618 - &dma_handle); 2619 if (!buffer) { 2620 error = -ENOMEM; 2621 goto out; ··· 2654 device_info->flags = le16_to_cpu(buffer->Flags); 2655 2656 out_free_consistent: 2657 - pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2658 - buffer, dma_handle); 2659 out: 2660 return error; 2661 } ··· 2697 goto out; 2698 } 2699 2700 - buffer = 
pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2701 - &dma_handle); 2702 if (!buffer) { 2703 error = -ENOMEM; 2704 goto out; ··· 2737 } 2738 2739 out_free_consistent: 2740 - pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2741 - buffer, dma_handle); 2742 out: 2743 return error; 2744 } ··· 2777 goto out; 2778 } 2779 2780 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2781 - &dma_handle); 2782 if (!buffer) { 2783 error = -ENOMEM; 2784 goto out; ··· 2810 phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle); 2811 2812 out_free_consistent: 2813 - pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 2814 - buffer, dma_handle); 2815 out: 2816 return error; 2817 } ··· 2896 2897 sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply); 2898 2899 - data_out = pci_alloc_consistent(ioc->pcidev, sz, &data_out_dma); 2900 if (!data_out) { 2901 printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n", 2902 __FILE__, __LINE__, __func__); ··· 2988 } 2989 out_free: 2990 if (data_out_dma) 2991 - pci_free_consistent(ioc->pcidev, sz, data_out, data_out_dma); 2992 put_mf: 2993 if (mf) 2994 mpt_free_msg_frame(ioc, mf); ··· 4273 if (!hdr.PageLength) 4274 goto out; 4275 4276 - buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, 4277 - &dma_handle); 4278 4279 if (!buffer) 4280 goto out; ··· 4320 4321 out: 4322 if (buffer) 4323 - pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, 4324 - dma_handle); 4325 } 4326 /* 4327 * Work queue thread to handle SAS hotplug events
··· 702 if (!hdr.PageLength) 703 goto out; 704 705 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, 706 + &dma_handle, GFP_KERNEL); 707 708 if (!buffer) 709 goto out; ··· 769 770 out: 771 if (buffer) 772 + dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, 773 + buffer, dma_handle); 774 } 775 776 /** ··· 1399 goto out; 1400 } 1401 1402 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, 1403 + &dma_handle, GFP_KERNEL); 1404 if (!buffer) { 1405 error = -ENOMEM; 1406 goto out; ··· 1426 enclosure->sep_channel = buffer->SEPBus; 1427 1428 out_free_consistent: 1429 + dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer, 1430 + dma_handle); 1431 out: 1432 return error; 1433 } ··· 2058 if (!hdr.ExtPageLength) 2059 return -ENXIO; 2060 2061 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, 2062 + &dma_handle, GFP_KERNEL); 2063 if (!buffer) 2064 return -ENOMEM; 2065 ··· 2081 le32_to_cpu(buffer->PhyResetProblemCount); 2082 2083 out_free_consistent: 2084 + dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer, 2085 + dma_handle); 2086 return error; 2087 } 2088 ··· 2301 << MPI_SGE_FLAGS_SHIFT; 2302 2303 if (!dma_map_sg(&ioc->pcidev->dev, job->request_payload.sg_list, 2304 + 1, DMA_BIDIRECTIONAL)) 2305 goto put_mf; 2306 2307 flagsLength |= (sg_dma_len(job->request_payload.sg_list) - 4); ··· 2318 flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; 2319 2320 if (!dma_map_sg(&ioc->pcidev->dev, job->reply_payload.sg_list, 2321 + 1, DMA_BIDIRECTIONAL)) 2322 goto unmap_out; 2323 flagsLength |= sg_dma_len(job->reply_payload.sg_list) + 4; 2324 ioc->add_sge(psge, flagsLength, ··· 2356 2357 unmap_in: 2358 dma_unmap_sg(&ioc->pcidev->dev, job->reply_payload.sg_list, 1, 2359 + DMA_BIDIRECTIONAL); 2360 unmap_out: 2361 dma_unmap_sg(&ioc->pcidev->dev, job->request_payload.sg_list, 1, 2362 + DMA_BIDIRECTIONAL); 2363 put_mf: 2364 if (mf) 2365 mpt_free_msg_frame(ioc, mf); ··· 2412 goto out; 2413 } 2414 2415 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, 2416 + &dma_handle, GFP_KERNEL); 2417 if (!buffer) { 2418 error = -ENOMEM; 2419 goto out; ··· 2452 } 2453 2454 out_free_consistent: 2455 + dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer, 2456 + dma_handle); 2457 out: 2458 return error; 2459 } ··· 2487 goto out; 2488 } 2489 2490 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, 2491 + &dma_handle, GFP_KERNEL); 2492 if (!buffer) { 2493 error = -ENOMEM; 2494 goto out; ··· 2509 device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK; 2510 2511 out_free_consistent: 2512 + dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer, 2513 + dma_handle); 2514 out: 2515 return error; 2516 } ··· 2551 goto out; 2552 } 2553 2554 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, 2555 + &dma_handle, GFP_KERNEL); 2556 if (!buffer) { 2557 error = -ENOMEM; 2558 goto out; ··· 2573 phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle); 2574 2575 out_free_consistent: 2576 + dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer, 2577 + dma_handle); 2578 out: 2579 return error; 2580 } ··· 2614 goto out; 2615 } 2616 2617 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, 2618 + &dma_handle, GFP_KERNEL); 2619 if (!buffer) { 2620 error = -ENOMEM; 2621 goto out; ··· 2654 device_info->flags = le16_to_cpu(buffer->Flags); 2655 2656 out_free_consistent: 2657 + dma_free_coherent(&ioc->pcidev->dev, 
hdr.ExtPageLength * 4, buffer, 2658 + dma_handle); 2659 out: 2660 return error; 2661 } ··· 2697 goto out; 2698 } 2699 2700 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, 2701 + &dma_handle, GFP_KERNEL); 2702 if (!buffer) { 2703 error = -ENOMEM; 2704 goto out; ··· 2737 } 2738 2739 out_free_consistent: 2740 + dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer, 2741 + dma_handle); 2742 out: 2743 return error; 2744 } ··· 2777 goto out; 2778 } 2779 2780 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, 2781 + &dma_handle, GFP_KERNEL); 2782 if (!buffer) { 2783 error = -ENOMEM; 2784 goto out; ··· 2810 phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle); 2811 2812 out_free_consistent: 2813 + dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer, 2814 + dma_handle); 2815 out: 2816 return error; 2817 } ··· 2896 2897 sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply); 2898 2899 + data_out = dma_alloc_coherent(&ioc->pcidev->dev, sz, &data_out_dma, 2900 + GFP_KERNEL); 2901 if (!data_out) { 2902 printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n", 2903 __FILE__, __LINE__, __func__); ··· 2987 } 2988 out_free: 2989 if (data_out_dma) 2990 + dma_free_coherent(&ioc->pcidev->dev, sz, data_out, 2991 + data_out_dma); 2992 put_mf: 2993 if (mf) 2994 mpt_free_msg_frame(ioc, mf); ··· 4271 if (!hdr.PageLength) 4272 goto out; 4273 4274 + buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, 4275 + &dma_handle, GFP_KERNEL); 4276 4277 if (!buffer) 4278 goto out; ··· 4318 4319 out: 4320 if (buffer) 4321 + dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, 4322 + buffer, dma_handle); 4323 } 4324 /* 4325 * Work queue thread to handle SAS hotplug events
+1 -1
drivers/scsi/aacraid/aachba.c
··· 271 " 0=PIC(default), 1=MSI, 2=MSI-X)"); 272 module_param(startup_timeout, int, S_IRUGO|S_IWUSR); 273 MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for" 274 - " adapter to have it's kernel up and\n" 275 "running. This is typically adjusted for large systems that do not" 276 " have a BIOS."); 277 module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
··· 271 " 0=PIC(default), 1=MSI, 2=MSI-X)"); 272 module_param(startup_timeout, int, S_IRUGO|S_IWUSR); 273 MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for" 274 + " adapter to have its kernel up and\n" 275 "running. This is typically adjusted for large systems that do not" 276 " have a BIOS."); 277 module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
+1 -5
drivers/scsi/aic7xxx/aic79xx_osm.c
··· 755 static int 756 ahd_linux_abort(struct scsi_cmnd *cmd) 757 { 758 - int error; 759 - 760 - error = ahd_linux_queue_abort_cmd(cmd); 761 - 762 - return error; 763 } 764 765 /*
··· 755 static int 756 ahd_linux_abort(struct scsi_cmnd *cmd) 757 { 758 + return ahd_linux_queue_abort_cmd(cmd); 759 } 760 761 /*
+4 -7
drivers/scsi/elx/efct/efct_driver.c
··· 541 542 pci_set_drvdata(pdev, efct); 543 544 - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0) { 545 - dev_warn(&pdev->dev, "trying DMA_BIT_MASK(32)\n"); 546 - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 547 - dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n"); 548 - rc = -1; 549 - goto dma_mask_out; 550 - } 551 } 552 553 num_interrupts = efct_device_interrupts_required(efct);
··· 541 542 pci_set_drvdata(pdev, efct); 543 544 + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 545 + if (rc) { 546 + dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n"); 547 + goto dma_mask_out; 548 } 549 550 num_interrupts = efct_device_interrupts_required(efct);
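
The efct change leans on documented DMA API behavior: dma_set_mask_and_coherent() with a 64-bit mask fails only if the device cannot do DMA at all (dev->dma_mask is NULL), and in that case a DMA_BIT_MASK(32) retry fails for the same reason, so the 32-bit fallback was dead code. Probe setup reduces to one checked call, as in the new hunk above:

    rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (rc)
            goto dma_mask_out;      /* error path from the surrounding code */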
+0 -5
drivers/scsi/hisi_sas/hisi_sas_main.c
··· 1525 struct device *dev = hisi_hba->dev; 1526 int s = sizeof(struct host_to_dev_fis); 1527 int rc = TMF_RESP_FUNC_FAILED; 1528 - struct asd_sas_phy *sas_phy; 1529 struct ata_link *link; 1530 u8 fis[20] = {0}; 1531 - u32 state; 1532 int i; 1533 1534 - state = hisi_hba->hw->get_phys_state(hisi_hba); 1535 for (i = 0; i < hisi_hba->n_phy; i++) { 1536 - if (!(state & BIT(sas_phy->id))) 1537 - continue; 1538 if (!(sas_port->phy_mask & BIT(i))) 1539 continue; 1540
··· 1525 struct device *dev = hisi_hba->dev; 1526 int s = sizeof(struct host_to_dev_fis); 1527 int rc = TMF_RESP_FUNC_FAILED; 1528 struct ata_link *link; 1529 u8 fis[20] = {0}; 1530 int i; 1531 1532 for (i = 0; i < hisi_hba->n_phy; i++) { 1533 if (!(sas_port->phy_mask & BIT(i))) 1534 continue; 1535
+34 -50
drivers/scsi/megaraid.c
··· 192 { 193 dma_addr_t prod_info_dma_handle; 194 mega_inquiry3 *inquiry3; 195 - u8 raw_mbox[sizeof(struct mbox_out)]; 196 - mbox_t *mbox; 197 int retval; 198 199 /* Initialize adapter inquiry mailbox */ 200 201 - mbox = (mbox_t *)raw_mbox; 202 - 203 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 204 - memset(&mbox->m_out, 0, sizeof(raw_mbox)); 205 206 /* 207 * Try to issue Inquiry3 command 208 * if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and 209 * update enquiry3 structure 210 */ 211 - mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 212 213 inquiry3 = (mega_inquiry3 *)adapter->mega_buffer; 214 ··· 230 231 inq = &ext_inq->raid_inq; 232 233 - mbox->m_out.xferaddr = (u32)dma_handle; 234 235 /*issue old 0x04 command to adapter */ 236 - mbox->m_out.cmd = MEGA_MBOXCMD_ADPEXTINQ; 237 238 issue_scb_block(adapter, raw_mbox); 239 ··· 260 sizeof(mega_product_info), 261 DMA_FROM_DEVICE); 262 263 - mbox->m_out.xferaddr = prod_info_dma_handle; 264 265 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ 266 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */ ··· 3567 static int 3568 mega_is_bios_enabled(adapter_t *adapter) 3569 { 3570 - unsigned char raw_mbox[sizeof(struct mbox_out)]; 3571 - mbox_t *mbox; 3572 3573 - mbox = (mbox_t *)raw_mbox; 3574 - 3575 - memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3576 3577 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3578 3579 - mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3580 3581 raw_mbox[0] = IS_BIOS_ENABLED; 3582 raw_mbox[2] = GET_BIOS; ··· 3596 static void 3597 mega_enum_raid_scsi(adapter_t *adapter) 3598 { 3599 - unsigned char raw_mbox[sizeof(struct mbox_out)]; 3600 - mbox_t *mbox; 3601 int i; 3602 3603 - mbox = (mbox_t *)raw_mbox; 3604 - 3605 - memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3606 3607 /* 3608 * issue command to find out what channels are raid/scsi ··· 3610 3611 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3612 3613 - mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3614 3615 /* 3616 * Non-ROMB firmware fail this command, so all channels ··· 3649 mega_get_boot_drv(adapter_t *adapter) 3650 { 3651 struct private_bios_data *prv_bios_data; 3652 - unsigned char raw_mbox[sizeof(struct mbox_out)]; 3653 - mbox_t *mbox; 3654 u16 cksum = 0; 3655 u8 *cksum_p; 3656 u8 boot_pdrv; 3657 int i; 3658 3659 - mbox = (mbox_t *)raw_mbox; 3660 - 3661 - memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3662 3663 raw_mbox[0] = BIOS_PVT_DATA; 3664 raw_mbox[2] = GET_BIOS_PVT_DATA; 3665 3666 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3667 3668 - mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3669 3670 adapter->boot_ldrv_enabled = 0; 3671 adapter->boot_ldrv = 0; ··· 3713 static int 3714 mega_support_random_del(adapter_t *adapter) 3715 { 3716 - unsigned char raw_mbox[sizeof(struct mbox_out)]; 3717 - mbox_t *mbox; 3718 int rval; 3719 3720 - mbox = (mbox_t *)raw_mbox; 3721 - 3722 - memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3723 3724 /* 3725 * issue command ··· 3740 static int 3741 mega_support_ext_cdb(adapter_t *adapter) 3742 { 3743 - unsigned char raw_mbox[sizeof(struct mbox_out)]; 3744 - mbox_t *mbox; 3745 int rval; 3746 3747 - mbox = (mbox_t *)raw_mbox; 3748 - 3749 - memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3750 /* 3751 * issue command to find out if controller supports extended CDBs. 
3752 */ ··· 3853 static void 3854 mega_get_max_sgl(adapter_t *adapter) 3855 { 3856 - unsigned char raw_mbox[sizeof(struct mbox_out)]; 3857 - mbox_t *mbox; 3858 3859 - mbox = (mbox_t *)raw_mbox; 3860 - 3861 - memset(mbox, 0, sizeof(raw_mbox)); 3862 3863 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3864 3865 - mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3866 3867 raw_mbox[0] = MAIN_MISC_OPCODE; 3868 raw_mbox[2] = GET_MAX_SG_SUPPORT; ··· 3874 } 3875 else { 3876 adapter->sglen = *((char *)adapter->mega_buffer); 3877 - 3878 /* 3879 * Make sure this is not more than the resources we are 3880 * planning to allocate ··· 3896 static int 3897 mega_support_cluster(adapter_t *adapter) 3898 { 3899 - unsigned char raw_mbox[sizeof(struct mbox_out)]; 3900 - mbox_t *mbox; 3901 3902 - mbox = (mbox_t *)raw_mbox; 3903 - 3904 - memset(mbox, 0, sizeof(raw_mbox)); 3905 3906 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3907 3908 - mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3909 3910 /* 3911 * Try to get the initiator id. This command will succeed iff the
··· 192 { 193 dma_addr_t prod_info_dma_handle; 194 mega_inquiry3 *inquiry3; 195 + struct mbox_out mbox; 196 + u8 *raw_mbox = (u8 *)&mbox; 197 int retval; 198 199 /* Initialize adapter inquiry mailbox */ 200 201 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 202 + memset(&mbox, 0, sizeof(mbox)); 203 204 /* 205 * Try to issue Inquiry3 command 206 * if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and 207 * update enquiry3 structure 208 */ 209 + mbox.xferaddr = (u32)adapter->buf_dma_handle; 210 211 inquiry3 = (mega_inquiry3 *)adapter->mega_buffer; 212 ··· 232 233 inq = &ext_inq->raid_inq; 234 235 + mbox.xferaddr = (u32)dma_handle; 236 237 /*issue old 0x04 command to adapter */ 238 + mbox.cmd = MEGA_MBOXCMD_ADPEXTINQ; 239 240 issue_scb_block(adapter, raw_mbox); 241 ··· 262 sizeof(mega_product_info), 263 DMA_FROM_DEVICE); 264 265 + mbox.xferaddr = prod_info_dma_handle; 266 267 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ 268 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */ ··· 3569 static int 3570 mega_is_bios_enabled(adapter_t *adapter) 3571 { 3572 + struct mbox_out mbox; 3573 + unsigned char *raw_mbox = (u8 *)&mbox; 3574 3575 + memset(&mbox, 0, sizeof(mbox)); 3576 3577 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3578 3579 + mbox.xferaddr = (u32)adapter->buf_dma_handle; 3580 3581 raw_mbox[0] = IS_BIOS_ENABLED; 3582 raw_mbox[2] = GET_BIOS; ··· 3600 static void 3601 mega_enum_raid_scsi(adapter_t *adapter) 3602 { 3603 + struct mbox_out mbox; 3604 + unsigned char *raw_mbox = (u8 *)&mbox; 3605 int i; 3606 3607 + memset(&mbox, 0, sizeof(mbox)); 3608 3609 /* 3610 * issue command to find out what channels are raid/scsi ··· 3616 3617 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3618 3619 + mbox.xferaddr = (u32)adapter->buf_dma_handle; 3620 3621 /* 3622 * Non-ROMB firmware fail this command, so all channels ··· 3655 mega_get_boot_drv(adapter_t *adapter) 3656 { 3657 struct private_bios_data *prv_bios_data; 3658 + struct mbox_out mbox; 3659 + unsigned char *raw_mbox = (u8 *)&mbox; 3660 u16 cksum = 0; 3661 u8 *cksum_p; 3662 u8 boot_pdrv; 3663 int i; 3664 3665 + memset(&mbox, 0, sizeof(mbox)); 3666 3667 raw_mbox[0] = BIOS_PVT_DATA; 3668 raw_mbox[2] = GET_BIOS_PVT_DATA; 3669 3670 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3671 3672 + mbox.xferaddr = (u32)adapter->buf_dma_handle; 3673 3674 adapter->boot_ldrv_enabled = 0; 3675 adapter->boot_ldrv = 0; ··· 3721 static int 3722 mega_support_random_del(adapter_t *adapter) 3723 { 3724 + struct mbox_out mbox; 3725 + unsigned char *raw_mbox = (u8 *)&mbox; 3726 int rval; 3727 3728 + memset(&mbox, 0, sizeof(mbox)); 3729 3730 /* 3731 * issue command ··· 3750 static int 3751 mega_support_ext_cdb(adapter_t *adapter) 3752 { 3753 + struct mbox_out mbox; 3754 + unsigned char *raw_mbox = (u8 *)&mbox; 3755 int rval; 3756 3757 + memset(&mbox, 0, sizeof(mbox)); 3758 /* 3759 * issue command to find out if controller supports extended CDBs. 
3760 */ ··· 3865 static void 3866 mega_get_max_sgl(adapter_t *adapter) 3867 { 3868 + struct mbox_out mbox; 3869 + unsigned char *raw_mbox = (u8 *)&mbox; 3870 3871 + memset(&mbox, 0, sizeof(mbox)); 3872 3873 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3874 3875 + mbox.xferaddr = (u32)adapter->buf_dma_handle; 3876 3877 raw_mbox[0] = MAIN_MISC_OPCODE; 3878 raw_mbox[2] = GET_MAX_SG_SUPPORT; ··· 3888 } 3889 else { 3890 adapter->sglen = *((char *)adapter->mega_buffer); 3891 + 3892 /* 3893 * Make sure this is not more than the resources we are 3894 * planning to allocate ··· 3910 static int 3911 mega_support_cluster(adapter_t *adapter) 3912 { 3913 + struct mbox_out mbox; 3914 + unsigned char *raw_mbox = (u8 *)&mbox; 3915 3916 + memset(&mbox, 0, sizeof(mbox)); 3917 3918 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3919 3920 + mbox.xferaddr = (u32)adapter->buf_dma_handle; 3921 3922 /* 3923 * Try to get the initiator id. This command will succeed iff the
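
The megaraid rewrite above is what "Avoid mismatched storage type sizes" refers to: the old helpers declared a buffer of sizeof(struct mbox_out) bytes and then cast it to the larger mbox_t, so typed stores like mbox->m_out.xferaddr went through a pointer whose type promises more storage than the array provides, which modern compilers flag even though the touched fields happen to fit. The new code keeps the object typed as struct mbox_out and keeps only a byte alias for the raw opcode pokes. A minimal sketch of the idiom, assuming stand-in field names and opcode values rather than the driver's real definitions:

#include <linux/string.h>
#include <linux/types.h>

/* demo_mbox_out is an illustrative layout, not the megaraid one */
struct demo_mbox_out {
	u8  cmd;
	u8  cmdid;
	u16 numsectors;
	u32 xferaddr;
};

static void demo_fill_mbox(dma_addr_t buf_dma, u8 opcode, u8 subop)
{
	struct demo_mbox_out mbox;
	u8 *raw_mbox = (u8 *)&mbox;	/* byte view of the same object */

	memset(&mbox, 0, sizeof(mbox));	/* size and type now agree */
	mbox.xferaddr = (u32)buf_dma;	/* typed store, no oversized cast */

	raw_mbox[0] = opcode;		/* raw command bytes, as above */
	raw_mbox[2] = subop;
}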
+5 -6
drivers/scsi/mpi3mr/mpi3mr_fw.c
··· 901 }, 902 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, 903 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, 904 - { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronus reset" }, 905 }; 906 907 /** ··· 1242 ioc_state = mpi3mr_get_iocstate(mrioc); 1243 if (ioc_state == MRIOC_STATE_READY) { 1244 ioc_info(mrioc, 1245 - "successfully transistioned to %s state\n", 1246 mpi3mr_iocstate_name(ioc_state)); 1247 return 0; 1248 } ··· 2174 * mpi3mr_check_rh_fault_ioc - check reset history and fault 2175 * controller 2176 * @mrioc: Adapter instance reference 2177 - * @reason_code, reason code for the fault. 2178 * 2179 * This routine will save snapdump and fault the controller with 2180 * the given reason code if it is not already in the fault or ··· 3633 /** 3634 * mpi3mr_init_ioc - Initialize the controller 3635 * @mrioc: Adapter instance reference 3636 - * @init_type: Flag to indicate is the init_type 3637 * 3638 * This the controller initialization routine, executed either 3639 * after soft reset or from pci probe callback. ··· 3843 3844 if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) { 3845 ioc_err(mrioc, 3846 - "cannot create minimum number of operatioanl queues expected:%d created:%d\n", 3847 mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q); 3848 goto out_failed_noretry; 3849 } ··· 4173 /** 4174 * mpi3mr_cleanup_ioc - Cleanup controller 4175 * @mrioc: Adapter instance reference 4176 - 4177 * controller cleanup handler, Message unit reset or soft reset 4178 * and shutdown notification is issued to the controller. 4179 *
··· 901 }, 902 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, 903 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, 904 + { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" }, 905 }; 906 907 /** ··· 1242 ioc_state = mpi3mr_get_iocstate(mrioc); 1243 if (ioc_state == MRIOC_STATE_READY) { 1244 ioc_info(mrioc, 1245 + "successfully transitioned to %s state\n", 1246 mpi3mr_iocstate_name(ioc_state)); 1247 return 0; 1248 } ··· 2174 * mpi3mr_check_rh_fault_ioc - check reset history and fault 2175 * controller 2176 * @mrioc: Adapter instance reference 2177 + * @reason_code: reason code for the fault. 2178 * 2179 * This routine will save snapdump and fault the controller with 2180 * the given reason code if it is not already in the fault or ··· 3633 /** 3634 * mpi3mr_init_ioc - Initialize the controller 3635 * @mrioc: Adapter instance reference 3636 * 3637 * This the controller initialization routine, executed either 3638 * after soft reset or from pci probe callback. ··· 3844 3845 if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) { 3846 ioc_err(mrioc, 3847 + "cannot create minimum number of operational queues expected:%d created:%d\n", 3848 mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q); 3849 goto out_failed_noretry; 3850 } ··· 4174 /** 4175 * mpi3mr_cleanup_ioc - Cleanup controller 4176 * @mrioc: Adapter instance reference 4177 + * 4178 * controller cleanup handler, Message unit reset or soft reset 4179 * and shutdown notification is issued to the controller. 4180 *
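
The mpi3mr hunks are kernel-doc and spelling repairs, but the comment fixes are functional in their own way: scripts/kernel-doc only recognizes a parameter written as "@name: description" (the comma after @reason_code broke parsing), the parameter block must be separated from the body by a bare "*" line, and the dropped "@init_type" line documented a parameter the function no longer takes. A minimal well-formed block, using a made-up function name:

/**
 * demo_check_fault - fault the controller with a reason code
 * @mrioc: Adapter instance reference
 * @reason_code: reason code for the fault
 *
 * Body text sits after a bare "*" separator line, as in the
 * mpi3mr_cleanup_ioc() fix above.
 *
 * Return: Nothing.
 */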
+2 -2
drivers/scsi/mpt3sas/mpt3sas_base.h
··· 77 #define MPT3SAS_DRIVER_NAME "mpt3sas" 78 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" 79 #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" 80 - #define MPT3SAS_DRIVER_VERSION "39.100.00.00" 81 - #define MPT3SAS_MAJOR_VERSION 39 82 #define MPT3SAS_MINOR_VERSION 100 83 #define MPT3SAS_BUILD_VERSION 0 84 #define MPT3SAS_RELEASE_VERSION 00
··· 77 #define MPT3SAS_DRIVER_NAME "mpt3sas" 78 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" 79 #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" 80 + #define MPT3SAS_DRIVER_VERSION "40.100.00.00" 81 + #define MPT3SAS_MAJOR_VERSION 40 82 #define MPT3SAS_MINOR_VERSION 100 83 #define MPT3SAS_BUILD_VERSION 0 84 #define MPT3SAS_RELEASE_VERSION 00
+83 -4
drivers/scsi/mpt3sas/mpt3sas_ctl.c
··· 3533 { 3534 struct Scsi_Host *shost = class_to_shost(cdev); 3535 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3536 unsigned long flags; 3537 ssize_t rc; 3538 3539 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3540 - rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count); 3541 memset(&ioc->diag_trigger_master, 0, 3542 sizeof(struct SL_WH_MASTER_TRIGGER_T)); 3543 memcpy(&ioc->diag_trigger_master, buf, rc); ··· 3609 { 3610 struct Scsi_Host *shost = class_to_shost(cdev); 3611 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3612 unsigned long flags; 3613 ssize_t sz; 3614 3615 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3616 - sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count); 3617 memset(&ioc->diag_trigger_event, 0, 3618 sizeof(struct SL_WH_EVENT_TRIGGERS_T)); 3619 memcpy(&ioc->diag_trigger_event, buf, sz); ··· 3684 { 3685 struct Scsi_Host *shost = class_to_shost(cdev); 3686 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3687 unsigned long flags; 3688 ssize_t sz; 3689 3690 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3691 - sz = min(sizeof(ioc->diag_trigger_scsi), count); 3692 memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi)); 3693 memcpy(&ioc->diag_trigger_scsi, buf, sz); 3694 if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES) ··· 3758 { 3759 struct Scsi_Host *shost = class_to_shost(cdev); 3760 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3761 unsigned long flags; 3762 ssize_t sz; 3763 3764 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3765 - sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count); 3766 memset(&ioc->diag_trigger_mpi, 0, 3767 sizeof(ioc->diag_trigger_mpi)); 3768 memcpy(&ioc->diag_trigger_mpi, buf, sz);
··· 3533 {
3534 struct Scsi_Host *shost = class_to_shost(cdev);
3535 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3536 + struct SL_WH_MASTER_TRIGGER_T *master_tg;
3537 unsigned long flags;
3538 ssize_t rc;
3539 + bool set = 1;
3540 +
3541 + rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
3542 +
3543 + if (ioc->supports_trigger_pages) {
3544 + master_tg = kzalloc(sizeof(struct SL_WH_MASTER_TRIGGER_T),
3545 + GFP_KERNEL);
3546 + if (!master_tg)
3547 + return -ENOMEM;
3548 +
3549 + memcpy(master_tg, buf, rc);
3550 + if (!master_tg->MasterData)
3551 + set = 0;
3552 + if (mpt3sas_config_update_driver_trigger_pg1(ioc, master_tg,
3553 + set)) {
3554 + kfree(master_tg);
3555 + return -EFAULT;
3556 + }
3557 + kfree(master_tg);
3558 + }
3559
3560 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3561 memset(&ioc->diag_trigger_master, 0,
3562 sizeof(struct SL_WH_MASTER_TRIGGER_T));
3563 memcpy(&ioc->diag_trigger_master, buf, rc);
··· 3589 {
3590 struct Scsi_Host *shost = class_to_shost(cdev);
3591 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3592 + struct SL_WH_EVENT_TRIGGERS_T *event_tg;
3593 unsigned long flags;
3594 ssize_t sz;
3595 + bool set = 1;
3596 +
3597 + sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
3598 + if (ioc->supports_trigger_pages) {
3599 + event_tg = kzalloc(sizeof(struct SL_WH_EVENT_TRIGGERS_T),
3600 + GFP_KERNEL);
3601 + if (!event_tg)
3602 + return -ENOMEM;
3603 +
3604 + memcpy(event_tg, buf, sz);
3605 + if (!event_tg->ValidEntries)
3606 + set = 0;
3607 + if (mpt3sas_config_update_driver_trigger_pg2(ioc, event_tg,
3608 + set)) {
3609 + kfree(event_tg);
3610 + return -EFAULT;
3611 + }
3612 + kfree(event_tg);
3613 + }
3614
3615 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3616 +
3617 memset(&ioc->diag_trigger_event, 0,
3618 sizeof(struct SL_WH_EVENT_TRIGGERS_T));
3619 memcpy(&ioc->diag_trigger_event, buf, sz);
··· 3644 {
3645 struct Scsi_Host *shost = class_to_shost(cdev);
3646 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3647 + struct SL_WH_SCSI_TRIGGERS_T *scsi_tg;
3648 unsigned long flags;
3649 ssize_t sz;
3650 + bool set = 1;
3651 +
3652 + sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
3653 + if (ioc->supports_trigger_pages) {
3654 + scsi_tg = kzalloc(sizeof(struct SL_WH_SCSI_TRIGGERS_T),
3655 + GFP_KERNEL);
3656 + if (!scsi_tg)
3657 + return -ENOMEM;
3658 +
3659 + memcpy(scsi_tg, buf, sz);
3660 + if (!scsi_tg->ValidEntries)
3661 + set = 0;
3662 + if (mpt3sas_config_update_driver_trigger_pg3(ioc, scsi_tg,
3663 + set)) {
3664 + kfree(scsi_tg);
3665 + return -EFAULT;
3666 + }
3667 + kfree(scsi_tg);
3668 + }
3669
3670 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3671 +
3672 memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
3673 memcpy(&ioc->diag_trigger_scsi, buf, sz);
3674 if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
··· 3698 {
3699 struct Scsi_Host *shost = class_to_shost(cdev);
3700 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3701 + struct SL_WH_MPI_TRIGGERS_T *mpi_tg;
3702 unsigned long flags;
3703 ssize_t sz;
3704 + bool set = 1;
3705 +
3706 + sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
3707 + if (ioc->supports_trigger_pages) {
3708 + mpi_tg = kzalloc(sizeof(struct SL_WH_MPI_TRIGGERS_T),
3709 + GFP_KERNEL);
3710 + if (!mpi_tg)
3711 + return -ENOMEM;
3712 +
3713 + memcpy(mpi_tg, buf, sz);
3714 + if (!mpi_tg->ValidEntries)
3715 + set = 0;
3716 + if (mpt3sas_config_update_driver_trigger_pg4(ioc, mpi_tg,
3717 + set)) {
3718 + kfree(mpi_tg);
3719 + return -EFAULT;
3720 + }
3721 + kfree(mpi_tg);
3722 + }
3723
3724 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3725 memset(&ioc->diag_trigger_mpi, 0,
3726 sizeof(ioc->diag_trigger_mpi));
3727 memcpy(&ioc->diag_trigger_mpi, buf, sz);
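
All four *_store handlers above now share one shape: clamp the user write with min() so the later memcpy() cannot overrun the trigger structure, push the new values into the persistent driver-trigger config page via a kzalloc()'d scratch copy while no lock is held (config-page I/O can sleep), and only then refresh the cached copy under diag_trigger_lock; an all-zero payload is treated as "clear the page" (set == 0). A condensed sketch of that shape, assuming stand-in types and a stubbed page-update helper in place of mpt3sas_config_update_driver_trigger_pgN():

#include <linux/minmax.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_triggers {
	u32 ValidEntries;
	u32 entries[20];
};

struct demo_adapter {
	bool supports_trigger_pages;
	spinlock_t diag_trigger_lock;
	struct demo_triggers cached;
};

/* stub for the real helper, which performs sleeping config-page I/O */
static int demo_update_trigger_page(struct demo_adapter *ioc,
				    struct demo_triggers *tg, bool set)
{
	return 0;
}

static ssize_t demo_trigger_store(struct demo_adapter *ioc,
				  const char *buf, size_t count)
{
	struct demo_triggers *tg;
	unsigned long flags;
	ssize_t sz = min(sizeof(struct demo_triggers), count);

	if (ioc->supports_trigger_pages) {
		tg = kzalloc(sizeof(*tg), GFP_KERNEL); /* may sleep: no lock held */
		if (!tg)
			return -ENOMEM;
		memcpy(tg, buf, sz);
		/* an all-zero write clears the persistent page */
		if (demo_update_trigger_page(ioc, tg, tg->ValidEntries != 0)) {
			kfree(tg);
			return -EFAULT;
		}
		kfree(tg);
	}

	/* only the cached copy is touched under the spinlock */
	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	memset(&ioc->cached, 0, sizeof(ioc->cached));
	memcpy(&ioc->cached, buf, sz);
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return sz;
}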
+3
drivers/scsi/pcmcia/nsp_cs.c
··· 1557 data->MmioAddress = (unsigned long) 1558 ioremap(p_dev->resource[2]->start, 1559 resource_size(p_dev->resource[2])); 1560 data->MmioLength = resource_size(p_dev->resource[2]); 1561 } 1562 /* If we got this far, we're cool! */
··· 1557 data->MmioAddress = (unsigned long) 1558 ioremap(p_dev->resource[2]->start, 1559 resource_size(p_dev->resource[2])); 1560 + if (!data->MmioAddress) 1561 + goto next_entry; 1562 + 1563 data->MmioLength = resource_size(p_dev->resource[2]); 1564 } 1565 /* If we got this far, we're cool! */
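
ioremap() returns NULL on failure; before this fix the nsp_cs probe loop stored whatever came back in data->MmioAddress, so a failed mapping became address zero and was dereferenced later through the MMIO accessors. The added check treats an unmappable window like any other unusable candidate and moves on. A sketch of the loop shape, assuming caller-provided resource arrays:

#include <linux/io.h>
#include <linux/types.h>

static unsigned long demo_pick_mmio(const resource_size_t *start,
				    const resource_size_t *len, int n)
{
	void __iomem *base;
	int i;

	for (i = 0; i < n; i++) {
		base = ioremap(start[i], len[i]);
		if (!base)
			continue;	/* the "goto next_entry" above */
		return (unsigned long)base;	/* stored as the driver does */
	}
	return 0;
}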
+6 -1
drivers/scsi/pm8001/pm8001_sas.c
··· 1199 struct pm8001_device *pm8001_dev; 1200 struct pm8001_tmf_task tmf_task; 1201 int rc = TMF_RESP_FUNC_FAILED, ret; 1202 - u32 phy_id; 1203 struct sas_task_slow slow_task; 1204 1205 if (unlikely(!task || !task->lldd_task || !task->dev)) ··· 1246 DECLARE_COMPLETION_ONSTACK(completion_reset); 1247 DECLARE_COMPLETION_ONSTACK(completion); 1248 struct pm8001_phy *phy = pm8001_ha->phy + phy_id; 1249 1250 /* 1. Set Device state as Recovery */ 1251 pm8001_dev->setds_completion = &completion; ··· 1298 PORT_RESET_TMO); 1299 if (phy->port_reset_status == PORT_RESET_TMO) { 1300 pm8001_dev_gone_notify(dev); 1301 goto out; 1302 } 1303 }
··· 1199 struct pm8001_device *pm8001_dev; 1200 struct pm8001_tmf_task tmf_task; 1201 int rc = TMF_RESP_FUNC_FAILED, ret; 1202 + u32 phy_id, port_id; 1203 struct sas_task_slow slow_task; 1204 1205 if (unlikely(!task || !task->lldd_task || !task->dev)) ··· 1246 DECLARE_COMPLETION_ONSTACK(completion_reset); 1247 DECLARE_COMPLETION_ONSTACK(completion); 1248 struct pm8001_phy *phy = pm8001_ha->phy + phy_id; 1249 + port_id = phy->port->port_id; 1250 1251 /* 1. Set Device state as Recovery */ 1252 pm8001_dev->setds_completion = &completion; ··· 1297 PORT_RESET_TMO); 1298 if (phy->port_reset_status == PORT_RESET_TMO) { 1299 pm8001_dev_gone_notify(dev); 1300 + PM8001_CHIP_DISP->hw_event_ack_req( 1301 + pm8001_ha, 0, 1302 + 0x07, /*HW_EVENT_PHY_DOWN ack*/ 1303 + port_id, phy_id, 0, 0); 1304 goto out; 1305 } 1306 }
+3
drivers/scsi/pm8001/pm8001_sas.h
··· 216 u32 state); 217 int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha); 218 int (*fatal_errors)(struct pm8001_hba_info *pm8001_ha); 219 }; 220 221 struct pm8001_chip_info {
··· 216 u32 state); 217 int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha); 218 int (*fatal_errors)(struct pm8001_hba_info *pm8001_ha); 219 + void (*hw_event_ack_req)(struct pm8001_hba_info *pm8001_ha, 220 + u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, 221 + u32 param1); 222 }; 223 224 struct pm8001_chip_info {
+5 -2
drivers/scsi/pm8001/pm80xx_hwi.c
··· 3712 break; 3713 case HW_EVENT_PORT_RESET_TIMER_TMO: 3714 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n"); 3715 - pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, 3716 - port_id, phy_id, 0, 0); 3717 sas_phy_disconnected(sas_phy); 3718 phy->phy_attached = 0; 3719 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, ··· 5057 .fw_flash_update_req = pm8001_chip_fw_flash_update_req, 5058 .set_dev_state_req = pm8001_chip_set_dev_state_req, 5059 .fatal_errors = pm80xx_fatal_errors, 5060 };
··· 3712 break; 3713 case HW_EVENT_PORT_RESET_TIMER_TMO: 3714 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n"); 3715 + if (!pm8001_ha->phy[phy_id].reset_completion) { 3716 + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, 3717 + port_id, phy_id, 0, 0); 3718 + } 3719 sas_phy_disconnected(sas_phy); 3720 phy->phy_attached = 0; 3721 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, ··· 5055 .fw_flash_update_req = pm8001_chip_fw_flash_update_req, 5056 .set_dev_state_req = pm8001_chip_set_dev_state_req, 5057 .fatal_errors = pm80xx_fatal_errors, 5058 + .hw_event_ack_req = pm80xx_hw_event_ack_req, 5059 };
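
The three pm8001/pm80xx hunks cooperate: pm8001_sas.c latches the port ID before the reset starts (the phy's port may be gone by the time the timeout fires), and on PORT_RESET_TMO it acks the outstanding HW_EVENT_PHY_DOWN through the new ->hw_event_ack_req dispatch entry, since the chip-agnostic layer cannot call the pm80xx-specific function directly; pm80xx_hwi.c correspondingly skips its own ack while a reset initiator is still waiting on reset_completion, so the event is acked exactly once. A trimmed sketch of the dispatch-table pattern, with stand-in types:

#include <linux/types.h>

struct demo_hba;

/* per-chip ops; chip-agnostic code only calls through this table */
struct demo_dispatch {
	void (*hw_event_ack_req)(struct demo_hba *ha, u32 qnum, u32 event,
				 u32 port_id, u32 phy_id, u32 p0, u32 p1);
};

#define DEMO_HW_EVENT_PHY_DOWN 0x07	/* the 0x07 used above */

static void demo_port_reset_timed_out(const struct demo_dispatch *disp,
				      struct demo_hba *ha,
				      u32 port_id, u32 phy_id)
{
	/* ack the PHY-down reported during the failed port reset */
	disp->hw_event_ack_req(ha, 0, DEMO_HW_EVENT_PHY_DOWN,
			       port_id, phy_id, 0, 0);
}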
+2
drivers/scsi/qedf/qedf_main.c
··· 1415 */ 1416 term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, 1417 &term_params_dma, GFP_KERNEL); 1418 1419 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection " 1420 "port_id=%06x.\n", fcport->rdata->ids.port_id);
··· 1415 */ 1416 term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, 1417 &term_params_dma, GFP_KERNEL); 1418 + if (!term_params) 1419 + return; 1420 1421 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection " 1422 "port_id=%06x.\n", fcport->rdata->ids.port_id);
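
dma_alloc_coherent() returns NULL on failure, and qedf previously passed that NULL straight into the termination request it builds next. Nothing has been allocated at this point, so an early return is the entire fix. The generic shape, as a sketch:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static void demo_upload(struct device *dev, size_t sz)
{
	dma_addr_t daddr;
	void *vaddr;

	vaddr = dma_alloc_coherent(dev, sz, &daddr, GFP_KERNEL);
	if (!vaddr)
		return;		/* nothing to unwind yet */

	/* ... build and post the termination request via vaddr/daddr ... */

	dma_free_coherent(dev, sz, vaddr, daddr);
}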
+3 -5
drivers/scsi/scsi_lib.c
··· 2067 * @sdev: SCSI device to be queried 2068 * @pf: Page format bit (1 == standard, 0 == vendor specific) 2069 * @sp: Save page bit (0 == don't save, 1 == save) 2070 - * @modepage: mode page being requested 2071 * @buffer: request buffer (may not be smaller than eight bytes) 2072 * @len: length of request buffer. 2073 * @timeout: command timeout ··· 2079 * status on error 2080 * 2081 */ 2082 - int 2083 - scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, 2084 - unsigned char *buffer, int len, int timeout, int retries, 2085 - struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 2086 { 2087 unsigned char cmd[10]; 2088 unsigned char *real_buffer;
··· 2067 * @sdev: SCSI device to be queried 2068 * @pf: Page format bit (1 == standard, 0 == vendor specific) 2069 * @sp: Save page bit (0 == don't save, 1 == save) 2070 * @buffer: request buffer (may not be smaller than eight bytes) 2071 * @len: length of request buffer. 2072 * @timeout: command timeout ··· 2080 * status on error 2081 * 2082 */ 2083 + int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, 2084 + unsigned char *buffer, int len, int timeout, int retries, 2085 + struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 2086 { 2087 unsigned char cmd[10]; 2088 unsigned char *real_buffer;
+1 -1
drivers/scsi/sd.c
··· 209 */ 210 data.device_specific = 0; 211 212 - if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, 213 sdkp->max_retries, &data, &sshdr)) { 214 if (scsi_sense_valid(&sshdr)) 215 sd_print_sense_hdr(sdkp, &sshdr);
··· 209 */ 210 data.device_specific = 0; 211 212 + if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT, 213 sdkp->max_retries, &data, &sshdr)) { 214 if (scsi_sense_valid(&sshdr)) 215 sd_print_sense_hdr(sdkp, &sshdr);
+1 -1
drivers/scsi/ufs/ufs-mediatek.c
··· 557 struct ufs_mtk_host *host = ufshcd_get_variant(hba); 558 559 host->reg_va09 = regulator_get(hba->dev, "va09"); 560 - if (!host->reg_va09) 561 dev_info(hba->dev, "failed to get va09"); 562 else 563 host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
··· 557 struct ufs_mtk_host *host = ufshcd_get_variant(hba); 558 559 host->reg_va09 = regulator_get(hba->dev, "va09"); 560 + if (IS_ERR(host->reg_va09)) 561 dev_info(hba->dev, "failed to get va09"); 562 else 563 host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
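
regulator_get() never returns NULL; failure comes back ERR_PTR()-encoded, so the old "!host->reg_va09" test could not fire and the driver would advertise VA09 power control even without the supply. IS_ERR() is the correct probe. The idiom, with a made-up supply name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static bool demo_get_supply(struct device *dev, struct regulator **out)
{
	struct regulator *reg = regulator_get(dev, "demo-supply");

	if (IS_ERR(reg)) {	/* failure is ERR_PTR, never NULL */
		dev_info(dev, "failed to get demo-supply: %ld\n",
			 PTR_ERR(reg));
		return false;
	}
	*out = reg;
	return true;
}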
+1 -1
drivers/scsi/ufs/ufshcd.c
··· 7815 peer_pa_tactivate_us = peer_pa_tactivate * 7816 gran_to_us_table[peer_granularity - 1]; 7817 7818 - if (pa_tactivate_us > peer_pa_tactivate_us) { 7819 u32 new_peer_pa_tactivate; 7820 7821 new_peer_pa_tactivate = pa_tactivate_us /
··· 7815 peer_pa_tactivate_us = peer_pa_tactivate * 7816 gran_to_us_table[peer_granularity - 1]; 7817 7818 + if (pa_tactivate_us >= peer_pa_tactivate_us) { 7819 u32 new_peer_pa_tactivate; 7820 7821 new_peer_pa_tactivate = pa_tactivate_us /
+2 -3
include/scsi/scsi_device.h
··· 415 int retries, struct scsi_mode_data *data, 416 struct scsi_sense_hdr *); 417 extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, 418 - int modepage, unsigned char *buffer, int len, 419 - int timeout, int retries, 420 - struct scsi_mode_data *data, 421 struct scsi_sense_hdr *); 422 extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, 423 int retries, struct scsi_sense_hdr *sshdr);
··· 415 int retries, struct scsi_mode_data *data, 416 struct scsi_sense_hdr *); 417 extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, 418 + unsigned char *buffer, int len, int timeout, 419 + int retries, struct scsi_mode_data *data, 420 struct scsi_sense_hdr *); 421 extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, 422 int retries, struct scsi_sense_hdr *sshdr);
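
The scsi_mode_select() change drops the modepage argument across scsi_lib.c, sd.c, and the header: the parameter was unused (the page code already travels inside the caller-supplied mode buffer), so callers such as sd.c above simply lose one argument. A sketch of a caller against the new prototype; the timeout and retry values here are illustrative, not mandated:

#include <scsi/scsi_common.h>
#include <scsi/scsi_device.h>

static int demo_mode_select(struct scsi_device *sdev,
			    unsigned char *buffer, int len)
{
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;

	/* pf=1: standard page format; sp=0: do not save the page */
	return scsi_mode_select(sdev, 1, 0, buffer, len,
				30 * HZ, 3, &data, &sshdr);
}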