Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata

Pull libata updates from Tejun Heo:

- Hannes's patchset implements support for better error reporting
introduced by the new ATA command spec.

- the deprecated pci_ dma API usages have been replaced by dma_ ones.

- a bunch of hardware specific updates and some cleanups.

* 'for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata:
ata: remove deprecated use of pci api
ahci: st: st_configure_oob must be called after IP is clocked.
ahci: st: Update the ahci_st DT documentation
ahci: st: Update the DT example for how to obtain the PHY.
sata_dwc_460ex: indent an if statement
libata: Add tracepoints
libata-eh: Set 'information' field for autosense
libata: Implement support for sense data reporting
libata: Implement NCQ autosense
libata: use status bit definitions in ata_dump_status()
ide,ata: Rename ATA_IDX to ATA_SENSE
libata: whitespace fixes in ata_to_sense_error()
libata: whitespace cleanup in ata_get_cmd_descript()
libata: use READ_LOG_DMA_EXT
libata: remove ATA_FLAG_LOWTAG
sata_dwc_460ex: re-use hsdev->dev instead of dwc_dev
sata_dwc_460ex: move to generic DMA driver
sata_dwc_460ex: join messages back
sata: xgene: add ACPI support for APM X-Gene SATA ports
ata: sata_mv: add proper definitions for LP_PHY_CTL register values

+1096 -836
+33 -14
Documentation/devicetree/bindings/ata/ahci-st.txt
··· 3 3 This binding describes a SATA device. 4 4 5 5 Required properties: 6 - - compatible : Must be "st,sti-ahci" 6 + - compatible : Must be "st,ahci" 7 7 - reg : Physical base addresses and length of register sets 8 8 - interrupts : Interrupt associated with the SATA device 9 9 - interrupt-names : Associated name must be; "hostc" 10 - - resets : The power-down and soft-reset lines of SATA IP 11 - - reset-names : Associated names must be; "pwr-dwn" and "sw-rst" 12 10 - clocks : The phandle for the clock 13 11 - clock-names : Associated name must be; "ahci_clk" 14 - - phys : The phandle for the PHY device 12 + - phys : The phandle for the PHY port 15 13 - phy-names : Associated name must be; "ahci_phy" 14 + 15 + Optional properties: 16 + - resets : The power-down, soft-reset and power-reset lines of SATA IP 17 + - reset-names : Associated names must be; "pwr-dwn", "sw-rst" and "pwr-rst" 16 18 17 19 Example: 18 20 21 + /* Example for stih416 */ 19 22 sata0: sata@fe380000 { 20 - compatible = "st,sti-ahci"; 21 - reg = <0xfe380000 0x1000>; 22 - interrupts = <GIC_SPI 157 IRQ_TYPE_NONE>; 23 - interrupt-names = "hostc"; 24 - phys = <&miphy365x_phy MIPHY_PORT_0 MIPHY_TYPE_SATA>; 25 - phy-names = "ahci_phy"; 26 - resets = <&powerdown STIH416_SATA0_POWERDOWN>, 23 + compatible = "st,ahci"; 24 + reg = <0xfe380000 0x1000>; 25 + interrupts = <GIC_SPI 157 IRQ_TYPE_NONE>; 26 + interrupt-names = "hostc"; 27 + phys = <&phy_port0 PHY_TYPE_SATA>; 28 + phy-names = "ahci_phy"; 29 + resets = <&powerdown STIH416_SATA0_POWERDOWN>, 27 30 <&softreset STIH416_SATA0_SOFTRESET>; 28 - reset-names = "pwr-dwn", "sw-rst"; 29 - clocks = <&clk_s_a0_ls CLK_ICN_REG>; 30 - clock-names = "ahci_clk"; 31 + reset-names = "pwr-dwn", "sw-rst"; 32 + clocks = <&clk_s_a0_ls CLK_ICN_REG>; 33 + clock-names = "ahci_clk"; 34 + }; 35 + 36 + /* Example for stih407 family silicon */ 37 + sata0: sata@9b20000 { 38 + compatible = "st,ahci"; 39 + reg = <0x9b20000 0x1000>; 40 + interrupts = <GIC_SPI 159 IRQ_TYPE_NONE>; 41 
+ interrupt-names = "hostc"; 42 + phys = <&phy_port0 PHY_TYPE_SATA>; 43 + phy-names = "ahci_phy"; 44 + resets = <&powerdown STIH407_SATA0_POWERDOWN>, 45 + <&softreset STIH407_SATA0_SOFTRESET>, 46 + <&softreset STIH407_SATA0_PWR_SOFTRESET>; 47 + reset-names = "pwr-dwn", "sw-rst", "pwr-rst"; 48 + clocks = <&clk_s_c0_flexgen CLK_ICN_REG>; 49 + clock-names = "ahci_clk"; 31 50 };
+2 -1
drivers/ata/Makefile
··· 111 111 # Should be last libata driver 112 112 obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o 113 113 114 - libata-y := libata-core.o libata-scsi.o libata-eh.o libata-transport.o 114 + libata-y := libata-core.o libata-scsi.o libata-eh.o \ 115 + libata-transport.o libata-trace.o 115 116 libata-$(CONFIG_ATA_SFF) += libata-sff.o 116 117 libata-$(CONFIG_SATA_PMP) += libata-pmp.o 117 118 libata-$(CONFIG_ATA_ACPI) += libata-acpi.o
+5 -5
drivers/ata/acard-ahci.c
··· 181 181 int rc; 182 182 183 183 if (using_dac && 184 - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 185 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 184 + !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 185 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 186 186 if (rc) { 187 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 187 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 188 188 if (rc) { 189 189 dev_err(&pdev->dev, 190 190 "64-bit DMA enable failed\n"); ··· 192 192 } 193 193 } 194 194 } else { 195 - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 195 + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 196 196 if (rc) { 197 197 dev_err(&pdev->dev, "32-bit DMA enable failed\n"); 198 198 return rc; 199 199 } 200 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 200 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 201 201 if (rc) { 202 202 dev_err(&pdev->dev, 203 203 "32-bit consistent DMA enable failed\n");
+5 -5
drivers/ata/ahci.c
··· 738 738 return 0; 739 739 740 740 if (using_dac && 741 - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 742 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 741 + !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 742 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 743 743 if (rc) { 744 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 744 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 745 745 if (rc) { 746 746 dev_err(&pdev->dev, 747 747 "64-bit DMA enable failed\n"); ··· 749 749 } 750 750 } 751 751 } else { 752 - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 752 + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 753 753 if (rc) { 754 754 dev_err(&pdev->dev, "32-bit DMA enable failed\n"); 755 755 return rc; 756 756 } 757 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 757 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 758 758 if (rc) { 759 759 dev_err(&pdev->dev, 760 760 "32-bit consistent DMA enable failed\n");
+4 -2
drivers/ata/ahci_st.c
··· 68 68 } 69 69 } 70 70 71 - st_ahci_configure_oob(drv_data->hpriv->mmio); 72 - 73 71 if (drv_data->sw_rst) { 74 72 err = reset_control_deassert(drv_data->sw_rst); 75 73 if (err) { ··· 170 172 if (err) 171 173 return err; 172 174 175 + st_ahci_configure_oob(drv_data->hpriv->mmio); 176 + 173 177 err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, 174 178 &ahci_platform_sht); 175 179 if (err) { ··· 221 221 ahci_platform_disable_resources(hpriv); 222 222 return err; 223 223 } 224 + 225 + st_ahci_configure_oob(drv_data->hpriv->mmio); 224 226 225 227 return ahci_platform_resume_host(dev); 226 228 }
+10
drivers/ata/ahci_xgene.c
··· 22 22 * NOTE: PM support is not currently available. 23 23 * 24 24 */ 25 + #include <linux/acpi.h> 25 26 #include <linux/module.h> 26 27 #include <linux/platform_device.h> 27 28 #include <linux/ahci_platform.h> ··· 719 718 return rc; 720 719 } 721 720 721 + #ifdef CONFIG_ACPI 722 + static const struct acpi_device_id xgene_ahci_acpi_match[] = { 723 + { "APMC0D0D", }, 724 + { } 725 + }; 726 + MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match); 727 + #endif 728 + 722 729 static const struct of_device_id xgene_ahci_of_match[] = { 723 730 {.compatible = "apm,xgene-ahci"}, 724 731 {}, ··· 739 730 .driver = { 740 731 .name = DRV_NAME, 741 732 .of_match_table = xgene_ahci_of_match, 733 + .acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match), 742 734 }, 743 735 }; 744 736
+28 -4
drivers/ata/libata-core.c
··· 70 70 #include <linux/pm_runtime.h> 71 71 #include <linux/platform_device.h> 72 72 73 + #define CREATE_TRACE_POINTS 74 + #include <trace/events/libata.h> 75 + 73 76 #include "libata.h" 74 77 #include "libata-transport.h" 75 78 ··· 694 691 * RETURNS: 695 692 * Block address read from @tf. 696 693 */ 697 - u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) 694 + u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev) 698 695 { 699 696 u64 block = 0; 700 697 701 - if (tf->flags & ATA_TFLAG_LBA) { 698 + if (!dev || tf->flags & ATA_TFLAG_LBA) { 702 699 if (tf->flags & ATA_TFLAG_LBA48) { 703 700 block |= (u64)tf->hob_lbah << 40; 704 701 block |= (u64)tf->hob_lbam << 32; ··· 2147 2144 return 0; 2148 2145 } 2149 2146 2147 + static void ata_dev_config_sense_reporting(struct ata_device *dev) 2148 + { 2149 + unsigned int err_mask; 2150 + 2151 + if (!ata_id_has_sense_reporting(dev->id)) 2152 + return; 2153 + 2154 + if (ata_id_sense_reporting_enabled(dev->id)) 2155 + return; 2156 + 2157 + err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1); 2158 + if (err_mask) { 2159 + ata_dev_dbg(dev, 2160 + "failed to enable Sense Data Reporting, Emask 0x%x\n", 2161 + err_mask); 2162 + } 2163 + } 2164 + 2150 2165 /** 2151 2166 * ata_dev_configure - Configure the specified ATA/ATAPI device 2152 2167 * @dev: Target device to configure ··· 2387 2366 dev->devslp_timing[i] = sata_setting[j]; 2388 2367 } 2389 2368 } 2390 - 2369 + ata_dev_config_sense_reporting(dev); 2391 2370 dev->cdb_len = 16; 2392 2371 } 2393 2372 ··· 4918 4897 */ 4919 4898 if (unlikely(ata_tag_internal(qc->tag))) { 4920 4899 fill_result_tf(qc); 4900 + trace_ata_qc_complete_internal(qc); 4921 4901 __ata_qc_complete(qc); 4922 4902 return; 4923 4903 } ··· 4929 4907 */ 4930 4908 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4931 4909 fill_result_tf(qc); 4910 + trace_ata_qc_complete_failed(qc); 4932 4911 ata_qc_schedule_eh(qc); 4933 4912 return; 4934 4913 } ··· 4940 4917 if 
(qc->flags & ATA_QCFLAG_RESULT_TF) 4941 4918 fill_result_tf(qc); 4942 4919 4920 + trace_ata_qc_complete_done(qc); 4943 4921 /* Some commands need post-processing after successful 4944 4922 * completion. 4945 4923 */ ··· 5088 5064 } 5089 5065 5090 5066 ap->ops->qc_prep(qc); 5091 - 5067 + trace_ata_qc_issue(qc); 5092 5068 qc->err_mask |= ap->ops->qc_issue(qc); 5093 5069 if (unlikely(qc->err_mask)) 5094 5070 goto err;
+138 -30
drivers/ata/libata-eh.c
··· 46 46 47 47 #include <linux/libata.h> 48 48 49 + #include <trace/events/libata.h> 49 50 #include "libata.h" 50 51 51 52 enum { ··· 1511 1510 DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page); 1512 1511 1513 1512 ata_tf_init(dev, &tf); 1514 - tf.command = ATA_CMD_READ_LOG_EXT; 1513 + if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id)) { 1514 + tf.command = ATA_CMD_READ_LOG_DMA_EXT; 1515 + tf.protocol = ATA_PROT_DMA; 1516 + } else { 1517 + tf.command = ATA_CMD_READ_LOG_EXT; 1518 + tf.protocol = ATA_PROT_PIO; 1519 + } 1515 1520 tf.lbal = log; 1516 1521 tf.lbam = page; 1517 1522 tf.nsect = sectors; 1518 1523 tf.hob_nsect = sectors >> 8; 1519 1524 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 1520 - tf.protocol = ATA_PROT_PIO; 1521 1525 1522 1526 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 1523 1527 buf, sectors * ATA_SECT_SIZE, 0); ··· 1581 1575 tf->hob_lbah = buf[10]; 1582 1576 tf->nsect = buf[12]; 1583 1577 tf->hob_nsect = buf[13]; 1578 + if (ata_id_has_ncq_autosense(dev->id)) 1579 + tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16]; 1584 1580 1585 1581 return 0; 1586 1582 } ··· 1615 1607 err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 1616 1608 if (err_mask == AC_ERR_DEV) 1617 1609 *r_sense_key = tf.feature >> 4; 1610 + return err_mask; 1611 + } 1612 + 1613 + /** 1614 + * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT 1615 + * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to 1616 + * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 1617 + * @dfl_sense_key: default sense key to use 1618 + * 1619 + * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK 1620 + * SENSE. This function is EH helper. 1621 + * 1622 + * LOCKING: 1623 + * Kernel thread context (may sleep). 1624 + * 1625 + * RETURNS: 1626 + * encoded sense data on success, 0 on failure or if sense data 1627 + * is not available. 
1628 + */ 1629 + static u32 ata_eh_request_sense(struct ata_queued_cmd *qc, 1630 + struct scsi_cmnd *cmd) 1631 + { 1632 + struct ata_device *dev = qc->dev; 1633 + struct ata_taskfile tf; 1634 + unsigned int err_mask; 1635 + 1636 + if (!cmd) 1637 + return 0; 1638 + 1639 + DPRINTK("ATA request sense\n"); 1640 + ata_dev_warn(dev, "request sense\n"); 1641 + if (!ata_id_sense_reporting_enabled(dev->id)) { 1642 + ata_dev_warn(qc->dev, "sense data reporting disabled\n"); 1643 + return 0; 1644 + } 1645 + ata_tf_init(dev, &tf); 1646 + 1647 + tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1648 + tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 1649 + tf.command = ATA_CMD_REQ_SENSE_DATA; 1650 + tf.protocol = ATA_PROT_NODATA; 1651 + 1652 + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1653 + /* 1654 + * ACS-4 states: 1655 + * The device may set the SENSE DATA AVAILABLE bit to one in the 1656 + * STATUS field and clear the ERROR bit to zero in the STATUS field 1657 + * to indicate that the command returned completion without an error 1658 + * and the sense data described in table 306 is available. 1659 + * 1660 + * IOW the 'ATA_SENSE' bit might not be set even though valid 1661 + * sense data is available. 1662 + * So check for both. 
1663 + */ 1664 + if ((tf.command & ATA_SENSE) || 1665 + tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) { 1666 + ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal); 1667 + qc->flags |= ATA_QCFLAG_SENSE_VALID; 1668 + ata_dev_warn(dev, "sense data %02x/%02x/%02x\n", 1669 + tf.lbah, tf.lbam, tf.lbal); 1670 + } else { 1671 + ata_dev_warn(dev, "request sense failed stat %02x emask %x\n", 1672 + tf.command, err_mask); 1673 + } 1618 1674 return err_mask; 1619 1675 } 1620 1676 ··· 1844 1772 memcpy(&qc->result_tf, &tf, sizeof(tf)); 1845 1773 qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 1846 1774 qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1775 + if (qc->result_tf.auxiliary) { 1776 + char sense_key, asc, ascq; 1777 + 1778 + sense_key = (qc->result_tf.auxiliary >> 16) & 0xff; 1779 + asc = (qc->result_tf.auxiliary >> 8) & 0xff; 1780 + ascq = qc->result_tf.auxiliary & 0xff; 1781 + ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n", 1782 + sense_key, asc, ascq); 1783 + ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq); 1784 + ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf); 1785 + qc->flags |= ATA_QCFLAG_SENSE_VALID; 1786 + } 1787 + 1847 1788 ehc->i.err_mask &= ~AC_ERR_DEV; 1848 1789 } 1849 1790 ··· 1885 1800 qc->err_mask |= AC_ERR_HSM; 1886 1801 return ATA_EH_RESET; 1887 1802 } 1803 + 1804 + /* 1805 + * Sense data reporting does not work if the 1806 + * device fault bit is set. 
1807 + */ 1808 + if ((stat & ATA_SENSE) && !(stat & ATA_DF) && 1809 + !(qc->flags & ATA_QCFLAG_SENSE_VALID)) { 1810 + if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 1811 + tmp = ata_eh_request_sense(qc, qc->scsicmd); 1812 + if (tmp) 1813 + qc->err_mask |= tmp; 1814 + else 1815 + ata_scsi_set_sense_information(qc->scsicmd, tf); 1816 + } else { 1817 + ata_dev_warn(qc->dev, "sense data available but port frozen\n"); 1818 + } 1819 + } 1820 + 1821 + /* Set by NCQ autosense or request sense above */ 1822 + if (qc->flags & ATA_QCFLAG_SENSE_VALID) 1823 + return 0; 1888 1824 1889 1825 if (stat & (ATA_ERR | ATA_DF)) 1890 1826 qc->err_mask |= AC_ERR_DEV; ··· 2292 2186 all_err_mask |= qc->err_mask; 2293 2187 if (qc->flags & ATA_QCFLAG_IO) 2294 2188 eflags |= ATA_EFLAG_IS_IO; 2189 + trace_ata_eh_link_autopsy_qc(qc); 2295 2190 } 2296 2191 2297 2192 /* enforce default EH actions */ ··· 2327 2220 eflags |= ATA_EFLAG_DUBIOUS_XFER; 2328 2221 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 2329 2222 } 2330 - 2223 + trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); 2331 2224 DPRINTK("EXIT\n"); 2332 2225 } 2333 2226 ··· 2396 2289 const char *text; 2397 2290 } cmd_descr[] = { 2398 2291 { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 2399 - { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 2400 - { ATA_CMD_STANDBY, "STANDBY" }, 2401 - { ATA_CMD_IDLE, "IDLE" }, 2402 - { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 2403 - { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 2292 + { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 2293 + { ATA_CMD_STANDBY, "STANDBY" }, 2294 + { ATA_CMD_IDLE, "IDLE" }, 2295 + { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 2296 + { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 2404 2297 { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" }, 2405 2298 { ATA_CMD_NOP, "NOP" }, 2406 - { ATA_CMD_FLUSH, "FLUSH CACHE" }, 2407 - { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 2408 - { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 2409 - { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" 
}, 2410 - { ATA_CMD_SERVICE, "SERVICE" }, 2411 - { ATA_CMD_READ, "READ DMA" }, 2412 - { ATA_CMD_READ_EXT, "READ DMA EXT" }, 2413 - { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 2414 - { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 2299 + { ATA_CMD_FLUSH, "FLUSH CACHE" }, 2300 + { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 2301 + { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 2302 + { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 2303 + { ATA_CMD_SERVICE, "SERVICE" }, 2304 + { ATA_CMD_READ, "READ DMA" }, 2305 + { ATA_CMD_READ_EXT, "READ DMA EXT" }, 2306 + { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 2307 + { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 2415 2308 { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 2416 - { ATA_CMD_WRITE, "WRITE DMA" }, 2417 - { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 2418 - { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 2419 - { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 2309 + { ATA_CMD_WRITE, "WRITE DMA" }, 2310 + { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 2311 + { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 2312 + { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 2420 2313 { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 2421 2314 { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 2422 2315 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, ··· 2432 2325 { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 2433 2326 { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 2434 2327 { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 2435 - { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, 2328 + { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, 2436 2329 { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 2437 2330 { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 2438 2331 { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, ··· 2449 2342 { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 2450 2343 { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 2451 2344 { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 2452 - { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG 
DMA EXT" }, 2345 + { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 2453 2346 { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, 2454 2347 { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 2455 - { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 2348 + { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 2456 2349 { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 2457 - { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 2350 + { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 2458 2351 { ATA_CMD_PMP_READ, "READ BUFFER" }, 2459 2352 { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" }, 2460 2353 { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, ··· 2471 2364 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 2472 2365 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2473 2366 { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, 2474 - { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 2475 - { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 2367 + { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 2368 + { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 2476 2369 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 2477 2370 { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 2478 2371 { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 2479 - { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 2372 + { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 2480 2373 { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" }, 2481 2374 { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" }, 2482 2375 { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, ··· 2650 2543 2651 2544 #ifdef CONFIG_ATA_VERBOSE_ERROR 2652 2545 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 2653 - ATA_ERR)) { 2546 + ATA_SENSE | ATA_ERR)) { 2654 2547 if (res->command & ATA_BUSY) 2655 2548 ata_dev_err(qc->dev, "status: { Busy }\n"); 2656 2549 else 2657 - ata_dev_err(qc->dev, "status: { %s%s%s%s}\n", 2550 + ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", 2658 2551 res->command & ATA_DRDY ? 
"DRDY " : "", 2659 2552 res->command & ATA_DF ? "DF " : "", 2660 2553 res->command & ATA_DRQ ? "DRQ " : "", 2554 + res->command & ATA_SENSE ? "SENSE " : "", 2661 2555 res->command & ATA_ERR ? "ERR " : ""); 2662 2556 } 2663 2557
+75 -41
drivers/ata/libata-scsi.c
··· 270 270 ata_scsi_park_show, ata_scsi_park_store); 271 271 EXPORT_SYMBOL_GPL(dev_attr_unload_heads); 272 272 273 - static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) 273 + void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) 274 274 { 275 + if (!cmd) 276 + return; 277 + 275 278 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 276 279 277 280 scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq); 281 + } 282 + 283 + void ata_scsi_set_sense_information(struct scsi_cmnd *cmd, 284 + const struct ata_taskfile *tf) 285 + { 286 + u64 information; 287 + 288 + if (!cmd) 289 + return; 290 + 291 + information = ata_tf_read_block(tf, NULL); 292 + scsi_set_sense_information(cmd->sense_buffer, information); 278 293 } 279 294 280 295 static ssize_t ··· 814 799 if (stat & ATA_BUSY) { 815 800 printk("Busy }\n"); /* Data is not valid in this case */ 816 801 } else { 817 - if (stat & 0x40) printk("DriveReady "); 818 - if (stat & 0x20) printk("DeviceFault "); 819 - if (stat & 0x10) printk("SeekComplete "); 820 - if (stat & 0x08) printk("DataRequest "); 821 - if (stat & 0x04) printk("CorrectedError "); 822 - if (stat & 0x02) printk("Index "); 823 - if (stat & 0x01) printk("Error "); 802 + if (stat & ATA_DRDY) printk("DriveReady "); 803 + if (stat & ATA_DF) printk("DeviceFault "); 804 + if (stat & ATA_DSC) printk("SeekComplete "); 805 + if (stat & ATA_DRQ) printk("DataRequest "); 806 + if (stat & ATA_CORR) printk("CorrectedError "); 807 + if (stat & ATA_SENSE) printk("Sense "); 808 + if (stat & ATA_ERR) printk("Error "); 824 809 printk("}\n"); 825 810 826 811 if (err) { 827 812 printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err); 828 - if (err & 0x04) printk("DriveStatusError "); 829 - if (err & 0x80) { 830 - if (err & 0x04) printk("BadCRC "); 813 + if (err & ATA_ABORTED) printk("DriveStatusError "); 814 + if (err & ATA_ICRC) { 815 + if (err & ATA_ABORTED) 816 + printk("BadCRC "); 831 817 else printk("Sector "); 832 
818 } 833 - if (err & 0x40) printk("UncorrectableError "); 834 - if (err & 0x10) printk("SectorIdNotFound "); 835 - if (err & 0x02) printk("TrackZeroNotFound "); 836 - if (err & 0x01) printk("AddrMarkNotFound "); 819 + if (err & ATA_UNC) printk("UncorrectableError "); 820 + if (err & ATA_IDNF) printk("SectorIdNotFound "); 821 + if (err & ATA_TRK0NF) printk("TrackZeroNotFound "); 822 + if (err & ATA_AMNF) printk("AddrMarkNotFound "); 837 823 printk("}\n"); 838 824 } 839 825 } ··· 865 849 /* Based on the 3ware driver translation table */ 866 850 static const unsigned char sense_table[][4] = { 867 851 /* BBD|ECC|ID|MAR */ 868 - {0xd1, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command 852 + {0xd1, ABORTED_COMMAND, 0x00, 0x00}, 853 + // Device busy Aborted command 869 854 /* BBD|ECC|ID */ 870 - {0xd0, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command 855 + {0xd0, ABORTED_COMMAND, 0x00, 0x00}, 856 + // Device busy Aborted command 871 857 /* ECC|MC|MARK */ 872 - {0x61, HARDWARE_ERROR, 0x00, 0x00}, // Device fault Hardware error 858 + {0x61, HARDWARE_ERROR, 0x00, 0x00}, 859 + // Device fault Hardware error 873 860 /* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */ 874 - {0x84, ABORTED_COMMAND, 0x47, 0x00}, // Data CRC error SCSI parity error 861 + {0x84, ABORTED_COMMAND, 0x47, 0x00}, 862 + // Data CRC error SCSI parity error 875 863 /* MC|ID|ABRT|TRK0|MARK */ 876 - {0x37, NOT_READY, 0x04, 0x00}, // Unit offline Not ready 864 + {0x37, NOT_READY, 0x04, 0x00}, 865 + // Unit offline Not ready 877 866 /* MCR|MARK */ 878 - {0x09, NOT_READY, 0x04, 0x00}, // Unrecovered disk error Not ready 867 + {0x09, NOT_READY, 0x04, 0x00}, 868 + // Unrecovered disk error Not ready 879 869 /* Bad address mark */ 880 - {0x01, MEDIUM_ERROR, 0x13, 0x00}, // Address mark not found Address mark not found for data field 881 - /* TRK0 */ 882 - {0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error 870 + {0x01, MEDIUM_ERROR, 0x13, 0x00}, 871 + // Address mark not found 
for data field 872 + /* TRK0 - Track 0 not found */ 873 + {0x02, HARDWARE_ERROR, 0x00, 0x00}, 874 + // Hardware error 883 875 /* Abort: 0x04 is not translated here, see below */ 884 876 /* Media change request */ 885 - {0x08, NOT_READY, 0x04, 0x00}, // Media change request FIXME: faking offline 886 - /* SRV/IDNF */ 887 - {0x10, ILLEGAL_REQUEST, 0x21, 0x00}, // ID not found Logical address out of range 888 - /* MC */ 889 - {0x20, UNIT_ATTENTION, 0x28, 0x00}, // Media Changed Not ready to ready change, medium may have changed 890 - /* ECC */ 891 - {0x40, MEDIUM_ERROR, 0x11, 0x04}, // Uncorrectable ECC error Unrecovered read error 877 + {0x08, NOT_READY, 0x04, 0x00}, 878 + // FIXME: faking offline 879 + /* SRV/IDNF - ID not found */ 880 + {0x10, ILLEGAL_REQUEST, 0x21, 0x00}, 881 + // Logical address out of range 882 + /* MC - Media Changed */ 883 + {0x20, UNIT_ATTENTION, 0x28, 0x00}, 884 + // Not ready to ready change, medium may have changed 885 + /* ECC - Uncorrectable ECC error */ 886 + {0x40, MEDIUM_ERROR, 0x11, 0x04}, 887 + // Unrecovered read error 892 888 /* BBD - block marked bad */ 893 - {0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error 889 + {0x80, MEDIUM_ERROR, 0x11, 0x04}, 890 + // Block marked bad Medium error, unrecovered read error 894 891 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark 895 892 }; 896 893 static const unsigned char stat_table[][4] = { 897 894 /* Must be first because BUSY means no other bits valid */ 898 - {0x80, ABORTED_COMMAND, 0x47, 0x00}, // Busy, fake parity for now 899 - {0x20, HARDWARE_ERROR, 0x44, 0x00}, // Device fault, internal target failure 900 - {0x08, ABORTED_COMMAND, 0x47, 0x00}, // Timed out in xfer, fake parity for now 901 - {0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered 895 + {0x80, ABORTED_COMMAND, 0x47, 0x00}, 896 + // Busy, fake parity for now 897 + {0x40, ILLEGAL_REQUEST, 0x21, 0x04}, 898 + // Device ready, unaligned write command 899 + {0x20, 
HARDWARE_ERROR, 0x44, 0x00}, 900 + // Device fault, internal target failure 901 + {0x08, ABORTED_COMMAND, 0x47, 0x00}, 902 + // Timed out in xfer, fake parity for now 903 + {0x04, RECOVERED_ERROR, 0x11, 0x00}, 904 + // Recovered ECC error Medium error, recovered 902 905 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark 903 906 }; 904 907 ··· 1792 1757 ((cdb[2] & 0x20) || need_sense)) { 1793 1758 ata_gen_passthru_sense(qc); 1794 1759 } else { 1795 - if (!need_sense) { 1760 + if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 1761 + cmd->result = SAM_STAT_CHECK_CONDITION; 1762 + } else if (!need_sense) { 1796 1763 cmd->result = SAM_STAT_GOOD; 1797 1764 } else { 1798 1765 /* TODO: decide which descriptor format to use ··· 4277 4240 unsigned int i, tag; 4278 4241 4279 4242 for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) { 4280 - if (ap->flags & ATA_FLAG_LOWTAG) 4281 - tag = 1; 4282 - else 4283 - tag = tag < max_queue ? tag : 0; 4243 + tag = tag < max_queue ? tag : 0; 4284 4244 4285 4245 /* the last tag is reserved for internal command. */ 4286 4246 if (tag == ATA_TAG_INTERNAL)
+2 -2
drivers/ata/libata-sff.c
··· 3220 3220 * ->sff_irq_clear method. Try to initialize bmdma_addr 3221 3221 * regardless of dma masks. 3222 3222 */ 3223 - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 3223 + rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); 3224 3224 if (rc) 3225 3225 ata_bmdma_nodma(host, "failed to set dma mask"); 3226 3226 if (!rc) { 3227 - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 3227 + rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); 3228 3228 if (rc) 3229 3229 ata_bmdma_nodma(host, 3230 3230 "failed to set consistent dma mask");
+151
drivers/ata/libata-trace.c
··· 1 + /* 2 + * libata-trace.c - trace functions for libata 3 + * 4 + * Copyright 2015 Hannes Reinecke 5 + * Copyright 2015 SUSE Linux GmbH 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2, or (at your option) 10 + * any later version. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License 18 + * along with this program; see the file COPYING. If not, write to 19 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 20 + */ 21 + 22 + #include <linux/kernel.h> 23 + #include <linux/trace_seq.h> 24 + #include <trace/events/libata.h> 25 + 26 + const char * 27 + libata_trace_parse_status(struct trace_seq *p, unsigned char status) 28 + { 29 + const char *ret = trace_seq_buffer_ptr(p); 30 + 31 + trace_seq_printf(p, "{ "); 32 + if (status & ATA_BUSY) 33 + trace_seq_printf(p, "BUSY "); 34 + if (status & ATA_DRDY) 35 + trace_seq_printf(p, "DRDY "); 36 + if (status & ATA_DF) 37 + trace_seq_printf(p, "DF "); 38 + if (status & ATA_DSC) 39 + trace_seq_printf(p, "DSC "); 40 + if (status & ATA_DRQ) 41 + trace_seq_printf(p, "DRQ "); 42 + if (status & ATA_CORR) 43 + trace_seq_printf(p, "CORR "); 44 + if (status & ATA_SENSE) 45 + trace_seq_printf(p, "SENSE "); 46 + if (status & ATA_ERR) 47 + trace_seq_printf(p, "ERR "); 48 + trace_seq_putc(p, '}'); 49 + trace_seq_putc(p, 0); 50 + 51 + return ret; 52 + } 53 + 54 + const char * 55 + libata_trace_parse_eh_action(struct trace_seq *p, unsigned int eh_action) 56 + { 57 + const char *ret = trace_seq_buffer_ptr(p); 58 + 59 + trace_seq_printf(p, "%x", eh_action); 60 + if 
(eh_action) { 61 + trace_seq_printf(p, "{ "); 62 + if (eh_action & ATA_EH_REVALIDATE) 63 + trace_seq_printf(p, "REVALIDATE "); 64 + if (eh_action & (ATA_EH_SOFTRESET | ATA_EH_HARDRESET)) 65 + trace_seq_printf(p, "RESET "); 66 + else if (eh_action & ATA_EH_SOFTRESET) 67 + trace_seq_printf(p, "SOFTRESET "); 68 + else if (eh_action & ATA_EH_HARDRESET) 69 + trace_seq_printf(p, "HARDRESET "); 70 + if (eh_action & ATA_EH_ENABLE_LINK) 71 + trace_seq_printf(p, "ENABLE_LINK "); 72 + if (eh_action & ATA_EH_PARK) 73 + trace_seq_printf(p, "PARK "); 74 + trace_seq_putc(p, '}'); 75 + } 76 + trace_seq_putc(p, 0); 77 + 78 + return ret; 79 + } 80 + 81 + const char * 82 + libata_trace_parse_eh_err_mask(struct trace_seq *p, unsigned int eh_err_mask) 83 + { 84 + const char *ret = trace_seq_buffer_ptr(p); 85 + 86 + trace_seq_printf(p, "%x", eh_err_mask); 87 + if (eh_err_mask) { 88 + trace_seq_printf(p, "{ "); 89 + if (eh_err_mask & AC_ERR_DEV) 90 + trace_seq_printf(p, "DEV "); 91 + if (eh_err_mask & AC_ERR_HSM) 92 + trace_seq_printf(p, "HSM "); 93 + if (eh_err_mask & AC_ERR_TIMEOUT) 94 + trace_seq_printf(p, "TIMEOUT "); 95 + if (eh_err_mask & AC_ERR_MEDIA) 96 + trace_seq_printf(p, "MEDIA "); 97 + if (eh_err_mask & AC_ERR_ATA_BUS) 98 + trace_seq_printf(p, "ATA_BUS "); 99 + if (eh_err_mask & AC_ERR_HOST_BUS) 100 + trace_seq_printf(p, "HOST_BUS "); 101 + if (eh_err_mask & AC_ERR_SYSTEM) 102 + trace_seq_printf(p, "SYSTEM "); 103 + if (eh_err_mask & AC_ERR_INVALID) 104 + trace_seq_printf(p, "INVALID "); 105 + if (eh_err_mask & AC_ERR_OTHER) 106 + trace_seq_printf(p, "OTHER "); 107 + if (eh_err_mask & AC_ERR_NODEV_HINT) 108 + trace_seq_printf(p, "NODEV_HINT "); 109 + if (eh_err_mask & AC_ERR_NCQ) 110 + trace_seq_printf(p, "NCQ "); 111 + trace_seq_putc(p, '}'); 112 + } 113 + trace_seq_putc(p, 0); 114 + 115 + return ret; 116 + } 117 + 118 + const char * 119 + libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags) 120 + { 121 + const char *ret = trace_seq_buffer_ptr(p); 122 + 
123 + trace_seq_printf(p, "%x", qc_flags); 124 + if (qc_flags) { 125 + trace_seq_printf(p, "{ "); 126 + if (qc_flags & ATA_QCFLAG_ACTIVE) 127 + trace_seq_printf(p, "ACTIVE "); 128 + if (qc_flags & ATA_QCFLAG_DMAMAP) 129 + trace_seq_printf(p, "DMAMAP "); 130 + if (qc_flags & ATA_QCFLAG_IO) 131 + trace_seq_printf(p, "IO "); 132 + if (qc_flags & ATA_QCFLAG_RESULT_TF) 133 + trace_seq_printf(p, "RESULT_TF "); 134 + if (qc_flags & ATA_QCFLAG_CLEAR_EXCL) 135 + trace_seq_printf(p, "CLEAR_EXCL "); 136 + if (qc_flags & ATA_QCFLAG_QUIET) 137 + trace_seq_printf(p, "QUIET "); 138 + if (qc_flags & ATA_QCFLAG_RETRY) 139 + trace_seq_printf(p, "RETRY "); 140 + if (qc_flags & ATA_QCFLAG_FAILED) 141 + trace_seq_printf(p, "FAILED "); 142 + if (qc_flags & ATA_QCFLAG_SENSE_VALID) 143 + trace_seq_printf(p, "SENSE_VALID "); 144 + if (qc_flags & ATA_QCFLAG_EH_SCHEDULED) 145 + trace_seq_printf(p, "EH_SCHEDULED "); 146 + trace_seq_putc(p, '}'); 147 + } 148 + trace_seq_putc(p, 0); 149 + 150 + return ret; 151 + }
+5 -1
drivers/ata/libata.h
··· 67 67 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 68 68 u64 block, u32 n_block, unsigned int tf_flags, 69 69 unsigned int tag); 70 - extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev); 70 + extern u64 ata_tf_read_block(const struct ata_taskfile *tf, 71 + struct ata_device *dev); 71 72 extern unsigned ata_exec_internal(struct ata_device *dev, 72 73 struct ata_taskfile *tf, const u8 *cdb, 73 74 int dma_dir, void *buf, unsigned int buflen, ··· 138 137 struct scsi_host_template *sht); 139 138 extern void ata_scsi_scan_host(struct ata_port *ap, int sync); 140 139 extern int ata_scsi_offline_dev(struct ata_device *dev); 140 + extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq); 141 + extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd, 142 + const struct ata_taskfile *tf); 141 143 extern void ata_scsi_media_change_notify(struct ata_device *dev); 142 144 extern void ata_scsi_hotplug(struct work_struct *work); 143 145 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
+2 -2
drivers/ata/pata_atp867x.c
··· 475 475 476 476 atp867x_fixup(host); 477 477 478 - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 478 + rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); 479 479 if (rc) 480 480 return rc; 481 481 482 - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 482 + rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); 483 483 return rc; 484 484 } 485 485
+2 -2
drivers/ata/pata_cs5520.c
··· 164 164 return -ENODEV; 165 165 } 166 166 167 - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 167 + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { 168 168 printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n"); 169 169 return -ENODEV; 170 170 } 171 - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { 171 + if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { 172 172 printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n"); 173 173 return -ENODEV; 174 174 }
+2 -2
drivers/ata/pata_hpt3x3.c
··· 221 221 if (rc) 222 222 return rc; 223 223 host->iomap = pcim_iomap_table(pdev); 224 - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 224 + rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); 225 225 if (rc) 226 226 return rc; 227 - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 227 + rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); 228 228 if (rc) 229 229 return rc; 230 230
+2 -2
drivers/ata/pata_ninja32.c
··· 122 122 return rc; 123 123 124 124 host->iomap = pcim_iomap_table(dev); 125 - rc = pci_set_dma_mask(dev, ATA_DMA_MASK); 125 + rc = dma_set_mask(&dev->dev, ATA_DMA_MASK); 126 126 if (rc) 127 127 return rc; 128 - rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK); 128 + rc = dma_set_coherent_mask(&dev->dev, ATA_DMA_MASK); 129 129 if (rc) 130 130 return rc; 131 131 pci_set_master(dev);
+2 -2
drivers/ata/pata_pdc2027x.c
··· 730 730 return rc; 731 731 host->iomap = pcim_iomap_table(pdev); 732 732 733 - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 733 + rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); 734 734 if (rc) 735 735 return rc; 736 736 737 - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 737 + rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); 738 738 if (rc) 739 739 return rc; 740 740
+2 -2
drivers/ata/pata_scc.c
··· 1029 1029 if (rc) 1030 1030 return rc; 1031 1031 1032 - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 1032 + rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); 1033 1033 if (rc) 1034 1034 return rc; 1035 - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 1035 + rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); 1036 1036 if (rc) 1037 1037 return rc; 1038 1038
+2 -2
drivers/ata/pata_sil680.c
··· 374 374 host->iomap = pcim_iomap_table(pdev); 375 375 376 376 /* Setup DMA masks */ 377 - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 377 + rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); 378 378 if (rc) 379 379 return rc; 380 - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 380 + rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); 381 381 if (rc) 382 382 return rc; 383 383 pci_set_master(pdev);
+2 -2
drivers/ata/pdc_adma.c
··· 593 593 { 594 594 int rc; 595 595 596 - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 596 + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 597 597 if (rc) { 598 598 dev_err(&pdev->dev, "32-bit DMA enable failed\n"); 599 599 return rc; 600 600 } 601 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 601 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 602 602 if (rc) { 603 603 dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); 604 604 return rc;
+181 -669
drivers/ata/sata_dwc_460ex.c
··· 36 36 #include <linux/platform_device.h> 37 37 #include <linux/libata.h> 38 38 #include <linux/slab.h> 39 + 39 40 #include "libata.h" 40 41 41 42 #include <scsi/scsi_host.h> 42 43 #include <scsi/scsi_cmnd.h> 44 + 45 + /* Supported DMA engine drivers */ 46 + #include <linux/platform_data/dma-dw.h> 47 + #include <linux/dma/dw.h> 43 48 44 49 /* These two are defined in "libata.h" */ 45 50 #undef DRV_NAME ··· 65 60 #define NO_IRQ 0 66 61 #endif 67 62 68 - /* SATA DMA driver Globals */ 69 - #define DMA_NUM_CHANS 1 70 - #define DMA_NUM_CHAN_REGS 8 71 - 72 - /* SATA DMA Register definitions */ 73 63 #define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/ 74 64 75 - struct dmareg { 76 - u32 low; /* Low bits 0-31 */ 77 - u32 high; /* High bits 32-63 */ 78 - }; 79 - 80 - /* DMA Per Channel registers */ 81 - struct dma_chan_regs { 82 - struct dmareg sar; /* Source Address */ 83 - struct dmareg dar; /* Destination address */ 84 - struct dmareg llp; /* Linked List Pointer */ 85 - struct dmareg ctl; /* Control */ 86 - struct dmareg sstat; /* Source Status not implemented in core */ 87 - struct dmareg dstat; /* Destination Status not implemented in core*/ 88 - struct dmareg sstatar; /* Source Status Address not impl in core */ 89 - struct dmareg dstatar; /* Destination Status Address not implemente */ 90 - struct dmareg cfg; /* Config */ 91 - struct dmareg sgr; /* Source Gather */ 92 - struct dmareg dsr; /* Destination Scatter */ 93 - }; 94 - 95 - /* Generic Interrupt Registers */ 96 - struct dma_interrupt_regs { 97 - struct dmareg tfr; /* Transfer Interrupt */ 98 - struct dmareg block; /* Block Interrupt */ 99 - struct dmareg srctran; /* Source Transfer Interrupt */ 100 - struct dmareg dsttran; /* Dest Transfer Interrupt */ 101 - struct dmareg error; /* Error */ 102 - }; 103 - 104 - struct ahb_dma_regs { 105 - struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS]; 106 - struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */ 107 - struct dma_interrupt_regs 
interrupt_status; /* Interrupt Status */ 108 - struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */ 109 - struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */ 110 - struct dmareg statusInt; /* Interrupt combined*/ 111 - struct dmareg rq_srcreg; /* Src Trans Req */ 112 - struct dmareg rq_dstreg; /* Dst Trans Req */ 113 - struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req*/ 114 - struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req*/ 115 - struct dmareg rq_lst_srcreg; /* Last Src Trans Req*/ 116 - struct dmareg rq_lst_dstreg; /* Last Dst Trans Req*/ 117 - struct dmareg dma_cfg; /* DMA Config */ 118 - struct dmareg dma_chan_en; /* DMA Channel Enable*/ 119 - struct dmareg dma_id; /* DMA ID */ 120 - struct dmareg dma_test; /* DMA Test */ 121 - struct dmareg res1; /* reserved */ 122 - struct dmareg res2; /* reserved */ 123 - /* 124 - * DMA Comp Params 125 - * Param 6 = dma_param[0], Param 5 = dma_param[1], 126 - * Param 4 = dma_param[2] ... 127 - */ 128 - struct dmareg dma_params[6]; 129 - }; 130 - 131 - /* Data structure for linked list item */ 132 - struct lli { 133 - u32 sar; /* Source Address */ 134 - u32 dar; /* Destination address */ 135 - u32 llp; /* Linked List Pointer */ 136 - struct dmareg ctl; /* Control */ 137 - struct dmareg dstat; /* Destination Status */ 138 - }; 139 - 140 65 enum { 141 - SATA_DWC_DMAC_LLI_SZ = (sizeof(struct lli)), 142 - SATA_DWC_DMAC_LLI_NUM = 256, 143 - SATA_DWC_DMAC_LLI_TBL_SZ = (SATA_DWC_DMAC_LLI_SZ * \ 144 - SATA_DWC_DMAC_LLI_NUM), 145 - SATA_DWC_DMAC_TWIDTH_BYTES = 4, 146 - SATA_DWC_DMAC_CTRL_TSIZE_MAX = (0x00000800 * \ 147 - SATA_DWC_DMAC_TWIDTH_BYTES), 148 - }; 149 - 150 - /* DMA Register Operation Bits */ 151 - enum { 152 - DMA_EN = 0x00000001, /* Enable AHB DMA */ 153 - DMA_CTL_LLP_SRCEN = 0x10000000, /* Blk chain enable Src */ 154 - DMA_CTL_LLP_DSTEN = 0x08000000, /* Blk chain enable Dst */ 155 - }; 156 - 157 - #define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */ 158 - #define 
DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */ 159 - /* Enable channel */ 160 - #define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \ 161 - ((0x000000001 << (ch)) << 8)) 162 - /* Disable channel */ 163 - #define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8)) 164 - /* Transfer Type & Flow Controller */ 165 - #define DMA_CTL_TTFC(type) (((type) & 0x7) << 20) 166 - #define DMA_CTL_SMS(num) (((num) & 0x3) << 25) /* Src Master Select */ 167 - #define DMA_CTL_DMS(num) (((num) & 0x3) << 23)/* Dst Master Select */ 168 - /* Src Burst Transaction Length */ 169 - #define DMA_CTL_SRC_MSIZE(size) (((size) & 0x7) << 14) 170 - /* Dst Burst Transaction Length */ 171 - #define DMA_CTL_DST_MSIZE(size) (((size) & 0x7) << 11) 172 - /* Source Transfer Width */ 173 - #define DMA_CTL_SRC_TRWID(size) (((size) & 0x7) << 4) 174 - /* Destination Transfer Width */ 175 - #define DMA_CTL_DST_TRWID(size) (((size) & 0x7) << 1) 176 - 177 - /* Assign HW handshaking interface (x) to destination / source peripheral */ 178 - #define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11) 179 - #define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7) 180 - #define DMA_CFG_HW_CH_PRIOR(int_num) (((int_num) & 0xF) << 5) 181 - #define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master)) 182 - 183 - /* 184 - * This define is used to set block chaining disabled in the control low 185 - * register. It is already in little endian format so it can be &'d dirctly. 
186 - * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN)) 187 - */ 188 - enum { 189 - DMA_CTL_LLP_DISABLE_LE32 = 0xffffffe7, 190 - DMA_CTL_TTFC_P2M_DMAC = 0x00000002, /* Per to mem, DMAC cntr */ 191 - DMA_CTL_TTFC_M2P_PER = 0x00000003, /* Mem to per, peripheral cntr */ 192 - DMA_CTL_SINC_INC = 0x00000000, /* Source Address Increment */ 193 - DMA_CTL_SINC_DEC = 0x00000200, 194 - DMA_CTL_SINC_NOCHANGE = 0x00000400, 195 - DMA_CTL_DINC_INC = 0x00000000, /* Destination Address Increment */ 196 - DMA_CTL_DINC_DEC = 0x00000080, 197 - DMA_CTL_DINC_NOCHANGE = 0x00000100, 198 - DMA_CTL_INT_EN = 0x00000001, /* Interrupt Enable */ 199 - 200 - /* Channel Configuration Register high bits */ 201 - DMA_CFG_FCMOD_REQ = 0x00000001, /* Flow Control - request based */ 202 - DMA_CFG_PROTCTL = (0x00000003 << 2),/* Protection Control */ 203 - 204 - /* Channel Configuration Register low bits */ 205 - DMA_CFG_RELD_DST = 0x80000000, /* Reload Dest / Src Addr */ 206 - DMA_CFG_RELD_SRC = 0x40000000, 207 - DMA_CFG_HS_SELSRC = 0x00000800, /* Software handshake Src/ Dest */ 208 - DMA_CFG_HS_SELDST = 0x00000400, 209 - DMA_CFG_FIFOEMPTY = (0x00000001 << 9), /* FIFO Empty bit */ 210 - 211 - /* Channel Linked List Pointer Register */ 212 - DMA_LLP_AHBMASTER1 = 0, /* List Master Select */ 213 - DMA_LLP_AHBMASTER2 = 1, 214 - 215 66 SATA_DWC_MAX_PORTS = 1, 216 67 217 68 SATA_DWC_SCR_OFFSET = 0x24, ··· 148 287 struct ata_host *host; 149 288 u8 __iomem *reg_base; 150 289 struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */ 151 - int irq_dma; 290 + struct dw_dma_chip *dma; 152 291 }; 153 292 154 293 #define SATA_DWC_QCMD_MAX 32 ··· 156 295 struct sata_dwc_device_port { 157 296 struct sata_dwc_device *hsdev; 158 297 int cmd_issued[SATA_DWC_QCMD_MAX]; 159 - struct lli *llit[SATA_DWC_QCMD_MAX]; /* DMA LLI table */ 160 - dma_addr_t llit_dma[SATA_DWC_QCMD_MAX]; 161 - u32 dma_chan[SATA_DWC_QCMD_MAX]; 162 298 int dma_pending[SATA_DWC_QCMD_MAX]; 299 + 300 + /* DMA info */ 
301 + struct dw_dma_slave *dws; 302 + struct dma_chan *chan; 303 + struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX]; 304 + u32 dma_interrupt_count; 163 305 }; 164 306 165 307 /* ··· 194 330 void __iomem *scr_addr_sstatus; 195 331 u32 sata_dwc_sactive_issued ; 196 332 u32 sata_dwc_sactive_queued ; 197 - u32 dma_interrupt_count; 198 - struct ahb_dma_regs *sata_dma_regs; 199 - struct device *dwc_dev; 200 - int dma_channel; 201 333 }; 202 334 203 335 static struct sata_dwc_host_priv host_pvt; 336 + 337 + static struct dw_dma_slave sata_dwc_dma_dws = { 338 + .src_id = 0, 339 + .dst_id = 0, 340 + .src_master = 0, 341 + .dst_master = 1, 342 + }; 204 343 205 344 /* 206 345 * Prototypes ··· 214 347 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status); 215 348 static void sata_dwc_port_stop(struct ata_port *ap); 216 349 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag); 217 - static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq); 218 - static void dma_dwc_exit(struct sata_dwc_device *hsdev); 219 - static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems, 220 - struct lli *lli, dma_addr_t dma_lli, 221 - void __iomem *addr, int dir); 222 - static void dma_dwc_xfer_start(int dma_ch); 223 350 224 351 static const char *get_prot_descript(u8 protocol) 225 352 { ··· 251 390 } 252 391 } 253 392 254 - static void sata_dwc_tf_dump(struct ata_taskfile *tf) 393 + static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf) 255 394 { 256 - dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:" 257 - "0x%lx device: %x\n", tf->command, 258 - get_prot_descript(tf->protocol), tf->flags, tf->device); 259 - dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x " 260 - "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal, 261 - tf->lbam, tf->lbah); 262 - dev_vdbg(host_pvt.dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x " 263 - "hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 
0x%x\n", 395 + dev_vdbg(ap->dev, 396 + "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx device: %x\n", 397 + tf->command, get_prot_descript(tf->protocol), tf->flags, 398 + tf->device); 399 + dev_vdbg(ap->dev, 400 + "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: 0x%x lbah: 0x%x\n", 401 + tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah); 402 + dev_vdbg(ap->dev, 403 + "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n", 264 404 tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, 265 405 tf->hob_lbah); 266 406 } 267 407 268 - /* 269 - * Function: get_burst_length_encode 270 - * arguments: datalength: length in bytes of data 271 - * returns value to be programmed in register corresponding to data length 272 - * This value is effectively the log(base 2) of the length 273 - */ 274 - static int get_burst_length_encode(int datalength) 408 + static void dma_dwc_xfer_done(void *hsdev_instance) 275 409 { 276 - int items = datalength >> 2; /* div by 4 to get lword count */ 277 - 278 - if (items >= 64) 279 - return 5; 280 - 281 - if (items >= 32) 282 - return 4; 283 - 284 - if (items >= 16) 285 - return 3; 286 - 287 - if (items >= 8) 288 - return 2; 289 - 290 - if (items >= 4) 291 - return 1; 292 - 293 - return 0; 294 - } 295 - 296 - static void clear_chan_interrupts(int c) 297 - { 298 - out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.tfr.low), 299 - DMA_CHANNEL(c)); 300 - out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.block.low), 301 - DMA_CHANNEL(c)); 302 - out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.srctran.low), 303 - DMA_CHANNEL(c)); 304 - out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.dsttran.low), 305 - DMA_CHANNEL(c)); 306 - out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.error.low), 307 - DMA_CHANNEL(c)); 308 - } 309 - 310 - /* 311 - * Function: dma_request_channel 312 - * arguments: None 313 - * returns channel number if available else -1 314 - * This function assigns the next available DMA channel from 
the list to the 315 - * requester 316 - */ 317 - static int dma_request_channel(void) 318 - { 319 - /* Check if the channel is not currently in use */ 320 - if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) & 321 - DMA_CHANNEL(host_pvt.dma_channel))) 322 - return host_pvt.dma_channel; 323 - dev_err(host_pvt.dwc_dev, "%s Channel %d is currently in use\n", 324 - __func__, host_pvt.dma_channel); 325 - return -1; 326 - } 327 - 328 - /* 329 - * Function: dma_dwc_interrupt 330 - * arguments: irq, dev_id, pt_regs 331 - * returns channel number if available else -1 332 - * Interrupt Handler for DW AHB SATA DMA 333 - */ 334 - static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance) 335 - { 336 - int chan; 337 - u32 tfr_reg, err_reg; 338 410 unsigned long flags; 339 411 struct sata_dwc_device *hsdev = hsdev_instance; 340 412 struct ata_host *host = (struct ata_host *)hsdev->host; ··· 281 487 hsdevp = HSDEVP_FROM_AP(ap); 282 488 tag = ap->link.active_tag; 283 489 284 - tfr_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.tfr\ 285 - .low)); 286 - err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error\ 287 - .low)); 288 - 289 - dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n", 290 - tfr_reg, err_reg, hsdevp->dma_pending[tag], port); 291 - 292 - chan = host_pvt.dma_channel; 293 - if (chan >= 0) { 294 - /* Check for end-of-transfer interrupt. */ 295 - if (tfr_reg & DMA_CHANNEL(chan)) { 296 - /* 297 - * Each DMA command produces 2 interrupts. Only 298 - * complete the command after both interrupts have been 299 - * seen. 
(See sata_dwc_isr()) 300 - */ 301 - host_pvt.dma_interrupt_count++; 302 - sata_dwc_clear_dmacr(hsdevp, tag); 303 - 304 - if (hsdevp->dma_pending[tag] == 305 - SATA_DWC_DMA_PENDING_NONE) { 306 - dev_err(ap->dev, "DMA not pending eot=0x%08x " 307 - "err=0x%08x tag=0x%02x pending=%d\n", 308 - tfr_reg, err_reg, tag, 309 - hsdevp->dma_pending[tag]); 310 - } 311 - 312 - if ((host_pvt.dma_interrupt_count % 2) == 0) 313 - sata_dwc_dma_xfer_complete(ap, 1); 314 - 315 - /* Clear the interrupt */ 316 - out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\ 317 - .tfr.low), 318 - DMA_CHANNEL(chan)); 319 - } 320 - 321 - /* Check for error interrupt. */ 322 - if (err_reg & DMA_CHANNEL(chan)) { 323 - /* TODO Need error handler ! */ 324 - dev_err(ap->dev, "error interrupt err_reg=0x%08x\n", 325 - err_reg); 326 - 327 - /* Clear the interrupt. */ 328 - out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\ 329 - .error.low), 330 - DMA_CHANNEL(chan)); 331 - } 332 - } 333 - spin_unlock_irqrestore(&host->lock, flags); 334 - return IRQ_HANDLED; 335 - } 336 - 337 - /* 338 - * Function: dma_request_interrupts 339 - * arguments: hsdev 340 - * returns status 341 - * This function registers ISR for a particular DMA channel interrupt 342 - */ 343 - static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq) 344 - { 345 - int retval = 0; 346 - int chan = host_pvt.dma_channel; 347 - 348 - if (chan >= 0) { 349 - /* Unmask error interrupt */ 350 - out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low, 351 - DMA_ENABLE_CHAN(chan)); 352 - 353 - /* Unmask end-of-transfer interrupt */ 354 - out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.tfr.low, 355 - DMA_ENABLE_CHAN(chan)); 356 - } 357 - 358 - retval = request_irq(irq, dma_dwc_interrupt, 0, "SATA DMA", hsdev); 359 - if (retval) { 360 - dev_err(host_pvt.dwc_dev, "%s: could not get IRQ %d\n", 361 - __func__, irq); 362 - return -ENODEV; 363 - } 364 - 365 - /* Mark this interrupt as requested */ 366 - hsdev->irq_dma = irq; 367 - 
return 0; 368 - } 369 - 370 - /* 371 - * Function: map_sg_to_lli 372 - * The Synopsis driver has a comment proposing that better performance 373 - * is possible by only enabling interrupts on the last item in the linked list. 374 - * However, it seems that could be a problem if an error happened on one of the 375 - * first items. The transfer would halt, but no error interrupt would occur. 376 - * Currently this function sets interrupts enabled for each linked list item: 377 - * DMA_CTL_INT_EN. 378 - */ 379 - static int map_sg_to_lli(struct scatterlist *sg, int num_elems, 380 - struct lli *lli, dma_addr_t dma_lli, 381 - void __iomem *dmadr_addr, int dir) 382 - { 383 - int i, idx = 0; 384 - int fis_len = 0; 385 - dma_addr_t next_llp; 386 - int bl; 387 - int sms_val, dms_val; 388 - 389 - sms_val = 0; 390 - dms_val = 1 + host_pvt.dma_channel; 391 - dev_dbg(host_pvt.dwc_dev, 392 - "%s: sg=%p nelem=%d lli=%p dma_lli=0x%pad dmadr=0x%p\n", 393 - __func__, sg, num_elems, lli, &dma_lli, dmadr_addr); 394 - 395 - bl = get_burst_length_encode(AHB_DMA_BRST_DFLT); 396 - 397 - for (i = 0; i < num_elems; i++, sg++) { 398 - u32 addr, offset; 399 - u32 sg_len, len; 400 - 401 - addr = (u32) sg_dma_address(sg); 402 - sg_len = sg_dma_len(sg); 403 - 404 - dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len" 405 - "=%d\n", __func__, i, addr, sg_len); 406 - 407 - while (sg_len) { 408 - if (idx >= SATA_DWC_DMAC_LLI_NUM) { 409 - /* The LLI table is not large enough. */ 410 - dev_err(host_pvt.dwc_dev, "LLI table overrun " 411 - "(idx=%d)\n", idx); 412 - break; 413 - } 414 - len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ? 415 - SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len; 416 - 417 - offset = addr & 0xffff; 418 - if ((offset + sg_len) > 0x10000) 419 - len = 0x10000 - offset; 420 - 421 - /* 422 - * Make sure a LLI block is not created that will span 423 - * 8K max FIS boundary. 
If the block spans such a FIS 424 - * boundary, there is a chance that a DMA burst will 425 - * cross that boundary -- this results in an error in 426 - * the host controller. 427 - */ 428 - if (fis_len + len > 8192) { 429 - dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len=" 430 - "%d(0x%x) len=%d(0x%x)\n", fis_len, 431 - fis_len, len, len); 432 - len = 8192 - fis_len; 433 - fis_len = 0; 434 - } else { 435 - fis_len += len; 436 - } 437 - if (fis_len == 8192) 438 - fis_len = 0; 439 - 440 - /* 441 - * Set DMA addresses and lower half of control register 442 - * based on direction. 443 - */ 444 - if (dir == DMA_FROM_DEVICE) { 445 - lli[idx].dar = cpu_to_le32(addr); 446 - lli[idx].sar = cpu_to_le32((u32)dmadr_addr); 447 - 448 - lli[idx].ctl.low = cpu_to_le32( 449 - DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) | 450 - DMA_CTL_SMS(sms_val) | 451 - DMA_CTL_DMS(dms_val) | 452 - DMA_CTL_SRC_MSIZE(bl) | 453 - DMA_CTL_DST_MSIZE(bl) | 454 - DMA_CTL_SINC_NOCHANGE | 455 - DMA_CTL_SRC_TRWID(2) | 456 - DMA_CTL_DST_TRWID(2) | 457 - DMA_CTL_INT_EN | 458 - DMA_CTL_LLP_SRCEN | 459 - DMA_CTL_LLP_DSTEN); 460 - } else { /* DMA_TO_DEVICE */ 461 - lli[idx].sar = cpu_to_le32(addr); 462 - lli[idx].dar = cpu_to_le32((u32)dmadr_addr); 463 - 464 - lli[idx].ctl.low = cpu_to_le32( 465 - DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) | 466 - DMA_CTL_SMS(dms_val) | 467 - DMA_CTL_DMS(sms_val) | 468 - DMA_CTL_SRC_MSIZE(bl) | 469 - DMA_CTL_DST_MSIZE(bl) | 470 - DMA_CTL_DINC_NOCHANGE | 471 - DMA_CTL_SRC_TRWID(2) | 472 - DMA_CTL_DST_TRWID(2) | 473 - DMA_CTL_INT_EN | 474 - DMA_CTL_LLP_SRCEN | 475 - DMA_CTL_LLP_DSTEN); 476 - } 477 - 478 - dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: " 479 - "0x%08x val: 0x%08x\n", __func__, 480 - len, DMA_CTL_BLK_TS(len / 4)); 481 - 482 - /* Program the LLI CTL high register */ 483 - lli[idx].ctl.high = cpu_to_le32(DMA_CTL_BLK_TS\ 484 - (len / 4)); 485 - 486 - /* Program the next pointer. The next pointer must be 487 - * the physical address, not the virtual address. 
488 - */ 489 - next_llp = (dma_lli + ((idx + 1) * sizeof(struct \ 490 - lli))); 491 - 492 - /* The last 2 bits encode the list master select. */ 493 - next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2); 494 - 495 - lli[idx].llp = cpu_to_le32(next_llp); 496 - idx++; 497 - sg_len -= len; 498 - addr += len; 499 - } 500 - } 501 - 502 490 /* 503 - * The last next ptr has to be zero and the last control low register 504 - * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source 505 - * and destination enable) set back to 0 (disabled.) This is what tells 506 - * the core that this is the last item in the linked list. 491 + * Each DMA command produces 2 interrupts. Only 492 + * complete the command after both interrupts have been 493 + * seen. (See sata_dwc_isr()) 507 494 */ 508 - if (idx) { 509 - lli[idx-1].llp = 0x00000000; 510 - lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32; 495 + hsdevp->dma_interrupt_count++; 496 + sata_dwc_clear_dmacr(hsdevp, tag); 511 497 512 - /* Flush cache to memory */ 513 - dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx), 514 - DMA_BIDIRECTIONAL); 498 + if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) { 499 + dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n", 500 + tag, hsdevp->dma_pending[tag]); 515 501 } 516 502 517 - return idx; 503 + if ((hsdevp->dma_interrupt_count % 2) == 0) 504 + sata_dwc_dma_xfer_complete(ap, 1); 505 + 506 + spin_unlock_irqrestore(&host->lock, flags); 518 507 } 519 508 520 - /* 521 - * Function: dma_dwc_xfer_start 522 - * arguments: Channel number 523 - * Return : None 524 - * Enables the DMA channel 525 - */ 526 - static void dma_dwc_xfer_start(int dma_ch) 509 + static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc) 527 510 { 528 - /* Enable the DMA channel */ 529 - out_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low), 530 - in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) | 531 - DMA_ENABLE_CHAN(dma_ch)); 532 - } 511 + struct ata_port *ap = qc->ap; 
512 + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 513 + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 514 + dma_addr_t addr = (dma_addr_t)&hsdev->sata_dwc_regs->dmadr; 515 + struct dma_slave_config sconf; 516 + struct dma_async_tx_descriptor *desc; 533 517 534 - static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems, 535 - struct lli *lli, dma_addr_t dma_lli, 536 - void __iomem *addr, int dir) 537 - { 538 - int dma_ch; 539 - int num_lli; 540 - /* Acquire DMA channel */ 541 - dma_ch = dma_request_channel(); 542 - if (dma_ch == -1) { 543 - dev_err(host_pvt.dwc_dev, "%s: dma channel unavailable\n", 544 - __func__); 545 - return -EAGAIN; 518 + if (qc->dma_dir == DMA_DEV_TO_MEM) { 519 + sconf.src_addr = addr; 520 + sconf.device_fc = true; 521 + } else { /* DMA_MEM_TO_DEV */ 522 + sconf.dst_addr = addr; 523 + sconf.device_fc = false; 546 524 } 525 + 526 + sconf.direction = qc->dma_dir; 527 + sconf.src_maxburst = AHB_DMA_BRST_DFLT; 528 + sconf.dst_maxburst = AHB_DMA_BRST_DFLT; 529 + sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 530 + sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 531 + 532 + dmaengine_slave_config(hsdevp->chan, &sconf); 547 533 548 534 /* Convert SG list to linked list of items (LLIs) for AHB DMA */ 549 - num_lli = map_sg_to_lli(sg, num_elems, lli, dma_lli, addr, dir); 535 + desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem, 536 + qc->dma_dir, 537 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 550 538 551 - dev_dbg(host_pvt.dwc_dev, "%s sg: 0x%p, count: %d lli: %p dma_lli:" 552 - " 0x%0xlx addr: %p lli count: %d\n", __func__, sg, num_elems, 553 - lli, (u32)dma_lli, addr, num_lli); 539 + if (!desc) 540 + return NULL; 554 541 555 - clear_chan_interrupts(dma_ch); 542 + desc->callback = dma_dwc_xfer_done; 543 + desc->callback_param = hsdev; 556 544 557 - /* Program the CFG register. 
*/ 558 - out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high), 559 - DMA_CFG_HW_HS_SRC(dma_ch) | DMA_CFG_HW_HS_DEST(dma_ch) | 560 - DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ); 561 - out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low), 562 - DMA_CFG_HW_CH_PRIOR(dma_ch)); 545 + dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pad\n", 546 + __func__, qc->sg, qc->n_elem, &addr); 563 547 564 - /* Program the address of the linked list */ 565 - out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low), 566 - DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2)); 567 - 568 - /* Program the CTL register with src enable / dst enable */ 569 - out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low), 570 - DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN); 571 - return dma_ch; 572 - } 573 - 574 - /* 575 - * Function: dma_dwc_exit 576 - * arguments: None 577 - * returns status 578 - * This function exits the SATA DMA driver 579 - */ 580 - static void dma_dwc_exit(struct sata_dwc_device *hsdev) 581 - { 582 - dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__); 583 - if (host_pvt.sata_dma_regs) { 584 - iounmap((void __iomem *)host_pvt.sata_dma_regs); 585 - host_pvt.sata_dma_regs = NULL; 586 - } 587 - 588 - if (hsdev->irq_dma) { 589 - free_irq(hsdev->irq_dma, hsdev); 590 - hsdev->irq_dma = 0; 591 - } 592 - } 593 - 594 - /* 595 - * Function: dma_dwc_init 596 - * arguments: hsdev 597 - * returns status 598 - * This function initializes the SATA DMA driver 599 - */ 600 - static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq) 601 - { 602 - int err; 603 - 604 - err = dma_request_interrupts(hsdev, irq); 605 - if (err) { 606 - dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns" 607 - " %d\n", __func__, err); 608 - return err; 609 - } 610 - 611 - /* Enabe DMA */ 612 - out_le32(&(host_pvt.sata_dma_regs->dma_cfg.low), DMA_EN); 613 - 614 - dev_notice(host_pvt.dwc_dev, "DMA initialized\n"); 615 - dev_dbg(host_pvt.dwc_dev, "SATA DMA registers=0x%p\n", host_pvt.\ 616 - 
sata_dma_regs); 617 - 618 - return 0; 548 + return desc; 619 549 } 620 550 621 551 static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) ··· 409 891 struct ata_queued_cmd *qc; 410 892 u32 serror; 411 893 u8 status, tag; 412 - u32 err_reg; 413 894 414 895 ata_ehi_clear_desc(ehi); 415 896 416 897 serror = core_scr_read(SCR_ERROR); 417 898 status = ap->ops->sff_check_status(ap); 418 899 419 - err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error.\ 420 - low)); 421 900 tag = ap->link.active_tag; 422 901 423 - dev_err(ap->dev, "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x " 424 - "dma_intp=%d pending=%d issued=%d dma_err_status=0x%08x\n", 425 - __func__, serror, intpr, status, host_pvt.dma_interrupt_count, 426 - hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag], err_reg); 902 + dev_err(ap->dev, 903 + "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d", 904 + __func__, serror, intpr, status, hsdevp->dma_interrupt_count, 905 + hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]); 427 906 428 907 /* Clear error register and interrupt bit */ 429 908 clear_serror(); ··· 518 1003 519 1004 /* DEV interrupt w/ no active qc? */ 520 1005 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { 521 - dev_err(ap->dev, "%s interrupt with no active qc " 522 - "qc=%p\n", __func__, qc); 1006 + dev_err(ap->dev, 1007 + "%s interrupt with no active qc qc=%p\n", 1008 + __func__, qc); 523 1009 ap->ops->sff_check_status(ap); 524 1010 handled = 1; 525 1011 goto DONE; ··· 547 1031 * operation done interrupt. The command should be 548 1032 * completed only after both interrupts are seen. 
549 1033 */ 550 - host_pvt.dma_interrupt_count++; 1034 + hsdevp->dma_interrupt_count++; 551 1035 if (hsdevp->dma_pending[tag] == \ 552 1036 SATA_DWC_DMA_PENDING_NONE) { 553 - dev_err(ap->dev, "%s: DMA not pending " 554 - "intpr=0x%08x status=0x%08x pending" 555 - "=%d\n", __func__, intpr, status, 1037 + dev_err(ap->dev, 1038 + "%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n", 1039 + __func__, intpr, status, 556 1040 hsdevp->dma_pending[tag]); 557 1041 } 558 1042 559 - if ((host_pvt.dma_interrupt_count % 2) == 0) 1043 + if ((hsdevp->dma_interrupt_count % 2) == 0) 560 1044 sata_dwc_dma_xfer_complete(ap, 1); 561 1045 } else if (ata_is_pio(qc->tf.protocol)) { 562 1046 ata_sff_hsm_move(ap, qc, status, 0); ··· 584 1068 585 1069 if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \ 586 1070 tag_mask > 1) { 587 - dev_dbg(ap->dev, "%s NCQ:sactive=0x%08x sactive_issued=0x%08x" 588 - "tag_mask=0x%08x\n", __func__, sactive, 589 - host_pvt.sata_dwc_sactive_issued, tag_mask); 1071 + dev_dbg(ap->dev, 1072 + "%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n", 1073 + __func__, sactive, host_pvt.sata_dwc_sactive_issued, 1074 + tag_mask); 590 1075 } 591 1076 592 1077 if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \ 593 1078 (host_pvt.sata_dwc_sactive_issued)) { 594 - dev_warn(ap->dev, "Bad tag mask? sactive=0x%08x " 595 - "(host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask" 596 - "=0x%08x\n", sactive, host_pvt.sata_dwc_sactive_issued, 597 - tag_mask); 1079 + dev_warn(ap->dev, 1080 + "Bad tag mask? sactive=0x%08x (host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask=0x%08x\n", 1081 + sactive, host_pvt.sata_dwc_sactive_issued, tag_mask); 598 1082 } 599 1083 600 1084 /* read just to clear ... 
not bad if currently still busy */ ··· 630 1114 dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__, 631 1115 get_prot_descript(qc->tf.protocol)); 632 1116 if (ata_is_dma(qc->tf.protocol)) { 633 - host_pvt.dma_interrupt_count++; 1117 + hsdevp->dma_interrupt_count++; 634 1118 if (hsdevp->dma_pending[tag] == \ 635 1119 SATA_DWC_DMA_PENDING_NONE) 636 1120 dev_warn(ap->dev, "%s: DMA not pending?\n", 637 1121 __func__); 638 - if ((host_pvt.dma_interrupt_count % 2) == 0) 1122 + if ((hsdevp->dma_interrupt_count % 2) == 0) 639 1123 sata_dwc_dma_xfer_complete(ap, 1); 640 1124 } else { 641 1125 if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) ··· 658 1142 */ 659 1143 sactive2 = core_scr_read(SCR_ACTIVE); 660 1144 if (sactive2 != sactive) { 661 - dev_dbg(ap->dev, "More completed - sactive=0x%x sactive2" 662 - "=0x%x\n", sactive, sactive2); 1145 + dev_dbg(ap->dev, 1146 + "More completed - sactive=0x%x sactive2=0x%x\n", 1147 + sactive, sactive2); 663 1148 } 664 1149 handled = 1; 665 1150 ··· 686 1169 * This should not happen, it indicates the driver is out of 687 1170 * sync. If it does happen, clear dmacr anyway. 
688 1171 */ 689 - dev_err(host_pvt.dwc_dev, "%s DMA protocol RX and" 690 - "TX DMA not pending tag=0x%02x pending=%d" 691 - " dmacr: 0x%08x\n", __func__, tag, 692 - hsdevp->dma_pending[tag], 693 - in_le32(&(hsdev->sata_dwc_regs->dmacr))); 1172 + dev_err(hsdev->dev, 1173 + "%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n", 1174 + __func__, tag, hsdevp->dma_pending[tag], 1175 + in_le32(&hsdev->sata_dwc_regs->dmacr)); 694 1176 out_le32(&(hsdev->sata_dwc_regs->dmacr), 695 1177 SATA_DWC_DMACR_TXRXCH_CLEAR); 696 1178 } ··· 711 1195 712 1196 #ifdef DEBUG_NCQ 713 1197 if (tag > 0) { 714 - dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s " 715 - "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command, 1198 + dev_info(ap->dev, 1199 + "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n", 1200 + __func__, qc->tag, qc->tf.command, 716 1201 get_dma_dir_descript(qc->dma_dir), 717 1202 get_prot_descript(qc->tf.protocol), 718 1203 in_le32(&(hsdev->sata_dwc_regs->dmacr))); ··· 722 1205 723 1206 if (ata_is_dma(qc->tf.protocol)) { 724 1207 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) { 725 - dev_err(ap->dev, "%s DMA protocol RX and TX DMA not " 726 - "pending dmacr: 0x%08x\n", __func__, 1208 + dev_err(ap->dev, 1209 + "%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n", 1210 + __func__, 727 1211 in_le32(&(hsdev->sata_dwc_regs->dmacr))); 728 1212 } 729 1213 ··· 750 1232 dev_err(ap->dev, "TX DMA PENDING\n"); 751 1233 else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) 752 1234 dev_err(ap->dev, "RX DMA PENDING\n"); 753 - dev_dbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u:" 754 - " protocol=%d\n", qc->tf.command, status, ap->print_id, 755 - qc->tf.protocol); 1235 + dev_dbg(ap->dev, 1236 + "QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n", 1237 + qc->tf.command, status, ap->print_id, qc->tf.protocol); 756 1238 757 1239 /* clear active bit */ 758 1240 mask = (~(qcmd_tag_to_mask(tag))); ··· 778 
1260 */ 779 1261 out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS); 780 1262 781 - dev_dbg(host_pvt.dwc_dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", 1263 + dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", 782 1264 __func__, in_le32(&hsdev->sata_dwc_regs->intmr), 783 1265 in_le32(&hsdev->sata_dwc_regs->errmr)); 1266 + } 1267 + 1268 + static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param) 1269 + { 1270 + struct sata_dwc_device_port *hsdevp = param; 1271 + struct dw_dma_slave *dws = hsdevp->dws; 1272 + 1273 + if (dws->dma_dev != chan->device->dev) 1274 + return false; 1275 + 1276 + chan->private = dws; 1277 + return true; 784 1278 } 785 1279 786 1280 static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base) ··· 829 1299 struct sata_dwc_device *hsdev; 830 1300 struct sata_dwc_device_port *hsdevp = NULL; 831 1301 struct device *pdev; 1302 + dma_cap_mask_t mask; 832 1303 int i; 833 1304 834 1305 hsdev = HSDEV_FROM_AP(ap); ··· 853 1322 } 854 1323 hsdevp->hsdev = hsdev; 855 1324 1325 + hsdevp->dws = &sata_dwc_dma_dws; 1326 + hsdevp->dws->dma_dev = hsdev->dev; 1327 + 1328 + dma_cap_zero(mask); 1329 + dma_cap_set(DMA_SLAVE, mask); 1330 + 1331 + /* Acquire DMA channel */ 1332 + hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp); 1333 + if (!hsdevp->chan) { 1334 + dev_err(hsdev->dev, "%s: dma channel unavailable\n", 1335 + __func__); 1336 + err = -EAGAIN; 1337 + goto CLEANUP_ALLOC; 1338 + } 1339 + 856 1340 for (i = 0; i < SATA_DWC_QCMD_MAX; i++) 857 1341 hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; 858 1342 859 1343 ap->bmdma_prd = NULL; /* set these so libata doesn't use them */ 860 1344 ap->bmdma_prd_dma = 0; 861 - 862 - /* 863 - * DMA - Assign scatter gather LLI table. We can't use the libata 864 - * version since it's PRD is IDE PCI specific. 
865 - */ 866 - for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { 867 - hsdevp->llit[i] = dma_alloc_coherent(pdev, 868 - SATA_DWC_DMAC_LLI_TBL_SZ, 869 - &(hsdevp->llit_dma[i]), 870 - GFP_ATOMIC); 871 - if (!hsdevp->llit[i]) { 872 - dev_err(ap->dev, "%s: dma_alloc_coherent failed\n", 873 - __func__); 874 - err = -ENOMEM; 875 - goto CLEANUP_ALLOC; 876 - } 877 - } 878 1345 879 1346 if (ap->port_no == 0) { 880 1347 dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", ··· 902 1373 903 1374 static void sata_dwc_port_stop(struct ata_port *ap) 904 1375 { 905 - int i; 906 - struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 907 1376 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 908 1377 909 1378 dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id); 910 1379 911 - if (hsdevp && hsdev) { 912 - /* deallocate LLI table */ 913 - for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { 914 - dma_free_coherent(ap->host->dev, 915 - SATA_DWC_DMAC_LLI_TBL_SZ, 916 - hsdevp->llit[i], hsdevp->llit_dma[i]); 917 - } 1380 + dmaengine_terminate_all(hsdevp->chan); 1381 + dma_release_channel(hsdevp->chan); 918 1382 919 - kfree(hsdevp); 920 - } 1383 + kfree(hsdevp); 921 1384 ap->private_data = NULL; 922 1385 } 923 1386 ··· 965 1444 static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) 966 1445 { 967 1446 int start_dma; 968 - u32 reg, dma_chan; 1447 + u32 reg; 969 1448 struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc); 970 1449 struct ata_port *ap = qc->ap; 971 1450 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1451 + struct dma_async_tx_descriptor *desc = hsdevp->desc[tag]; 972 1452 int dir = qc->dma_dir; 973 - dma_chan = hsdevp->dma_chan[tag]; 974 1453 975 1454 if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) { 976 1455 start_dma = 1; ··· 979 1458 else 980 1459 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX; 981 1460 } else { 982 - dev_err(ap->dev, "%s: Command not pending cmd_issued=%d " 983 - "(tag=%d) DMA NOT started\n", __func__, 984 - 
hsdevp->cmd_issued[tag], tag); 1461 + dev_err(ap->dev, 1462 + "%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n", 1463 + __func__, hsdevp->cmd_issued[tag], tag); 985 1464 start_dma = 0; 986 1465 } 987 1466 988 - dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s " 989 - "start_dma? %x\n", __func__, qc, tag, qc->tf.command, 1467 + dev_dbg(ap->dev, 1468 + "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n", 1469 + __func__, qc, tag, qc->tf.command, 990 1470 get_dma_dir_descript(qc->dma_dir), start_dma); 991 - sata_dwc_tf_dump(&(qc->tf)); 1471 + sata_dwc_tf_dump(ap, &qc->tf); 992 1472 993 1473 if (start_dma) { 994 1474 reg = core_scr_read(SCR_ERROR); ··· 1006 1484 SATA_DWC_DMACR_RXCHEN); 1007 1485 1008 1486 /* Enable AHB DMA transfer on the specified channel */ 1009 - dma_dwc_xfer_start(dma_chan); 1487 + dmaengine_submit(desc); 1488 + dma_async_issue_pending(hsdevp->chan); 1010 1489 } 1011 1490 } 1012 1491 ··· 1033 1510 */ 1034 1511 static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag) 1035 1512 { 1036 - struct scatterlist *sg = qc->sg; 1513 + struct dma_async_tx_descriptor *desc; 1037 1514 struct ata_port *ap = qc->ap; 1038 - int dma_chan; 1039 - struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 1040 1515 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1041 1516 1042 1517 dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n", 1043 1518 __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir), 1044 1519 qc->n_elem); 1045 1520 1046 - dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag], 1047 - hsdevp->llit_dma[tag], 1048 - (void __iomem *)&hsdev->sata_dwc_regs->dmadr, 1049 - qc->dma_dir); 1050 - if (dma_chan < 0) { 1051 - dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n", 1052 - __func__, dma_chan); 1521 + desc = dma_dwc_xfer_setup(qc); 1522 + if (!desc) { 1523 + dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns NULL\n", 1524 + __func__); 1053 1525 return; 1054 1526 } 1055 - 
hsdevp->dma_chan[tag] = dma_chan; 1527 + hsdevp->desc[tag] = desc; 1056 1528 } 1057 1529 1058 1530 static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) ··· 1058 1540 1059 1541 #ifdef DEBUG_NCQ 1060 1542 if (qc->tag > 0 || ap->link.sactive > 1) 1061 - dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d " 1062 - "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", 1543 + dev_info(ap->dev, 1544 + "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", 1063 1545 __func__, ap->print_id, qc->tf.command, 1064 1546 ata_get_cmd_descript(qc->tf.command), 1065 1547 qc->tag, get_prot_descript(qc->tf.protocol), ··· 1075 1557 sactive |= (0x00000001 << tag); 1076 1558 core_scr_write(SCR_ACTIVE, sactive); 1077 1559 1078 - dev_dbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x " 1079 - "sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive, 1080 - sactive); 1560 + dev_dbg(qc->ap->dev, 1561 + "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n", 1562 + __func__, tag, qc->ap->link.sactive, sactive); 1081 1563 1082 1564 ap->ops->sff_tf_load(ap, &qc->tf); 1083 1565 sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag, ··· 1191 1673 struct ata_port_info pi = sata_dwc_port_info[0]; 1192 1674 const struct ata_port_info *ppi[] = { &pi, NULL }; 1193 1675 struct device_node *np = ofdev->dev.of_node; 1194 - u32 dma_chan; 1195 1676 1196 1677 /* Allocate DWC SATA device */ 1197 1678 host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS); ··· 1200 1683 1201 1684 host->private_data = hsdev; 1202 1685 1203 - if (of_property_read_u32(np, "dma-channel", &dma_chan)) { 1204 - dev_warn(&ofdev->dev, "no dma-channel property set." 
1205 - " Use channel 0\n"); 1206 - dma_chan = 0; 1207 - } 1208 - host_pvt.dma_channel = dma_chan; 1209 - 1210 1686 /* Ioremap SATA registers */ 1211 1687 base = of_iomap(np, 0); 1212 1688 if (!base) { 1213 - dev_err(&ofdev->dev, "ioremap failed for SATA register" 1214 - " address\n"); 1689 + dev_err(&ofdev->dev, 1690 + "ioremap failed for SATA register address\n"); 1215 1691 return -ENODEV; 1216 1692 } 1217 1693 hsdev->reg_base = base; ··· 1226 1716 idr, ver[0], ver[1], ver[2]); 1227 1717 1228 1718 /* Get SATA DMA interrupt number */ 1229 - irq = irq_of_parse_and_map(np, 1); 1230 - if (irq == NO_IRQ) { 1719 + hsdev->dma->irq = irq_of_parse_and_map(np, 1); 1720 + if (hsdev->dma->irq == NO_IRQ) { 1231 1721 dev_err(&ofdev->dev, "no SATA DMA irq\n"); 1232 1722 err = -ENODEV; 1233 1723 goto error_iomap; 1234 1724 } 1235 1725 1236 1726 /* Get physical SATA DMA register base address */ 1237 - host_pvt.sata_dma_regs = (void *)of_iomap(np, 1); 1238 - if (!(host_pvt.sata_dma_regs)) { 1239 - dev_err(&ofdev->dev, "ioremap failed for AHBDMA register" 1240 - " address\n"); 1727 + hsdev->dma->regs = of_iomap(np, 1); 1728 + if (!hsdev->dma->regs) { 1729 + dev_err(&ofdev->dev, 1730 + "ioremap failed for AHBDMA register address\n"); 1241 1731 err = -ENODEV; 1242 1732 goto error_iomap; 1243 1733 } 1244 1734 1245 1735 /* Save dev for later use in dev_xxx() routines */ 1246 - host_pvt.dwc_dev = &ofdev->dev; 1736 + hsdev->dev = &ofdev->dev; 1737 + 1738 + hsdev->dma->dev = &ofdev->dev; 1247 1739 1248 1740 /* Initialize AHB DMAC */ 1249 - err = dma_dwc_init(hsdev, irq); 1741 + err = dw_dma_probe(hsdev->dma, NULL); 1250 1742 if (err) 1251 1743 goto error_dma_iomap; 1252 1744 ··· 1277 1765 1278 1766 error_out: 1279 1767 /* Free SATA DMA resources */ 1280 - dma_dwc_exit(hsdev); 1768 + dw_dma_remove(hsdev->dma); 1281 1769 error_dma_iomap: 1282 - iounmap((void __iomem *)host_pvt.sata_dma_regs); 1770 + iounmap(hsdev->dma->regs); 1283 1771 error_iomap: 1284 1772 iounmap(base); 1285 1773 return 
err; ··· 1294 1782 ata_host_detach(host); 1295 1783 1296 1784 /* Free SATA DMA resources */ 1297 - dma_dwc_exit(hsdev); 1785 + dw_dma_remove(hsdev->dma); 1298 1786 1299 - iounmap((void __iomem *)host_pvt.sata_dma_regs); 1787 + iounmap(hsdev->dma->regs); 1300 1788 iounmap(hsdev->reg_base); 1301 1789 dev_dbg(&ofdev->dev, "done\n"); 1302 1790 return 0; ··· 1321 1809 1322 1810 MODULE_LICENSE("GPL"); 1323 1811 MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>"); 1324 - MODULE_DESCRIPTION("DesignWare Cores SATA controller low lever driver"); 1812 + MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver"); 1325 1813 MODULE_VERSION(DRV_VERSION);
+2 -2
drivers/ata/sata_inic162x.c
··· 856 856 } 857 857 858 858 /* Set dma_mask. This devices doesn't support 64bit addressing. */ 859 - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 859 + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 860 860 if (rc) { 861 861 dev_err(&pdev->dev, "32-bit DMA enable failed\n"); 862 862 return rc; 863 863 } 864 864 865 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 865 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 866 866 if (rc) { 867 867 dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); 868 868 return rc;
+21 -9
drivers/ata/sata_mv.c
··· 306 306 MV5_PHY_CTL = 0x0C, 307 307 SATA_IFCFG = 0x050, 308 308 LP_PHY_CTL = 0x058, 309 + LP_PHY_CTL_PIN_PU_PLL = (1 << 0), 310 + LP_PHY_CTL_PIN_PU_RX = (1 << 1), 311 + LP_PHY_CTL_PIN_PU_TX = (1 << 2), 312 + LP_PHY_CTL_GEN_TX_3G = (1 << 5), 313 + LP_PHY_CTL_GEN_RX_3G = (1 << 9), 309 314 310 315 MV_M2_PREAMP_MASK = 0x7e0, 311 316 ··· 1396 1391 /* 1397 1392 * Set PHY speed according to SControl speed. 1398 1393 */ 1399 - if ((val & 0xf0) == 0x10) 1400 - writelfl(0x7, lp_phy_addr); 1401 - else 1402 - writelfl(0x227, lp_phy_addr); 1394 + u32 lp_phy_val = 1395 + LP_PHY_CTL_PIN_PU_PLL | 1396 + LP_PHY_CTL_PIN_PU_RX | 1397 + LP_PHY_CTL_PIN_PU_TX; 1398 + 1399 + if ((val & 0xf0) != 0x10) 1400 + lp_phy_val |= 1401 + LP_PHY_CTL_GEN_TX_3G | 1402 + LP_PHY_CTL_GEN_RX_3G; 1403 + 1404 + writelfl(lp_phy_val, lp_phy_addr); 1403 1405 } 1404 1406 } 1405 1407 writelfl(val, addr); ··· 4320 4308 { 4321 4309 int rc; 4322 4310 4323 - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 4324 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 4311 + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 4312 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 4325 4313 if (rc) { 4326 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 4314 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 4327 4315 if (rc) { 4328 4316 dev_err(&pdev->dev, 4329 4317 "64-bit DMA enable failed\n"); ··· 4331 4319 } 4332 4320 } 4333 4321 } else { 4334 - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 4322 + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 4335 4323 if (rc) { 4336 4324 dev_err(&pdev->dev, "32-bit DMA enable failed\n"); 4337 4325 return rc; 4338 4326 } 4339 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 4327 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 4340 4328 if (rc) { 4341 4329 dev_err(&pdev->dev, 4342 4330 "32-bit consistent DMA enable failed\n");
+6 -6
drivers/ata/sata_nv.c
··· 756 756 blk_queue_bounce_limit(sdev1->request_queue, 757 757 ATA_DMA_MASK); 758 758 759 - pci_set_dma_mask(pdev, ATA_DMA_MASK); 759 + dma_set_mask(&pdev->dev, ATA_DMA_MASK); 760 760 } else { 761 761 /** This shouldn't fail as it was set to this value before */ 762 - pci_set_dma_mask(pdev, pp->adma_dma_mask); 762 + dma_set_mask(&pdev->dev, pp->adma_dma_mask); 763 763 if (sdev0) 764 764 blk_queue_bounce_limit(sdev0->request_queue, 765 765 pp->adma_dma_mask); ··· 1133 1133 1134 1134 /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and 1135 1135 pad buffers */ 1136 - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1136 + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 1137 1137 if (rc) 1138 1138 return rc; 1139 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1139 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 1140 1140 if (rc) 1141 1141 return rc; 1142 1142 ··· 1161 1161 These are allowed to fail since we store the value that ends up 1162 1162 being used to set as the bounce limit in slave_config later if 1163 1163 needed. */ 1164 - pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1165 - pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1164 + dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 1165 + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 1166 1166 pp->adma_dma_mask = *dev->dma_mask; 1167 1167 1168 1168 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
+2 -2
drivers/ata/sata_promise.c
··· 1246 1246 /* initialize adapter */ 1247 1247 pdc_host_init(host); 1248 1248 1249 - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 1249 + rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); 1250 1250 if (rc) 1251 1251 return rc; 1252 - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 1252 + rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); 1253 1253 if (rc) 1254 1254 return rc; 1255 1255
+5 -5
drivers/ata/sata_qstor.c
··· 557 557 int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT); 558 558 559 559 if (have_64bit_bus && 560 - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 561 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 560 + !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 561 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 562 562 if (rc) { 563 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 563 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 564 564 if (rc) { 565 565 dev_err(&pdev->dev, 566 566 "64-bit DMA enable failed\n"); ··· 568 568 } 569 569 } 570 570 } else { 571 - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 571 + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 572 572 if (rc) { 573 573 dev_err(&pdev->dev, "32-bit DMA enable failed\n"); 574 574 return rc; 575 575 } 576 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 576 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 577 577 if (rc) { 578 578 dev_err(&pdev->dev, 579 579 "32-bit consistent DMA enable failed\n");
+2 -2
drivers/ata/sata_sil.c
··· 770 770 return rc; 771 771 host->iomap = pcim_iomap_table(pdev); 772 772 773 - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 773 + rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); 774 774 if (rc) 775 775 return rc; 776 - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 776 + rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); 777 777 if (rc) 778 778 return rc; 779 779
+6 -6
drivers/ata/sata_sil24.c
··· 246 246 /* host flags */ 247 247 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | 248 248 ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | 249 - ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG, 249 + ATA_FLAG_AN | ATA_FLAG_PMP, 250 250 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ 251 251 252 252 IRQ_STAT_4PORTS = 0xf, ··· 1312 1312 host->iomap = iomap; 1313 1313 1314 1314 /* configure and activate the device */ 1315 - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 1316 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1315 + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 1316 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 1317 1317 if (rc) { 1318 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1318 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 1319 1319 if (rc) { 1320 1320 dev_err(&pdev->dev, 1321 1321 "64-bit DMA enable failed\n"); ··· 1323 1323 } 1324 1324 } 1325 1325 } else { 1326 - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1326 + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 1327 1327 if (rc) { 1328 1328 dev_err(&pdev->dev, "32-bit DMA enable failed\n"); 1329 1329 return rc; 1330 1330 } 1331 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1331 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 1332 1332 if (rc) { 1333 1333 dev_err(&pdev->dev, 1334 1334 "32-bit consistent DMA enable failed\n");
+2 -2
drivers/ata/sata_svw.c
··· 496 496 ata_port_pbar_desc(ap, 5, offset, "port"); 497 497 } 498 498 499 - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 499 + rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); 500 500 if (rc) 501 501 return rc; 502 - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 502 + rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); 503 503 if (rc) 504 504 return rc; 505 505
+2 -2
drivers/ata/sata_sx4.c
··· 1476 1476 } 1477 1477 1478 1478 /* configure and activate */ 1479 - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 1479 + rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); 1480 1480 if (rc) 1481 1481 return rc; 1482 - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 1482 + rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); 1483 1483 if (rc) 1484 1484 return rc; 1485 1485
+2 -2
drivers/ata/sata_via.c
··· 502 502 for (i = 0; i < host->n_ports; i++) 503 503 vt6421_init_addrs(host->ports[i]); 504 504 505 - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 505 + rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); 506 506 if (rc) 507 507 return rc; 508 - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 508 + rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); 509 509 if (rc) 510 510 return rc; 511 511
+2 -2
drivers/ata/sata_vsc.c
··· 387 387 /* 388 388 * Use 32 bit DMA mask, because 64 bit address support is poor. 389 389 */ 390 - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 390 + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 391 391 if (rc) 392 392 return rc; 393 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 393 + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 394 394 if (rc) 395 395 return rc; 396 396
+2 -2
drivers/ide/ide-lib.c
··· 148 148 printk(KERN_CONT "DataRequest "); 149 149 if (stat & ATA_CORR) 150 150 printk(KERN_CONT "CorrectedError "); 151 - if (stat & ATA_IDX) 152 - printk(KERN_CONT "Index "); 151 + if (stat & ATA_SENSE) 152 + printk(KERN_CONT "Sense "); 153 153 if (stat & ATA_ERR) 154 154 printk(KERN_CONT "Error "); 155 155 }
+1 -1
drivers/ide/ide-probe.c
··· 273 273 (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) { 274 274 a = tp_ops->read_altstatus(hwif); 275 275 s = tp_ops->read_status(hwif); 276 - if ((a ^ s) & ~ATA_IDX) 276 + if ((a ^ s) & ~ATA_SENSE) 277 277 /* ancient Seagate drives, broken interfaces */ 278 278 printk(KERN_INFO "%s: probing with STATUS(0x%02x) " 279 279 "instead of ALTSTATUS(0x%02x)\n",
+31
drivers/scsi/scsi_error.c
··· 26 26 #include <linux/blkdev.h> 27 27 #include <linux/delay.h> 28 28 #include <linux/jiffies.h> 29 + #include <asm/unaligned.h> 29 30 30 31 #include <scsi/scsi.h> 31 32 #include <scsi/scsi_cmnd.h> ··· 2587 2586 } 2588 2587 } 2589 2588 EXPORT_SYMBOL(scsi_build_sense_buffer); 2589 + 2590 + /** 2591 + * scsi_set_sense_information - set the information field in a 2592 + * formatted sense data buffer 2593 + * @buf: Where to build sense data 2594 + * @info: 64-bit information value to be set 2595 + * 2596 + **/ 2597 + void scsi_set_sense_information(u8 *buf, u64 info) 2598 + { 2599 + if ((buf[0] & 0x7f) == 0x72) { 2600 + u8 *ucp, len; 2601 + 2602 + len = buf[7]; 2603 + ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0); 2604 + if (!ucp) { 2605 + buf[7] = len + 0xa; 2606 + ucp = buf + 8 + len; 2607 + } 2608 + ucp[0] = 0; 2609 + ucp[1] = 0xa; 2610 + ucp[2] = 0x80; /* Valid bit */ 2611 + ucp[3] = 0; 2612 + put_unaligned_be64(info, &ucp[4]); 2613 + } else if ((buf[0] & 0x7f) == 0x70) { 2614 + buf[0] |= 0x80; 2615 + put_unaligned_be64(info, &buf[3]); 2616 + } 2617 + } 2618 + EXPORT_SYMBOL(scsi_set_sense_information);
+28 -1
include/linux/ata.h
··· 94 94 ATA_ID_SECTOR_SIZE = 106, 95 95 ATA_ID_WWN = 108, 96 96 ATA_ID_LOGICAL_SECTOR_SIZE = 117, /* and 118 */ 97 + ATA_ID_COMMAND_SET_3 = 119, 98 + ATA_ID_COMMAND_SET_4 = 120, 97 99 ATA_ID_LAST_LUN = 126, 98 100 ATA_ID_DLF = 128, 99 101 ATA_ID_CSFO = 129, ··· 179 177 ATA_DSC = (1 << 4), /* drive seek complete */ 180 178 ATA_DRQ = (1 << 3), /* data request i/o */ 181 179 ATA_CORR = (1 << 2), /* corrected data error */ 182 - ATA_IDX = (1 << 1), /* index */ 180 + ATA_SENSE = (1 << 1), /* sense code available */ 183 181 ATA_ERR = (1 << 0), /* have an error */ 184 182 ATA_SRST = (1 << 2), /* software reset */ 185 183 ATA_ICRC = (1 << 7), /* interface CRC error */ ··· 384 382 SATA_SSP = 0x06, /* Software Settings Preservation */ 385 383 SATA_DEVSLP = 0x09, /* Device Sleep */ 386 384 385 + SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */ 386 + 387 387 /* feature values for SET_MAX */ 388 388 ATA_SET_MAX_ADDR = 0x00, 389 389 ATA_SET_MAX_PASSWD = 0x01, ··· 529 525 #define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) 530 526 #define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4)) 531 527 #define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)) 528 + #define ata_id_has_ncq_autosense(id) \ 529 + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)) 532 530 533 531 static inline bool ata_id_has_hipm(const u16 *id) 534 532 { ··· 700 694 if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) 701 695 return false; 702 696 return id[ATA_ID_CFS_ENABLE_1] & (1 << 5); 697 + } 698 + 699 + static inline bool ata_id_has_read_log_dma_ext(const u16 *id) 700 + { 701 + if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) 702 + return false; 703 + return id[ATA_ID_COMMAND_SET_3] & (1 << 3); 704 + } 705 + 706 + static inline bool ata_id_has_sense_reporting(const u16 *id) 707 + { 708 + if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) 709 + return false; 710 + return id[ATA_ID_COMMAND_SET_3] & (1 << 6); 711 + } 712 + 713 + static inline bool 
ata_id_sense_reporting_enabled(const u16 *id) 714 + { 715 + if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) 716 + return false; 717 + return id[ATA_ID_COMMAND_SET_4] & (1 << 6); 703 718 } 704 719 705 720 /**
+1 -2
include/linux/libata.h
··· 231 231 ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity 232 232 * led */ 233 233 ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ 234 - ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ 235 - ATA_FLAG_SAS_HOST = (1 << 25), /* SAS host */ 234 + ATA_FLAG_SAS_HOST = (1 << 24), /* SAS host */ 236 235 237 236 /* bits 24:31 of ap->flags are reserved for LLD specific flags */ 238 237
+1
include/scsi/scsi_eh.h
··· 59 59 u64 * info_out); 60 60 61 61 extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq); 62 + extern void scsi_set_sense_information(u8 *buf, u64 info); 62 63 63 64 extern int scsi_ioctl_reset(struct scsi_device *, int __user *); 64 65
+325
include/trace/events/libata.h
··· 1 + #undef TRACE_SYSTEM 2 + #define TRACE_SYSTEM libata 3 + 4 + #if !defined(_TRACE_LIBATA_H) || defined(TRACE_HEADER_MULTI_READ) 5 + #define _TRACE_LIBATA_H 6 + 7 + #include <linux/ata.h> 8 + #include <linux/libata.h> 9 + #include <linux/tracepoint.h> 10 + #include <linux/trace_seq.h> 11 + 12 + #define ata_opcode_name(opcode) { opcode, #opcode } 13 + #define show_opcode_name(val) \ 14 + __print_symbolic(val, \ 15 + ata_opcode_name(ATA_CMD_DEV_RESET), \ 16 + ata_opcode_name(ATA_CMD_CHK_POWER), \ 17 + ata_opcode_name(ATA_CMD_STANDBY), \ 18 + ata_opcode_name(ATA_CMD_IDLE), \ 19 + ata_opcode_name(ATA_CMD_EDD), \ 20 + ata_opcode_name(ATA_CMD_DOWNLOAD_MICRO), \ 21 + ata_opcode_name(ATA_CMD_DOWNLOAD_MICRO_DMA), \ 22 + ata_opcode_name(ATA_CMD_NOP), \ 23 + ata_opcode_name(ATA_CMD_FLUSH), \ 24 + ata_opcode_name(ATA_CMD_FLUSH_EXT), \ 25 + ata_opcode_name(ATA_CMD_ID_ATA), \ 26 + ata_opcode_name(ATA_CMD_ID_ATAPI), \ 27 + ata_opcode_name(ATA_CMD_SERVICE), \ 28 + ata_opcode_name(ATA_CMD_READ), \ 29 + ata_opcode_name(ATA_CMD_READ_EXT), \ 30 + ata_opcode_name(ATA_CMD_READ_QUEUED), \ 31 + ata_opcode_name(ATA_CMD_READ_STREAM_EXT), \ 32 + ata_opcode_name(ATA_CMD_READ_STREAM_DMA_EXT), \ 33 + ata_opcode_name(ATA_CMD_WRITE), \ 34 + ata_opcode_name(ATA_CMD_WRITE_EXT), \ 35 + ata_opcode_name(ATA_CMD_WRITE_QUEUED), \ 36 + ata_opcode_name(ATA_CMD_WRITE_STREAM_EXT), \ 37 + ata_opcode_name(ATA_CMD_WRITE_STREAM_DMA_EXT), \ 38 + ata_opcode_name(ATA_CMD_WRITE_FUA_EXT), \ 39 + ata_opcode_name(ATA_CMD_WRITE_QUEUED_FUA_EXT), \ 40 + ata_opcode_name(ATA_CMD_FPDMA_READ), \ 41 + ata_opcode_name(ATA_CMD_FPDMA_WRITE), \ 42 + ata_opcode_name(ATA_CMD_FPDMA_SEND), \ 43 + ata_opcode_name(ATA_CMD_FPDMA_RECV), \ 44 + ata_opcode_name(ATA_CMD_PIO_READ), \ 45 + ata_opcode_name(ATA_CMD_PIO_READ_EXT), \ 46 + ata_opcode_name(ATA_CMD_PIO_WRITE), \ 47 + ata_opcode_name(ATA_CMD_PIO_WRITE_EXT), \ 48 + ata_opcode_name(ATA_CMD_READ_MULTI), \ 49 + ata_opcode_name(ATA_CMD_READ_MULTI_EXT), \ 50 + 
ata_opcode_name(ATA_CMD_WRITE_MULTI), \ 51 + ata_opcode_name(ATA_CMD_WRITE_MULTI_EXT), \ 52 + ata_opcode_name(ATA_CMD_WRITE_MULTI_FUA_EXT), \ 53 + ata_opcode_name(ATA_CMD_SET_FEATURES), \ 54 + ata_opcode_name(ATA_CMD_SET_MULTI), \ 55 + ata_opcode_name(ATA_CMD_PACKET), \ 56 + ata_opcode_name(ATA_CMD_VERIFY), \ 57 + ata_opcode_name(ATA_CMD_VERIFY_EXT), \ 58 + ata_opcode_name(ATA_CMD_WRITE_UNCORR_EXT), \ 59 + ata_opcode_name(ATA_CMD_STANDBYNOW1), \ 60 + ata_opcode_name(ATA_CMD_IDLEIMMEDIATE), \ 61 + ata_opcode_name(ATA_CMD_SLEEP), \ 62 + ata_opcode_name(ATA_CMD_INIT_DEV_PARAMS), \ 63 + ata_opcode_name(ATA_CMD_READ_NATIVE_MAX), \ 64 + ata_opcode_name(ATA_CMD_READ_NATIVE_MAX_EXT), \ 65 + ata_opcode_name(ATA_CMD_SET_MAX), \ 66 + ata_opcode_name(ATA_CMD_SET_MAX_EXT), \ 67 + ata_opcode_name(ATA_CMD_READ_LOG_EXT), \ 68 + ata_opcode_name(ATA_CMD_WRITE_LOG_EXT), \ 69 + ata_opcode_name(ATA_CMD_READ_LOG_DMA_EXT), \ 70 + ata_opcode_name(ATA_CMD_WRITE_LOG_DMA_EXT), \ 71 + ata_opcode_name(ATA_CMD_TRUSTED_NONDATA), \ 72 + ata_opcode_name(ATA_CMD_TRUSTED_RCV), \ 73 + ata_opcode_name(ATA_CMD_TRUSTED_RCV_DMA), \ 74 + ata_opcode_name(ATA_CMD_TRUSTED_SND), \ 75 + ata_opcode_name(ATA_CMD_TRUSTED_SND_DMA), \ 76 + ata_opcode_name(ATA_CMD_PMP_READ), \ 77 + ata_opcode_name(ATA_CMD_PMP_READ_DMA), \ 78 + ata_opcode_name(ATA_CMD_PMP_WRITE), \ 79 + ata_opcode_name(ATA_CMD_PMP_WRITE_DMA), \ 80 + ata_opcode_name(ATA_CMD_CONF_OVERLAY), \ 81 + ata_opcode_name(ATA_CMD_SEC_SET_PASS), \ 82 + ata_opcode_name(ATA_CMD_SEC_UNLOCK), \ 83 + ata_opcode_name(ATA_CMD_SEC_ERASE_PREP), \ 84 + ata_opcode_name(ATA_CMD_SEC_ERASE_UNIT), \ 85 + ata_opcode_name(ATA_CMD_SEC_FREEZE_LOCK), \ 86 + ata_opcode_name(ATA_CMD_SEC_DISABLE_PASS), \ 87 + ata_opcode_name(ATA_CMD_CONFIG_STREAM), \ 88 + ata_opcode_name(ATA_CMD_SMART), \ 89 + ata_opcode_name(ATA_CMD_MEDIA_LOCK), \ 90 + ata_opcode_name(ATA_CMD_MEDIA_UNLOCK), \ 91 + ata_opcode_name(ATA_CMD_DSM), \ 92 + ata_opcode_name(ATA_CMD_CHK_MED_CRD_TYP), \ 93 + 
ata_opcode_name(ATA_CMD_CFA_REQ_EXT_ERR), \ 94 + ata_opcode_name(ATA_CMD_CFA_WRITE_NE), \ 95 + ata_opcode_name(ATA_CMD_CFA_TRANS_SECT), \ 96 + ata_opcode_name(ATA_CMD_CFA_ERASE), \ 97 + ata_opcode_name(ATA_CMD_CFA_WRITE_MULT_NE), \ 98 + ata_opcode_name(ATA_CMD_REQ_SENSE_DATA), \ 99 + ata_opcode_name(ATA_CMD_SANITIZE_DEVICE), \ 100 + ata_opcode_name(ATA_CMD_RESTORE), \ 101 + ata_opcode_name(ATA_CMD_READ_LONG), \ 102 + ata_opcode_name(ATA_CMD_READ_LONG_ONCE), \ 103 + ata_opcode_name(ATA_CMD_WRITE_LONG), \ 104 + ata_opcode_name(ATA_CMD_WRITE_LONG_ONCE)) 105 + 106 + #define ata_error_name(result) { result, #result } 107 + #define show_error_name(val) \ 108 + __print_symbolic(val, \ 109 + ata_error_name(ATA_ICRC), \ 110 + ata_error_name(ATA_UNC), \ 111 + ata_error_name(ATA_MC), \ 112 + ata_error_name(ATA_IDNF), \ 113 + ata_error_name(ATA_MCR), \ 114 + ata_error_name(ATA_ABORTED), \ 115 + ata_error_name(ATA_TRK0NF), \ 116 + ata_error_name(ATA_AMNF)) 117 + 118 + #define ata_protocol_name(proto) { proto, #proto } 119 + #define show_protocol_name(val) \ 120 + __print_symbolic(val, \ 121 + ata_protocol_name(ATA_PROT_UNKNOWN), \ 122 + ata_protocol_name(ATA_PROT_NODATA), \ 123 + ata_protocol_name(ATA_PROT_PIO), \ 124 + ata_protocol_name(ATA_PROT_DMA), \ 125 + ata_protocol_name(ATA_PROT_NCQ), \ 126 + ata_protocol_name(ATAPI_PROT_NODATA), \ 127 + ata_protocol_name(ATAPI_PROT_PIO), \ 128 + ata_protocol_name(ATAPI_PROT_DMA)) 129 + 130 + const char *libata_trace_parse_status(struct trace_seq*, unsigned char); 131 + #define __parse_status(s) libata_trace_parse_status(p, s) 132 + 133 + const char *libata_trace_parse_eh_action(struct trace_seq *, unsigned int); 134 + #define __parse_eh_action(a) libata_trace_parse_eh_action(p, a) 135 + 136 + const char *libata_trace_parse_eh_err_mask(struct trace_seq *, unsigned int); 137 + #define __parse_eh_err_mask(m) libata_trace_parse_eh_err_mask(p, m) 138 + 139 + const char *libata_trace_parse_qc_flags(struct trace_seq *, unsigned int); 140 + 
#define __parse_qc_flags(f) libata_trace_parse_qc_flags(p, f) 141 + 142 + TRACE_EVENT(ata_qc_issue, 143 + 144 + TP_PROTO(struct ata_queued_cmd *qc), 145 + 146 + TP_ARGS(qc), 147 + 148 + TP_STRUCT__entry( 149 + __field( unsigned int, ata_port ) 150 + __field( unsigned int, ata_dev ) 151 + __field( unsigned int, tag ) 152 + __field( unsigned char, cmd ) 153 + __field( unsigned char, dev ) 154 + __field( unsigned char, lbal ) 155 + __field( unsigned char, lbam ) 156 + __field( unsigned char, lbah ) 157 + __field( unsigned char, nsect ) 158 + __field( unsigned char, feature ) 159 + __field( unsigned char, hob_lbal ) 160 + __field( unsigned char, hob_lbam ) 161 + __field( unsigned char, hob_lbah ) 162 + __field( unsigned char, hob_nsect ) 163 + __field( unsigned char, hob_feature ) 164 + __field( unsigned char, ctl ) 165 + __field( unsigned char, proto ) 166 + __field( unsigned long, flags ) 167 + ), 168 + 169 + TP_fast_assign( 170 + __entry->ata_port = qc->ap->print_id; 171 + __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno; 172 + __entry->tag = qc->tag; 173 + __entry->proto = qc->tf.protocol; 174 + __entry->cmd = qc->tf.command; 175 + __entry->dev = qc->tf.device; 176 + __entry->lbal = qc->tf.lbal; 177 + __entry->lbam = qc->tf.lbam; 178 + __entry->lbah = qc->tf.lbah; 179 + __entry->hob_lbal = qc->tf.hob_lbal; 180 + __entry->hob_lbam = qc->tf.hob_lbam; 181 + __entry->hob_lbah = qc->tf.hob_lbah; 182 + __entry->feature = qc->tf.feature; 183 + __entry->hob_feature = qc->tf.hob_feature; 184 + __entry->nsect = qc->tf.nsect; 185 + __entry->hob_nsect = qc->tf.hob_nsect; 186 + ), 187 + 188 + TP_printk("ata_port=%u ata_dev=%u tag=%d proto=%s cmd=%s " \ 189 + " tf=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)", 190 + __entry->ata_port, __entry->ata_dev, __entry->tag, 191 + show_protocol_name(__entry->proto), 192 + show_opcode_name(__entry->cmd), 193 + __entry->cmd, __entry->feature, __entry->nsect, 194 + __entry->lbal, __entry->lbam, __entry->lbah, 195 + 
__entry->hob_feature, __entry->hob_nsect, 196 + __entry->hob_lbal, __entry->hob_lbam, __entry->hob_lbah, 197 + __entry->dev) 198 + ); 199 + 200 + DECLARE_EVENT_CLASS(ata_qc_complete_template, 201 + 202 + TP_PROTO(struct ata_queued_cmd *qc), 203 + 204 + TP_ARGS(qc), 205 + 206 + TP_STRUCT__entry( 207 + __field( unsigned int, ata_port ) 208 + __field( unsigned int, ata_dev ) 209 + __field( unsigned int, tag ) 210 + __field( unsigned char, status ) 211 + __field( unsigned char, dev ) 212 + __field( unsigned char, lbal ) 213 + __field( unsigned char, lbam ) 214 + __field( unsigned char, lbah ) 215 + __field( unsigned char, nsect ) 216 + __field( unsigned char, error ) 217 + __field( unsigned char, hob_lbal ) 218 + __field( unsigned char, hob_lbam ) 219 + __field( unsigned char, hob_lbah ) 220 + __field( unsigned char, hob_nsect ) 221 + __field( unsigned char, hob_feature ) 222 + __field( unsigned char, ctl ) 223 + __field( unsigned long, flags ) 224 + ), 225 + 226 + TP_fast_assign( 227 + __entry->ata_port = qc->ap->print_id; 228 + __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno; 229 + __entry->tag = qc->tag; 230 + __entry->status = qc->result_tf.command; 231 + __entry->dev = qc->result_tf.device; 232 + __entry->lbal = qc->result_tf.lbal; 233 + __entry->lbam = qc->result_tf.lbam; 234 + __entry->lbah = qc->result_tf.lbah; 235 + __entry->hob_lbal = qc->result_tf.hob_lbal; 236 + __entry->hob_lbam = qc->result_tf.hob_lbam; 237 + __entry->hob_lbah = qc->result_tf.hob_lbah; 238 + __entry->error = qc->result_tf.feature; 239 + __entry->hob_feature = qc->result_tf.hob_feature; 240 + __entry->nsect = qc->result_tf.nsect; 241 + __entry->hob_nsect = qc->result_tf.hob_nsect; 242 + ), 243 + 244 + TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s status=%s " \ 245 + " res=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)", 246 + __entry->ata_port, __entry->ata_dev, __entry->tag, 247 + __parse_qc_flags(__entry->flags), 248 + __parse_status(__entry->status), 249 + 
__entry->status, __entry->error, __entry->nsect, 250 + __entry->lbal, __entry->lbam, __entry->lbah, 251 + __entry->hob_feature, __entry->hob_nsect, 252 + __entry->hob_lbal, __entry->hob_lbam, __entry->hob_lbah, 253 + __entry->dev) 254 + ); 255 + 256 + DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_internal, 257 + TP_PROTO(struct ata_queued_cmd *qc), 258 + TP_ARGS(qc)); 259 + 260 + DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_failed, 261 + TP_PROTO(struct ata_queued_cmd *qc), 262 + TP_ARGS(qc)); 263 + 264 + DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_done, 265 + TP_PROTO(struct ata_queued_cmd *qc), 266 + TP_ARGS(qc)); 267 + 268 + TRACE_EVENT(ata_eh_link_autopsy, 269 + 270 + TP_PROTO(struct ata_device *dev, unsigned int eh_action, unsigned int eh_err_mask), 271 + 272 + TP_ARGS(dev, eh_action, eh_err_mask), 273 + 274 + TP_STRUCT__entry( 275 + __field( unsigned int, ata_port ) 276 + __field( unsigned int, ata_dev ) 277 + __field( unsigned int, eh_action ) 278 + __field( unsigned int, eh_err_mask) 279 + ), 280 + 281 + TP_fast_assign( 282 + __entry->ata_port = dev->link->ap->print_id; 283 + __entry->ata_dev = dev->link->pmp + dev->devno; 284 + __entry->eh_action = eh_action; 285 + __entry->eh_err_mask = eh_err_mask; 286 + ), 287 + 288 + TP_printk("ata_port=%u ata_dev=%u eh_action=%s err_mask=%s", 289 + __entry->ata_port, __entry->ata_dev, 290 + __parse_eh_action(__entry->eh_action), 291 + __parse_eh_err_mask(__entry->eh_err_mask)) 292 + ); 293 + 294 + TRACE_EVENT(ata_eh_link_autopsy_qc, 295 + 296 + TP_PROTO(struct ata_queued_cmd *qc), 297 + 298 + TP_ARGS(qc), 299 + 300 + TP_STRUCT__entry( 301 + __field( unsigned int, ata_port ) 302 + __field( unsigned int, ata_dev ) 303 + __field( unsigned int, tag ) 304 + __field( unsigned int, qc_flags ) 305 + __field( unsigned int, eh_err_mask) 306 + ), 307 + 308 + TP_fast_assign( 309 + __entry->ata_port = qc->ap->print_id; 310 + __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno; 311 + __entry->tag = 
qc->tag; 312 + __entry->qc_flags = qc->flags; 313 + __entry->eh_err_mask = qc->err_mask; 314 + ), 315 + 316 + TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s err_mask=%s", 317 + __entry->ata_port, __entry->ata_dev, __entry->tag, 318 + __parse_qc_flags(__entry->qc_flags), 319 + __parse_eh_err_mask(__entry->eh_err_mask)) 320 + ); 321 + 322 + #endif /* _TRACE_LIBATA_H */ 323 + 324 + /* This part must be outside protection */ 325 + #include <trace/define_trace.h>