Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata

Pull libata updates from Tejun Heo:

- ahci grew runtime power management support so that the controller can
be turned off if no devices are attached.

- sata_via isn't dead yet. It got hotplug support and a more refined
workaround for certain WD drives.

- Misc cleanups. There's a merge from for-4.5-fixes to avoid confusing
conflicts in the ahci PCI ID table.

* 'for-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata:
ata: ahci_xgene: dereferencing uninitialized pointer in probe
AHCI: Remove obsolete Intel Lewisburg SATA RAID device IDs
ata: sata_rcar: Use ARCH_RENESAS
sata_via: Implement hotplug for VT6421
sata_via: Apply WD workaround only when needed on VT6421
ahci: Add runtime PM support for the host controller
ahci: Add functions to manage runtime PM of AHCI ports
ahci: Convert driver to use modern PM hooks
ahci: Cache host controller version
scsi: Drop runtime PM usage count after host is added
scsi: Set request queue runtime PM status back to active on resume
block: Add blk_set_runtime_active()
ata: ahci_mvebu: add support for Armada 3700 variant
libata: fix unbalanced spin_lock_irqsave/spin_unlock_irq() in ata_scsi_park_show()
libata: support AHCI on OCTEON platform

+475 -57
+1
Documentation/devicetree/bindings/ata/ahci-platform.txt
··· 11 11 - compatible : compatible string, one of: 12 12 - "allwinner,sun4i-a10-ahci" 13 13 - "hisilicon,hisi-ahci" 14 + - "cavium,octeon-7130-ahci" 14 15 - "ibm,476gtr-ahci" 15 16 - "marvell,armada-380-ahci" 16 17 - "snps,dwc-ahci"
+42
Documentation/devicetree/bindings/mips/cavium/sata-uctl.txt
··· 1 + * UCTL SATA controller glue 2 + 3 + UCTL is the bridge unit between the I/O interconnect (an internal bus) 4 + and the SATA AHCI host controller (UAHC). It performs the following functions: 5 + - provides interfaces for the applications to access the UAHC AHCI 6 + registers on the CN71XX I/O space. 7 + - provides a bridge for UAHC to fetch AHCI command table entries and data 8 + buffers from Level 2 Cache. 9 + - posts interrupts to the CIU. 10 + - contains registers that: 11 + - control the behavior of the UAHC 12 + - control the clock/reset generation to UAHC 13 + - control endian swapping for all UAHC registers and DMA accesses 14 + 15 + Properties: 16 + 17 + - compatible: "cavium,octeon-7130-sata-uctl" 18 + 19 + Compatibility with the cn7130 SOC. 20 + 21 + - reg: The base address of the UCTL register bank. 22 + 23 + - #address-cells, #size-cells, ranges and dma-ranges must be present and hold 24 + suitable values to map all child nodes. 25 + 26 + Example: 27 + 28 + uctl@118006c000000 { 29 + compatible = "cavium,octeon-7130-sata-uctl"; 30 + reg = <0x11800 0x6c000000 0x0 0x100>; 31 + ranges; /* Direct mapping */ 32 + dma-ranges; 33 + #address-cells = <2>; 34 + #size-cells = <2>; 35 + 36 + sata: sata@16c0000000000 { 37 + compatible = "cavium,octeon-7130-ahci"; 38 + reg = <0x16c00 0x00000000 0x0 0x200>; 39 + interrupt-parent = <&cibsata>; 40 + interrupts = <2 4>; /* Bit: 2, level */ 41 + }; 42 + };
+9
arch/mips/include/asm/octeon/cvmx.h
··· 275 275 cvmx_read64(CVMX_MIO_BOOT_BIST_STAT); 276 276 } 277 277 278 + static inline void cvmx_writeq_csr(void __iomem *csr_addr, uint64_t val) 279 + { 280 + cvmx_write_csr((__force uint64_t)csr_addr, val); 281 + } 282 + 278 283 static inline void cvmx_write_io(uint64_t io_addr, uint64_t val) 279 284 { 280 285 cvmx_write64(io_addr, val); ··· 292 287 return val; 293 288 } 294 289 290 + static inline uint64_t cvmx_readq_csr(void __iomem *csr_addr) 291 + { 292 + return cvmx_read_csr((__force uint64_t) csr_addr); 293 + } 295 294 296 295 static inline void cvmx_send_single(uint64_t data) 297 296 {
+24
block/blk-core.c
··· 3529 3529 spin_unlock_irq(q->queue_lock); 3530 3530 } 3531 3531 EXPORT_SYMBOL(blk_post_runtime_resume); 3532 + 3533 + /** 3534 + * blk_set_runtime_active - Force runtime status of the queue to be active 3535 + * @q: the queue of the device 3536 + * 3537 + * If the device is left runtime suspended during system suspend the resume 3538 + * hook typically resumes the device and corrects runtime status 3539 + * accordingly. However, that does not affect the queue runtime PM status 3540 + * which is still "suspended". This prevents processing requests from the 3541 + * queue. 3542 + * 3543 + * This function can be used in driver's resume hook to correct queue 3544 + * runtime PM status and re-enable peeking requests from the queue. It 3545 + * should be called before first request is added to the queue. 3546 + */ 3547 + void blk_set_runtime_active(struct request_queue *q) 3548 + { 3549 + spin_lock_irq(q->queue_lock); 3550 + q->rpm_status = RPM_ACTIVE; 3551 + pm_runtime_mark_last_busy(q->dev); 3552 + pm_request_autosuspend(q->dev); 3553 + spin_unlock_irq(q->queue_lock); 3554 + } 3555 + EXPORT_SYMBOL(blk_set_runtime_active); 3532 3556 #endif 3533 3557 3534 3558 int __init blk_dev_init(void)
+10 -1
drivers/ata/Kconfig
··· 151 151 152 152 If unsure, say N. 153 153 154 + config AHCI_OCTEON 155 + tristate "Cavium Octeon Soc Serial ATA" 156 + depends on SATA_AHCI_PLATFORM && CAVIUM_OCTEON_SOC 157 + default y 158 + help 159 + This option enables support for Cavium Octeon SoC Serial ATA. 160 + 161 + If unsure, say N. 162 + 154 163 config AHCI_SUNXI 155 164 tristate "Allwinner sunxi AHCI SATA support" 156 165 depends on ARCH_SUNXI ··· 364 355 365 356 config SATA_RCAR 366 357 tristate "Renesas R-Car SATA support" 367 - depends on ARCH_SHMOBILE || COMPILE_TEST 358 + depends on ARCH_RENESAS || COMPILE_TEST 368 359 help 369 360 This option enables support for Renesas R-Car Serial ATA. 370 361
+1
drivers/ata/Makefile
··· 15 15 obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o 16 16 obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o 17 17 obj-$(CONFIG_AHCI_MVEBU) += ahci_mvebu.o libahci.o libahci_platform.o 18 + obj-$(CONFIG_AHCI_OCTEON) += ahci_octeon.o 18 19 obj-$(CONFIG_AHCI_SUNXI) += ahci_sunxi.o libahci.o libahci_platform.o 19 20 obj-$(CONFIG_AHCI_ST) += ahci_st.o libahci.o libahci_platform.o 20 21 obj-$(CONFIG_AHCI_TEGRA) += ahci_tegra.o libahci.o libahci_platform.o
+75 -33
drivers/ata/ahci.c
··· 85 85 }; 86 86 87 87 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 88 + static void ahci_remove_one(struct pci_dev *dev); 88 89 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, 89 90 unsigned long deadline); 90 91 static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, ··· 95 94 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, 96 95 unsigned long deadline); 97 96 #ifdef CONFIG_PM 98 - static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); 99 - static int ahci_pci_device_resume(struct pci_dev *pdev); 97 + static int ahci_pci_device_runtime_suspend(struct device *dev); 98 + static int ahci_pci_device_runtime_resume(struct device *dev); 99 + #ifdef CONFIG_PM_SLEEP 100 + static int ahci_pci_device_suspend(struct device *dev); 101 + static int ahci_pci_device_resume(struct device *dev); 100 102 #endif 103 + #endif /* CONFIG_PM */ 101 104 102 105 static struct scsi_host_template ahci_sht = { 103 106 AHCI_SHT("ahci"), ··· 376 371 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/ 377 372 { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/ 378 373 { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/ 379 - { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/ 380 374 { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/ 381 - { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/ 382 375 { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/ 383 376 { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/ 384 377 { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/ 385 - { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/ 386 378 { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ 387 - { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/ 388 379 { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ 389 380 
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/ 390 381 ··· 564 563 { } /* terminate list */ 565 564 }; 566 565 566 + static const struct dev_pm_ops ahci_pci_pm_ops = { 567 + SET_SYSTEM_SLEEP_PM_OPS(ahci_pci_device_suspend, ahci_pci_device_resume) 568 + SET_RUNTIME_PM_OPS(ahci_pci_device_runtime_suspend, 569 + ahci_pci_device_runtime_resume, NULL) 570 + }; 567 571 568 572 static struct pci_driver ahci_pci_driver = { 569 573 .name = DRV_NAME, 570 574 .id_table = ahci_pci_tbl, 571 575 .probe = ahci_init_one, 572 - .remove = ata_pci_remove_one, 573 - #ifdef CONFIG_PM 574 - .suspend = ahci_pci_device_suspend, 575 - .resume = ahci_pci_device_resume, 576 - #endif 576 + .remove = ahci_remove_one, 577 + .driver = { 578 + .pm = &ahci_pci_pm_ops, 579 + }, 577 580 }; 578 581 579 582 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE) ··· 806 801 807 802 808 803 #ifdef CONFIG_PM 809 - static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 804 + static void ahci_pci_disable_interrupts(struct ata_host *host) 810 805 { 811 - struct ata_host *host = pci_get_drvdata(pdev); 812 806 struct ahci_host_priv *hpriv = host->private_data; 813 807 void __iomem *mmio = hpriv->mmio; 814 808 u32 ctl; 815 809 816 - if (mesg.event & PM_EVENT_SUSPEND && 817 - hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { 810 + /* AHCI spec rev1.1 section 8.3.3: 811 + * Software must disable interrupts prior to requesting a 812 + * transition of the HBA to D3 state. 
813 + */ 814 + ctl = readl(mmio + HOST_CTL); 815 + ctl &= ~HOST_IRQ_EN; 816 + writel(ctl, mmio + HOST_CTL); 817 + readl(mmio + HOST_CTL); /* flush */ 818 + } 819 + 820 + static int ahci_pci_device_runtime_suspend(struct device *dev) 821 + { 822 + struct pci_dev *pdev = to_pci_dev(dev); 823 + struct ata_host *host = pci_get_drvdata(pdev); 824 + 825 + ahci_pci_disable_interrupts(host); 826 + return 0; 827 + } 828 + 829 + static int ahci_pci_device_runtime_resume(struct device *dev) 830 + { 831 + struct pci_dev *pdev = to_pci_dev(dev); 832 + struct ata_host *host = pci_get_drvdata(pdev); 833 + int rc; 834 + 835 + rc = ahci_pci_reset_controller(host); 836 + if (rc) 837 + return rc; 838 + ahci_pci_init_controller(host); 839 + return 0; 840 + } 841 + 842 + #ifdef CONFIG_PM_SLEEP 843 + static int ahci_pci_device_suspend(struct device *dev) 844 + { 845 + struct pci_dev *pdev = to_pci_dev(dev); 846 + struct ata_host *host = pci_get_drvdata(pdev); 847 + struct ahci_host_priv *hpriv = host->private_data; 848 + 849 + if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { 818 850 dev_err(&pdev->dev, 819 851 "BIOS update required for suspend/resume\n"); 820 852 return -EIO; 821 853 } 822 854 823 - if (mesg.event & PM_EVENT_SLEEP) { 824 - /* AHCI spec rev1.1 section 8.3.3: 825 - * Software must disable interrupts prior to requesting a 826 - * transition of the HBA to D3 state. 
827 - */ 828 - ctl = readl(mmio + HOST_CTL); 829 - ctl &= ~HOST_IRQ_EN; 830 - writel(ctl, mmio + HOST_CTL); 831 - readl(mmio + HOST_CTL); /* flush */ 832 - } 833 - 834 - return ata_pci_device_suspend(pdev, mesg); 855 + ahci_pci_disable_interrupts(host); 856 + return ata_host_suspend(host, PMSG_SUSPEND); 835 857 } 836 858 837 - static int ahci_pci_device_resume(struct pci_dev *pdev) 859 + static int ahci_pci_device_resume(struct device *dev) 838 860 { 861 + struct pci_dev *pdev = to_pci_dev(dev); 839 862 struct ata_host *host = pci_get_drvdata(pdev); 840 863 int rc; 841 - 842 - rc = ata_pci_device_do_resume(pdev); 843 - if (rc) 844 - return rc; 845 864 846 865 /* Apple BIOS helpfully mangles the registers on resume */ 847 866 if (is_mcp89_apple(pdev)) ··· 884 855 return 0; 885 856 } 886 857 #endif 858 + 859 + #endif /* CONFIG_PM */ 887 860 888 861 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) 889 862 { ··· 1749 1718 1750 1719 pci_set_master(pdev); 1751 1720 1752 - return ahci_host_activate(host, &ahci_sht); 1721 + rc = ahci_host_activate(host, &ahci_sht); 1722 + if (rc) 1723 + return rc; 1724 + 1725 + pm_runtime_put_noidle(&pdev->dev); 1726 + return 0; 1727 + } 1728 + 1729 + static void ahci_remove_one(struct pci_dev *pdev) 1730 + { 1731 + pm_runtime_get_noresume(&pdev->dev); 1732 + ata_pci_remove_one(pdev); 1753 1733 } 1754 1734 1755 1735 module_pci_driver(ahci_pci_driver);
+1
drivers/ata/ahci.h
··· 335 335 void __iomem * mmio; /* bus-independent mem map */ 336 336 u32 cap; /* cap to use */ 337 337 u32 cap2; /* cap2 to use */ 338 + u32 version; /* cached version */ 338 339 u32 port_map; /* port map to use */ 339 340 u32 saved_cap; /* saved initial cap */ 340 341 u32 saved_cap2; /* saved initial cap2 */
+9 -5
drivers/ata/ahci_mvebu.c
··· 112 112 if (rc) 113 113 return rc; 114 114 115 - dram = mv_mbus_dram_info(); 116 - if (!dram) 117 - return -ENODEV; 115 + if (of_device_is_compatible(pdev->dev.of_node, 116 + "marvell,armada-380-ahci")) { 117 + dram = mv_mbus_dram_info(); 118 + if (!dram) 119 + return -ENODEV; 118 120 119 - ahci_mvebu_mbus_config(hpriv, dram); 120 - ahci_mvebu_regret_option(hpriv); 121 + ahci_mvebu_mbus_config(hpriv, dram); 122 + ahci_mvebu_regret_option(hpriv); 123 + } 121 124 122 125 rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info, 123 126 &ahci_platform_sht); ··· 136 133 137 134 static const struct of_device_id ahci_mvebu_of_match[] = { 138 135 { .compatible = "marvell,armada-380-ahci", }, 136 + { .compatible = "marvell,armada-3700-ahci", }, 139 137 { }, 140 138 }; 141 139 MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
+105
drivers/ata/ahci_octeon.c
··· 1 + /* 2 + * SATA glue for Cavium Octeon III SOCs. 3 + * 4 + * 5 + * This file is subject to the terms and conditions of the GNU General Public 6 + * License. See the file "COPYING" in the main directory of this archive 7 + * for more details. 8 + * 9 + * Copyright (C) 2010-2015 Cavium Networks 10 + * 11 + */ 12 + 13 + #include <linux/module.h> 14 + #include <linux/dma-mapping.h> 15 + #include <linux/platform_device.h> 16 + #include <linux/of_platform.h> 17 + 18 + #include <asm/octeon/octeon.h> 19 + #include <asm/bitfield.h> 20 + 21 + #define CVMX_SATA_UCTL_SHIM_CFG 0xE8 22 + 23 + #define SATA_UCTL_ENDIAN_MODE_BIG 1 24 + #define SATA_UCTL_ENDIAN_MODE_LITTLE 0 25 + #define SATA_UCTL_ENDIAN_MODE_MASK 3 26 + 27 + #define SATA_UCTL_DMA_ENDIAN_MODE_SHIFT 8 28 + #define SATA_UCTL_CSR_ENDIAN_MODE_SHIFT 0 29 + #define SATA_UCTL_DMA_READ_CMD_SHIFT 12 30 + 31 + static int ahci_octeon_probe(struct platform_device *pdev) 32 + { 33 + struct device *dev = &pdev->dev; 34 + struct device_node *node = dev->of_node; 35 + struct resource *res; 36 + void __iomem *base; 37 + u64 cfg; 38 + int ret; 39 + 40 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 41 + if (!res) { 42 + dev_err(&pdev->dev, "Platform resource[0] is missing\n"); 43 + return -ENODEV; 44 + } 45 + 46 + base = devm_ioremap_resource(&pdev->dev, res); 47 + if (IS_ERR(base)) 48 + return PTR_ERR(base); 49 + 50 + cfg = cvmx_readq_csr(base + CVMX_SATA_UCTL_SHIM_CFG); 51 + 52 + cfg &= ~(SATA_UCTL_ENDIAN_MODE_MASK << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT); 53 + cfg &= ~(SATA_UCTL_ENDIAN_MODE_MASK << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT); 54 + 55 + #ifdef __BIG_ENDIAN 56 + cfg |= SATA_UCTL_ENDIAN_MODE_BIG << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT; 57 + cfg |= SATA_UCTL_ENDIAN_MODE_BIG << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT; 58 + #else 59 + cfg |= SATA_UCTL_ENDIAN_MODE_LITTLE << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT; 60 + cfg |= SATA_UCTL_ENDIAN_MODE_LITTLE << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT; 61 + #endif 62 + 63 + cfg |= 1 << 
SATA_UCTL_DMA_READ_CMD_SHIFT; 64 + 65 + cvmx_writeq_csr(base + CVMX_SATA_UCTL_SHIM_CFG, cfg); 66 + 67 + if (!node) { 68 + dev_err(dev, "no device node, failed to add octeon sata\n"); 69 + return -ENODEV; 70 + } 71 + 72 + ret = of_platform_populate(node, NULL, NULL, dev); 73 + if (ret) { 74 + dev_err(dev, "failed to add ahci-platform core\n"); 75 + return ret; 76 + } 77 + 78 + return 0; 79 + } 80 + 81 + static int ahci_octeon_remove(struct platform_device *pdev) 82 + { 83 + return 0; 84 + } 85 + 86 + static const struct of_device_id octeon_ahci_match[] = { 87 + { .compatible = "cavium,octeon-7130-sata-uctl", }, 88 + {}, 89 + }; 90 + MODULE_DEVICE_TABLE(of, octeon_ahci_match); 91 + 92 + static struct platform_driver ahci_octeon_driver = { 93 + .probe = ahci_octeon_probe, 94 + .remove = ahci_octeon_remove, 95 + .driver = { 96 + .name = "octeon-ahci", 97 + .of_match_table = octeon_ahci_match, 98 + }, 99 + }; 100 + 101 + module_platform_driver(ahci_octeon_driver); 102 + 103 + MODULE_LICENSE("GPL"); 104 + MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>"); 105 + MODULE_DESCRIPTION("Cavium Inc. sata config.");
+1
drivers/ata/ahci_platform.c
··· 76 76 { .compatible = "ibm,476gtr-ahci", }, 77 77 { .compatible = "snps,dwc-ahci", }, 78 78 { .compatible = "hisilicon,hisi-ahci", }, 79 + { .compatible = "cavium,octeon-7130-ahci", }, 79 80 {}, 80 81 }; 81 82 MODULE_DEVICE_TABLE(of, ahci_of_match);
+2 -2
drivers/ata/ahci_xgene.c
··· 821 821 dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n", 822 822 __func__); 823 823 version = XGENE_AHCI_V1; 824 - } 825 - if (info->valid & ACPI_VALID_CID) 824 + } else if (info->valid & ACPI_VALID_CID) { 826 825 version = XGENE_AHCI_V2; 826 + } 827 827 } 828 828 } 829 829 #endif
+50 -5
drivers/ata/libahci.c
··· 225 225 WARN_ON(1); 226 226 } 227 227 228 + /** 229 + * ahci_rpm_get_port - Make sure the port is powered on 230 + * @ap: Port to power on 231 + * 232 + * Whenever there is need to access the AHCI host registers outside of 233 + * normal execution paths, call this function to make sure the host is 234 + * actually powered on. 235 + */ 236 + static int ahci_rpm_get_port(struct ata_port *ap) 237 + { 238 + return pm_runtime_get_sync(ap->dev); 239 + } 240 + 241 + /** 242 + * ahci_rpm_put_port - Undoes ahci_rpm_get_port() 243 + * @ap: Port to power down 244 + * 245 + * Undoes ahci_rpm_get_port() and possibly powers down the AHCI host 246 + * if it has no more active users. 247 + */ 248 + static void ahci_rpm_put_port(struct ata_port *ap) 249 + { 250 + pm_runtime_put(ap->dev); 251 + } 252 + 228 253 static ssize_t ahci_show_host_caps(struct device *dev, 229 254 struct device_attribute *attr, char *buf) 230 255 { ··· 276 251 struct Scsi_Host *shost = class_to_shost(dev); 277 252 struct ata_port *ap = ata_shost_to_port(shost); 278 253 struct ahci_host_priv *hpriv = ap->host->private_data; 279 - void __iomem *mmio = hpriv->mmio; 280 254 281 - return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION)); 255 + return sprintf(buf, "%x\n", hpriv->version); 282 256 } 283 257 284 258 static ssize_t ahci_show_port_cmd(struct device *dev, ··· 286 262 struct Scsi_Host *shost = class_to_shost(dev); 287 263 struct ata_port *ap = ata_shost_to_port(shost); 288 264 void __iomem *port_mmio = ahci_port_base(ap); 265 + ssize_t ret; 289 266 290 - return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD)); 267 + ahci_rpm_get_port(ap); 268 + ret = sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD)); 269 + ahci_rpm_put_port(ap); 270 + 271 + return ret; 291 272 } 292 273 293 274 static ssize_t ahci_read_em_buffer(struct device *dev, ··· 308 279 size_t count; 309 280 int i; 310 281 282 + ahci_rpm_get_port(ap); 311 283 spin_lock_irqsave(ap->lock, flags); 312 284 313 285 em_ctl = readl(mmio + 
HOST_EM_CTL); 314 286 if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT || 315 287 !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) { 316 288 spin_unlock_irqrestore(ap->lock, flags); 289 + ahci_rpm_put_port(ap); 317 290 return -EINVAL; 318 291 } 319 292 320 293 if (!(em_ctl & EM_CTL_MR)) { 321 294 spin_unlock_irqrestore(ap->lock, flags); 295 + ahci_rpm_put_port(ap); 322 296 return -EAGAIN; 323 297 } 324 298 ··· 349 317 } 350 318 351 319 spin_unlock_irqrestore(ap->lock, flags); 320 + ahci_rpm_put_port(ap); 352 321 353 322 return i; 354 323 } ··· 374 341 size % 4 || size > hpriv->em_buf_sz) 375 342 return -EINVAL; 376 343 344 + ahci_rpm_get_port(ap); 377 345 spin_lock_irqsave(ap->lock, flags); 378 346 379 347 em_ctl = readl(mmio + HOST_EM_CTL); 380 348 if (em_ctl & EM_CTL_TM) { 381 349 spin_unlock_irqrestore(ap->lock, flags); 350 + ahci_rpm_put_port(ap); 382 351 return -EBUSY; 383 352 } 384 353 ··· 393 358 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL); 394 359 395 360 spin_unlock_irqrestore(ap->lock, flags); 361 + ahci_rpm_put_port(ap); 396 362 397 363 return size; 398 364 } ··· 407 371 void __iomem *mmio = hpriv->mmio; 408 372 u32 em_ctl; 409 373 374 + ahci_rpm_get_port(ap); 410 375 em_ctl = readl(mmio + HOST_EM_CTL); 376 + ahci_rpm_put_port(ap); 411 377 412 378 return sprintf(buf, "%s%s%s%s\n", 413 379 em_ctl & EM_CTL_LED ? 
"led " : "", ··· 547 509 /* record values to use during operation */ 548 510 hpriv->cap = cap; 549 511 hpriv->cap2 = cap2; 512 + hpriv->version = readl(mmio + HOST_VERSION); 550 513 hpriv->port_map = port_map; 551 514 552 515 if (!hpriv->start_engine) ··· 1053 1014 else 1054 1015 return -EINVAL; 1055 1016 1017 + ahci_rpm_get_port(ap); 1056 1018 spin_lock_irqsave(ap->lock, flags); 1057 1019 1058 1020 /* ··· 1063 1023 em_ctl = readl(mmio + HOST_EM_CTL); 1064 1024 if (em_ctl & EM_CTL_TM) { 1065 1025 spin_unlock_irqrestore(ap->lock, flags); 1026 + ahci_rpm_put_port(ap); 1066 1027 return -EBUSY; 1067 1028 } 1068 1029 ··· 1091 1050 emp->led_state = state; 1092 1051 1093 1052 spin_unlock_irqrestore(ap->lock, flags); 1053 + ahci_rpm_put_port(ap); 1054 + 1094 1055 return size; 1095 1056 } 1096 1057 ··· 2258 2215 2259 2216 int ahci_port_resume(struct ata_port *ap) 2260 2217 { 2218 + ahci_rpm_get_port(ap); 2219 + 2261 2220 ahci_power_up(ap); 2262 2221 ahci_start_port(ap); 2263 2222 ··· 2286 2241 ata_port_freeze(ap); 2287 2242 } 2288 2243 2244 + ahci_rpm_put_port(ap); 2289 2245 return rc; 2290 2246 } 2291 2247 #endif ··· 2402 2356 void ahci_print_info(struct ata_host *host, const char *scc_s) 2403 2357 { 2404 2358 struct ahci_host_priv *hpriv = host->private_data; 2405 - void __iomem *mmio = hpriv->mmio; 2406 2359 u32 vers, cap, cap2, impl, speed; 2407 2360 const char *speed_s; 2408 2361 2409 - vers = readl(mmio + HOST_VERSION); 2362 + vers = hpriv->version; 2410 2363 cap = hpriv->cap; 2411 2364 cap2 = hpriv->cap2; 2412 2365 impl = hpriv->port_map;
+2 -2
drivers/ata/libata-scsi.c
··· 174 174 struct ata_port *ap; 175 175 struct ata_link *link; 176 176 struct ata_device *dev; 177 - unsigned long flags, now; 177 + unsigned long now; 178 178 unsigned int uninitialized_var(msecs); 179 179 int rc = 0; 180 180 181 181 ap = ata_shost_to_port(sdev->host); 182 182 183 - spin_lock_irqsave(ap->lock, flags); 183 + spin_lock_irq(ap->lock); 184 184 dev = ata_scsi_find_dev(ap, sdev); 185 185 if (!dev) { 186 186 rc = -ENODEV;
+124 -9
drivers/ata/sata_via.c
··· 61 61 SATA_CHAN_ENAB = 0x40, /* SATA channel enable */ 62 62 SATA_INT_GATE = 0x41, /* SATA interrupt gating */ 63 63 SATA_NATIVE_MODE = 0x42, /* Native mode enable */ 64 + SVIA_MISC_3 = 0x46, /* Miscellaneous Control III */ 64 65 PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */ 65 66 PATA_PIO_TIMING = 0xAB, /* PATA timing register */ 66 67 ··· 72 71 NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), 73 72 74 73 SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ 74 + 75 + SATA_HOTPLUG = (1 << 5), /* enable IRQ on hotplug */ 76 + }; 77 + 78 + struct svia_priv { 79 + bool wd_workaround; 75 80 }; 76 81 77 82 static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 83 + #ifdef CONFIG_PM_SLEEP 84 + static int svia_pci_device_resume(struct pci_dev *pdev); 85 + #endif 78 86 static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); 79 87 static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); 80 88 static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val); ··· 95 85 static int vt6421_pata_cable_detect(struct ata_port *ap); 96 86 static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev); 97 87 static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev); 88 + static void vt6421_error_handler(struct ata_port *ap); 98 89 99 90 static const struct pci_device_id svia_pci_tbl[] = { 100 91 { PCI_VDEVICE(VIA, 0x5337), vt6420 }, ··· 116 105 .probe = svia_init_one, 117 106 #ifdef CONFIG_PM_SLEEP 118 107 .suspend = ata_pci_device_suspend, 119 - .resume = ata_pci_device_resume, 108 + .resume = svia_pci_device_resume, 120 109 #endif 121 110 .remove = ata_pci_remove_one, 122 111 }; ··· 148 137 .inherits = &svia_base_ops, 149 138 .scr_read = svia_scr_read, 150 139 .scr_write = svia_scr_write, 140 + .error_handler = vt6421_error_handler, 151 141 }; 152 142 153 143 static struct ata_port_operations vt8251_ops = { ··· 548 536 return 
0; 549 537 } 550 538 551 - static void svia_configure(struct pci_dev *pdev, int board_id) 539 + static void svia_wd_fix(struct pci_dev *pdev) 540 + { 541 + u8 tmp8; 542 + 543 + pci_read_config_byte(pdev, 0x52, &tmp8); 544 + pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2)); 545 + } 546 + 547 + static irqreturn_t vt6421_interrupt(int irq, void *dev_instance) 548 + { 549 + struct ata_host *host = dev_instance; 550 + irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance); 551 + 552 + /* if the IRQ was not handled, it might be a hotplug IRQ */ 553 + if (rc != IRQ_HANDLED) { 554 + u32 serror; 555 + unsigned long flags; 556 + 557 + spin_lock_irqsave(&host->lock, flags); 558 + /* check for hotplug on port 0 */ 559 + svia_scr_read(&host->ports[0]->link, SCR_ERROR, &serror); 560 + if (serror & SERR_PHYRDY_CHG) { 561 + ata_ehi_hotplugged(&host->ports[0]->link.eh_info); 562 + ata_port_freeze(host->ports[0]); 563 + rc = IRQ_HANDLED; 564 + } 565 + /* check for hotplug on port 1 */ 566 + svia_scr_read(&host->ports[1]->link, SCR_ERROR, &serror); 567 + if (serror & SERR_PHYRDY_CHG) { 568 + ata_ehi_hotplugged(&host->ports[1]->link.eh_info); 569 + ata_port_freeze(host->ports[1]); 570 + rc = IRQ_HANDLED; 571 + } 572 + spin_unlock_irqrestore(&host->lock, flags); 573 + } 574 + 575 + return rc; 576 + } 577 + 578 + static void vt6421_error_handler(struct ata_port *ap) 579 + { 580 + struct svia_priv *hpriv = ap->host->private_data; 581 + struct pci_dev *pdev = to_pci_dev(ap->host->dev); 582 + u32 serror; 583 + 584 + /* see svia_configure() for description */ 585 + if (!hpriv->wd_workaround) { 586 + svia_scr_read(&ap->link, SCR_ERROR, &serror); 587 + if (serror == 0x1000500) { 588 + ata_port_warn(ap, "Incompatible drive: enabling workaround. 
This slows down transfer rate to ~60 MB/s"); 589 + svia_wd_fix(pdev); 590 + hpriv->wd_workaround = true; 591 + ap->link.eh_context.i.flags |= ATA_EHI_QUIET; 592 + } 593 + } 594 + 595 + ata_sff_error_handler(ap); 596 + } 597 + 598 + static void svia_configure(struct pci_dev *pdev, int board_id, 599 + struct svia_priv *hpriv) 552 600 { 553 601 u8 tmp8; 554 602 ··· 644 572 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); 645 573 } 646 574 575 + /* enable IRQ on hotplug */ 576 + pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8); 577 + if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) { 578 + dev_dbg(&pdev->dev, 579 + "enabling SATA hotplug (0x%x)\n", 580 + (int) tmp8); 581 + tmp8 |= SATA_HOTPLUG; 582 + pci_write_config_byte(pdev, SVIA_MISC_3, tmp8); 583 + } 584 + 647 585 /* 648 586 * vt6420/1 has problems talking to some drives. The following 649 587 * is the fix from Joseph Chan <JosephChan@via.com.tw>. ··· 675 593 * https://bugzilla.kernel.org/show_bug.cgi?id=15173 676 594 * http://article.gmane.org/gmane.linux.ide/46352 677 595 * http://thread.gmane.org/gmane.linux.kernel/1062139 596 + * 597 + * As the fix slows down data transfer, apply it only if the error 598 + * actually appears - see vt6421_error_handler() 599 + * Apply the fix always on vt6420 as we don't know if SCR_ERROR can be 600 + * read safely. 
678 601 */ 679 - if (board_id == vt6420 || board_id == vt6421) { 680 - pci_read_config_byte(pdev, 0x52, &tmp8); 681 - tmp8 |= 1 << 2; 682 - pci_write_config_byte(pdev, 0x52, tmp8); 602 + if (board_id == vt6420) { 603 + svia_wd_fix(pdev); 604 + hpriv->wd_workaround = true; 683 605 } 684 606 } 685 607 ··· 694 608 struct ata_host *host = NULL; 695 609 int board_id = (int) ent->driver_data; 696 610 const unsigned *bar_sizes; 611 + struct svia_priv *hpriv; 697 612 698 613 ata_print_version_once(&pdev->dev, DRV_VERSION); 699 614 ··· 734 647 if (rc) 735 648 return rc; 736 649 737 - svia_configure(pdev, board_id); 650 + hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 651 + if (!hpriv) 652 + return -ENOMEM; 653 + host->private_data = hpriv; 654 + 655 + svia_configure(pdev, board_id, hpriv); 738 656 739 657 pci_set_master(pdev); 740 - return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, 741 - IRQF_SHARED, &svia_sht); 658 + if (board_id == vt6421) 659 + return ata_host_activate(host, pdev->irq, vt6421_interrupt, 660 + IRQF_SHARED, &svia_sht); 661 + else 662 + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, 663 + IRQF_SHARED, &svia_sht); 742 664 } 665 + 666 + #ifdef CONFIG_PM_SLEEP 667 + static int svia_pci_device_resume(struct pci_dev *pdev) 668 + { 669 + struct ata_host *host = pci_get_drvdata(pdev); 670 + struct svia_priv *hpriv = host->private_data; 671 + int rc; 672 + 673 + rc = ata_pci_device_do_resume(pdev); 674 + if (rc) 675 + return rc; 676 + 677 + if (hpriv->wd_workaround) 678 + svia_wd_fix(pdev); 679 + ata_host_resume(host); 680 + 681 + return 0; 682 + } 683 + #endif 743 684 744 685 module_pci_driver(svia_pci_driver);
+7
drivers/scsi/hosts.c
··· 250 250 if (error) 251 251 goto out_destroy_freelist; 252 252 253 + /* 254 + * Increase usage count temporarily here so that calling 255 + * scsi_autopm_put_host() will trigger runtime idle if there is 256 + * nothing else preventing suspending the device. 257 + */ 258 + pm_runtime_get_noresume(&shost->shost_gendev); 253 259 pm_runtime_set_active(&shost->shost_gendev); 254 260 pm_runtime_enable(&shost->shost_gendev); 255 261 device_enable_async_suspend(&shost->shost_gendev); ··· 296 290 goto out_destroy_host; 297 291 298 292 scsi_proc_host_add(shost); 293 + scsi_autopm_put_host(shost); 299 294 return error; 300 295 301 296 out_destroy_host:
+10
drivers/scsi/scsi_pm.c
··· 139 139 else 140 140 fn = NULL; 141 141 142 + /* 143 + * Forcibly set runtime PM status of request queue to "active" to 144 + * make sure we can again get requests from the queue (see also 145 + * blk_pm_peek_request()). 146 + * 147 + * The resume hook will correct runtime PM status of the disk. 148 + */ 149 + if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev)) 150 + blk_set_runtime_active(to_scsi_device(dev)->request_queue); 151 + 142 152 if (fn) { 143 153 async_schedule_domain(fn, dev, &scsi_sd_pm_domain); 144 154
+2
include/linux/blkdev.h
··· 1029 1029 extern void blk_post_runtime_suspend(struct request_queue *q, int err); 1030 1030 extern void blk_pre_runtime_resume(struct request_queue *q); 1031 1031 extern void blk_post_runtime_resume(struct request_queue *q, int err); 1032 + extern void blk_set_runtime_active(struct request_queue *q); 1032 1033 #else 1033 1034 static inline void blk_pm_runtime_init(struct request_queue *q, 1034 1035 struct device *dev) {} ··· 1040 1039 static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} 1041 1040 static inline void blk_pre_runtime_resume(struct request_queue *q) {} 1042 1041 static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} 1042 + extern inline void blk_set_runtime_active(struct request_queue *q) {} 1043 1043 #endif 1044 1044 1045 1045 /*