Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

File shown at tag v3.13-rc4 — 353 lines, 8.4 kB (view raw)
/*
 * AHCI SATA platform driver
 *
 * Copyright 2004-2005  Red Hat, Inc.
 *   Jeff Garzik <jgarzik@pobox.com>
 * Copyright 2010  MontaVista Software, LLC.
 *   Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 */

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
#include "ahci.h"

/* Forward declaration: referenced by the port_operations tables below. */
static void ahci_host_stop(struct ata_host *host);

/* Index into ahci_port_info[], selected via the platform_device_id table. */
enum ahci_type {
	AHCI,		/* standard platform ahci */
	IMX53_AHCI,	/* ahci on i.mx53 */
	STRICT_AHCI,	/* delayed DMA engine start */
};

/* Platform bus id table: maps device names to an ahci_type driver_data. */
static struct platform_device_id ahci_devtype[] = {
	{
		.name = "ahci",
		.driver_data = AHCI,
	}, {
		.name = "imx53-ahci",
		.driver_data = IMX53_AHCI,
	}, {
		.name = "strict-ahci",
		.driver_data = STRICT_AHCI,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, ahci_devtype);

/*
 * Default port operations: stock AHCI ops plus a host_stop hook that
 * runs the platform exit callback and releases the clock.
 * Exported so other platform glue drivers can reuse it.
 */
struct ata_port_operations ahci_platform_ops = {
	.inherits	= &ahci_ops,
	.host_stop	= ahci_host_stop,
};
EXPORT_SYMBOL_GPL(ahci_platform_ops);

/* Variant used by IMX53_AHCI: inherits the SRST-retrying PMP ops instead. */
static struct ata_port_operations ahci_platform_retry_srst_ops = {
	.inherits	= &ahci_pmp_retry_srst_ops,
	.host_stop	= ahci_host_stop,
};

/* Per-type port info; a platform may override it via pdata->ata_port_info. */
static const struct ata_port_info ahci_port_info[] = {
	/* by features */
	[AHCI] = {
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_platform_ops,
	},
	[IMX53_AHCI] = {
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_platform_retry_srst_ops,
	},
	[STRICT_AHCI] = {
		AHCI_HFLAGS	(AHCI_HFLAG_DELAY_ENGINE),
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_platform_ops,
	},
};

static struct scsi_host_template ahci_platform_sht = {
	AHCI_SHT("ahci_platform"),
};

/**
 * ahci_probe - platform device probe entry point
 * @pdev: platform device carrying the MMIO resource, IRQ, and optional
 *	  ahci_platform_data (init/exit/suspend/resume callbacks, port info
 *	  override, port-map masks)
 *
 * Maps the controller registers, optionally enables a device clock (the
 * clock is treated as optional: a clk_get() failure is only logged), runs
 * the platform init callback, then allocates, resets, and activates the
 * ATA host.
 *
 * Error unwinding is via the goto labels at the bottom; each label undoes
 * the steps acquired before the failing one (exit callback, clock disable,
 * clock put). MMIO mapping and hpriv use devm_* and need no manual cleanup.
 *
 * Returns 0 on success or a negative errno.
 */
static int ahci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_platform_data *pdata = dev_get_platdata(dev);
	const struct platform_device_id *id = platform_get_device_id(pdev);
	/* No id table match (e.g. OF probe) falls back to entry 0 (AHCI). */
	struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int n_ports;
	int i;
	int rc;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "no irq\n");
		return -EINVAL;
	}

	/* Platform data may supply a full port-info override. */
	if (pdata && pdata->ata_port_info)
		pi = *pdata->ata_port_info;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		return -ENOMEM;
	}

	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		return -ENOMEM;
	}

	/*
	 * The clock is optional: on failure hpriv->clk stays an ERR_PTR
	 * and every later clk operation is guarded with !IS_ERR().
	 */
	hpriv->clk = clk_get(dev, NULL);
	if (IS_ERR(hpriv->clk)) {
		dev_err(dev, "can't get clock\n");
	} else {
		rc = clk_prepare_enable(hpriv->clk);
		if (rc) {
			dev_err(dev, "clock prepare enable failed");
			goto free_clk;
		}
	}

	/*
	 * Some platforms might need to prepare for mmio region access,
	 * which could be done in the following init call. So, the mmio
	 * region shouldn't be accessed before init (if provided) has
	 * returned successfully.
	 */
	if (pdata && pdata->init) {
		rc = pdata->init(dev, hpriv->mmio);
		if (rc)
			goto disable_unprepare_clk;
	}

	ahci_save_initial_config(dev, hpriv,
				 pdata ? pdata->force_port_map : 0,
				 pdata ? pdata->mask_port_map : 0);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	ahci_set_em_messages(hpriv, &pi);

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		rc = -ENOMEM;
		goto pdata_exit;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;
	else
		dev_info(dev, "SSS flag set, parallel bus scan disabled\n");

	if (pi.flags & ATA_FLAG_EM)
		ahci_reset_em(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		/* Port registers start at 0x100 and are 0x80 bytes apart. */
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		goto pdata_exit;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
			       &ahci_platform_sht);
	if (rc)
		goto pdata_exit;

	return 0;
pdata_exit:
	if (pdata && pdata->exit)
		pdata->exit(dev);
disable_unprepare_clk:
	if (!IS_ERR(hpriv->clk))
		clk_disable_unprepare(hpriv->clk);
free_clk:
	if (!IS_ERR(hpriv->clk))
		clk_put(hpriv->clk);
	return rc;
}

/**
 * ahci_host_stop - libata host_stop hook
 * @host: ATA host being torn down
 *
 * Mirrors the tail of ahci_probe(): runs the optional platform exit
 * callback, then disables and releases the clock if one was acquired.
 */
static void ahci_host_stop(struct ata_host *host)
{
	struct device *dev = host->dev;
	struct ahci_platform_data *pdata = dev_get_platdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;

	if (pdata && pdata->exit)
		pdata->exit(dev);

	if (!IS_ERR(hpriv->clk)) {
		clk_disable_unprepare(hpriv->clk);
		clk_put(hpriv->clk);
	}
}

#ifdef CONFIG_PM_SLEEP
/**
 * ahci_suspend - system-sleep suspend handler
 * @dev: device being suspended
 *
 * Masks HBA interrupts (required by AHCI spec before a D3 transition,
 * see comment below), suspends the ATA host, then either delegates to
 * the platform suspend callback or disables the clock.
 *
 * NOTE(review): when pdata->suspend is provided this returns its result
 * directly and the clock is left enabled here — presumably the callback
 * is expected to manage power itself; confirm against the platform
 * callback contract.
 */
static int ahci_suspend(struct device *dev)
{
	struct ahci_platform_data *pdata = dev_get_platdata(dev);
	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 ctl;
	int rc;

	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
		dev_err(dev, "firmware update required for suspend/resume\n");
		return -EIO;
	}

	/*
	 * AHCI spec rev1.1 section 8.3.3:
	 * Software must disable interrupts prior to requesting a
	 * transition of the HBA to D3 state.
	 */
	ctl = readl(mmio + HOST_CTL);
	ctl &= ~HOST_IRQ_EN;
	writel(ctl, mmio + HOST_CTL);
	readl(mmio + HOST_CTL); /* flush */

	rc = ata_host_suspend(host, PMSG_SUSPEND);
	if (rc)
		return rc;

	if (pdata && pdata->suspend)
		return pdata->suspend(dev);

	if (!IS_ERR(hpriv->clk))
		clk_disable_unprepare(hpriv->clk);

	return 0;
}

/**
 * ahci_resume - system-sleep resume handler
 * @dev: device being resumed
 *
 * Re-enables the clock, runs the optional platform resume callback, and —
 * only when coming back from a full suspend (PM_EVENT_SUSPEND) — resets
 * and re-initializes the controller before resuming the ATA host.
 */
static int ahci_resume(struct device *dev)
{
	struct ahci_platform_data *pdata = dev_get_platdata(dev);
	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	int rc;

	if (!IS_ERR(hpriv->clk)) {
		rc = clk_prepare_enable(hpriv->clk);
		if (rc) {
			dev_err(dev, "clock prepare enable failed");
			return rc;
		}
	}

	if (pdata && pdata->resume) {
		rc = pdata->resume(dev);
		if (rc)
			goto disable_unprepare_clk;
	}

	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			goto disable_unprepare_clk;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;

disable_unprepare_clk:
	if (!IS_ERR(hpriv->clk))
		clk_disable_unprepare(hpriv->clk);

	return rc;
}
#endif

static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);

/* OF match table; OF-probed devices get ahci_port_info[0] (see ahci_probe). */
static const struct of_device_id ahci_of_match[] = {
	{ .compatible = "snps,spear-ahci", },
	{ .compatible = "snps,exynos5440-ahci", },
	{ .compatible = "ibm,476gtr-ahci", },
	{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);

static struct platform_driver ahci_driver = {
	.probe = ahci_probe,
	.remove = ata_platform_remove_one,
	.driver = {
		.name = "ahci",
		.owner = THIS_MODULE,
		.of_match_table = ahci_of_match,
		.pm = &ahci_pm_ops,
	},
	.id_table = ahci_devtype,
};
module_platform_driver(ahci_driver);

MODULE_DESCRIPTION("AHCI SATA platform driver");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ahci");