Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev: (24 commits)
pci: allow multiple calls to pcim_enable_device()
Blackfin pata-bf54x driver: fix compiling bug - no ata_port struct in struct ata_device any more
Blackfin pata-bf54x driver: should cover all possible interrupt sources
Blackfin pata-bf54x driver: Add debug information
Blackfin pata-bf54x driver: Remove obsolete PM function
pata_sl82c105: dual channel support
ata_piix.c: make piix_merge_scr() static
sata_nv: fix for completion handling
sata_mv: Remove PCI dependency
sata_mv ncq Comments and version bump
sata_mv ncq Remove post internal cmd op
sata_mv ncq Enable NCQ operation
sata_mv ncq Introduce per-tag SG tables
ata_piix: IDE mode SATA patch for Intel ICH10 DeviceID's
ahci: RAID mode SATA patch for Intel ICH10 DeviceID's
sata_mv ncq Use DMA memory pools for hardware memory tables
sata_mv ncq Restrict max sectors to 8-bits on GenII NCQ
sata_mv ncq Ignore response status LSB on NCQ
sata_mv ncq Use hqtag instead of ioid
sata_mv ncq Add want ncq parameter for EDMA configuration
...

+389 -218
+1 -1
drivers/ata/Kconfig
···
 config SATA_MV
 	tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
-	depends on PCI && EXPERIMENTAL
+	depends on EXPERIMENTAL
 	help
 	  This option enables support for the Marvell Serial ATA family.
 	  Currently supports 88SX[56]0[48][01] chips.
+2
drivers/ata/ahci.c
···
 	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
 	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
 	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
+	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
+	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+9 -1
drivers/ata/ata_piix.c
···
 	{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
 	/* SATA Controller IDE (Tolapai) */
 	{ 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata_ahci },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 
 	{ }	/* terminate list */
 };
···
 	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
 }
 
-u32 piix_merge_scr(u32 val0, u32 val1, const int * const *merge_tbl)
+static u32 piix_merge_scr(u32 val0, u32 val1, const int * const *merge_tbl)
 {
 	u32 val = 0;
 	int i, mi;
+24 -29
drivers/ata/pata_bf54x.c
···
 	 */
 	n6 = num_clocks_min(t6min, fsclk);
 	if (mode >= 0 && mode <= 4 && n6 >= 1) {
-		pr_debug("set piomode: mode=%d, fsclk=%ud\n", mode, fsclk);
+		dev_dbg(adev->link->ap->dev, "set piomode: mode=%d, fsclk=%ud\n", mode, fsclk);
 		/* calculate the timing values for register transfers. */
 		while (mode > 0 && pio_fsclk[mode] > fsclk)
 			mode--;
···
 
 	mode = adev->dma_mode - XFER_UDMA_0;
 	if (mode >= 0 && mode <= 5) {
-		pr_debug("set udmamode: mode=%d\n", mode);
+		dev_dbg(adev->link->ap->dev, "set udmamode: mode=%d\n", mode);
 		/* the most restrictive timing value is t6 and tc,
 		 * the DIOW - data hold. If one SCLK pulse is longer
 		 * than this minimum value then register
···
 
 	mode = adev->dma_mode - XFER_MW_DMA_0;
 	if (mode >= 0 && mode <= 2) {
-		pr_debug("set mdmamode: mode=%d\n", mode);
+		dev_dbg(adev->link->ap->dev, "set mdmamode: mode=%d\n", mode);
 		/* the most restrictive timing value is tf, the DMACK to
 		 * read data released. If one SCLK pulse is longer than
 		 * this maximum value then the MDMA mode
···
 		write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal);
 		write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam);
 		write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah);
-		pr_debug("hob: feat 0x%X nsect 0x%X, lba 0x%X "
+		dev_dbg(ap->dev, "hob: feat 0x%X nsect 0x%X, lba 0x%X "
 			 "0x%X 0x%X\n",
 			tf->hob_feature,
 			tf->hob_nsect,
···
 		write_atapi_register(base, ATA_REG_LBAL, tf->lbal);
 		write_atapi_register(base, ATA_REG_LBAM, tf->lbam);
 		write_atapi_register(base, ATA_REG_LBAH, tf->lbah);
-		pr_debug("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+		dev_dbg(ap->dev, "feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
 			tf->feature,
 			tf->nsect,
 			tf->lbal,
···
 
 	if (tf->flags & ATA_TFLAG_DEVICE) {
 		write_atapi_register(base, ATA_REG_DEVICE, tf->device);
-		pr_debug("device 0x%X\n", tf->device);
+		dev_dbg(ap->dev, "device 0x%X\n", tf->device);
 	}
 
 	ata_wait_idle(ap);
···
 			      const struct ata_taskfile *tf)
 {
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
-	pr_debug("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+	dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command);
 
 	write_atapi_register(base, ATA_REG_CMD, tf->command);
 	ata_pause(ap);
···
 	struct scatterlist *sg;
 	unsigned int si;
 
-	pr_debug("in atapi dma setup\n");
+	dev_dbg(qc->ap->dev, "in atapi dma setup\n");
 	/* Program the ATA_CTRL register with dir */
 	if (qc->tf.flags & ATA_TFLAG_WRITE) {
 		/* fill the ATAPI DMA controller */
···
 	struct scatterlist *sg;
 	unsigned int si;
 
-	pr_debug("in atapi dma start\n");
+	dev_dbg(qc->ap->dev, "in atapi dma start\n");
 	if (!(ap->udma_mask || ap->mwdma_mask))
 		return;
 
···
 				sg_dma_address(sg) + sg_dma_len(sg));
 		}
 		enable_dma(CH_ATAPI_TX);
-		pr_debug("enable udma write\n");
+		dev_dbg(qc->ap->dev, "enable udma write\n");
 
 		/* Send ATA DMA write command */
 		bfin_exec_command(ap, &qc->tf);
···
 			| XFER_DIR));
 	} else {
 		enable_dma(CH_ATAPI_RX);
-		pr_debug("enable udma read\n");
+		dev_dbg(qc->ap->dev, "enable udma read\n");
 
 		/* Send ATA DMA read command */
 		bfin_exec_command(ap, &qc->tf);
···
 	struct scatterlist *sg;
 	unsigned int si;
 
-	pr_debug("in atapi dma stop\n");
+	dev_dbg(qc->ap->dev, "in atapi dma stop\n");
 	if (!(ap->udma_mask || ap->mwdma_mask))
 		return;
 
···
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
 	unsigned short int_status = ATAPI_GET_INT_STATUS(base);
 
-	if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON)) {
+	if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON))
 		host_stat |= ATA_DMA_ACTIVE;
-	}
-	if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT)) {
+	if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT|
+		ATAPI_DEV_INT))
 		host_stat |= ATA_DMA_INTR;
-	}
-	if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT)) {
-		host_stat |= ATA_DMA_ERR;
-	}
+	if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT))
+		host_stat |= ATA_DMA_ERR|ATA_DMA_INTR;
+
+	dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat);
 
 	return host_stat;
 }
···
 {
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
 
-	pr_debug("in atapi irq clear\n");
-
+	dev_dbg(ap->dev, "in atapi irq clear\n");
 	ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT
 		| MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT
 		| MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT);
···
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
 	u8 tmp;
 
-	pr_debug("in atapi irq on\n");
+	dev_dbg(ap->dev, "in atapi irq on\n");
 	ap->ctl &= ~ATA_NIEN;
 	ap->last_ctl = ap->ctl;
 
···
 {
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
 
-	pr_debug("in atapi dma freeze\n");
+	dev_dbg(ap->dev, "in atapi dma freeze\n");
 	ap->ctl |= ATA_NIEN;
 	ap->last_ctl = ap->ctl;
 
···
 
 static void bfin_port_stop(struct ata_port *ap)
 {
-	pr_debug("in atapi port stop\n");
+	dev_dbg(ap->dev, "in atapi port stop\n");
 	if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
 		free_dma(CH_ATAPI_RX);
 		free_dma(CH_ATAPI_TX);
···
 
 static int bfin_port_start(struct ata_port *ap)
 {
-	pr_debug("in atapi port start\n");
+	dev_dbg(ap->dev, "in atapi port start\n");
 	if (!(ap->udma_mask || ap->mwdma_mask))
 		return 0;
 
···
 	.slave_configure	= ata_scsi_slave_config,
 	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
-#ifdef CONFIG_PM
-	.resume			= ata_scsi_device_resume,
-	.suspend		= ata_scsi_device_suspend,
-#endif
 };
 
 static const struct ata_port_operations bfin_pata_ops = {
+31 -2
drivers/ata/pata_sl82c105.c
···
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_sl82c105"
-#define DRV_VERSION "0.3.2"
+#define DRV_VERSION "0.3.3"
 
 enum {
 	/*
···
 	sl82c105_set_piomode(ap, qc->dev);
 }
 
+/**
+ *	sl82c105_qc_defer	-	implement serialization
+ *	@qc: command
+ *
+ *	We must issue one command per host not per channel because
+ *	of the reset bug.
+ *
+ *	Q: is the scsi host lock sufficient ?
+ */
+
+static int sl82c105_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_host *host = qc->ap->host;
+	struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
+	int rc;
+
+	/* First apply the usual rules */
+	rc = ata_std_qc_defer(qc);
+	if (rc != 0)
+		return rc;
+
+	/* Now apply serialization rules. Only allow a command if the
+	   other channel state machine is idle */
+	if (alt && alt->qc_active)
+		return ATA_DEFER_PORT;
+	return 0;
+}
+
 static struct scsi_host_template sl82c105_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
···
 	.bmdma_stop	= sl82c105_bmdma_stop,
 	.bmdma_status	= ata_bmdma_status,
 
+	.qc_defer	= sl82c105_qc_defer,
 	.qc_prep	= ata_qc_prep,
 	.qc_issue	= ata_qc_issue_prot,
 
···
 	};
 	/* for now use only the first port */
 	const struct ata_port_info *ppi[] = { &info_early,
-					       &ata_dummy_port_info };
+					       NULL };
 	u32 val;
 	int rev;
 
+308 -178
drivers/ata/sata_mv.c
··· 29 I distinctly remember a couple workarounds (one related to PCI-X) 30 are still needed. 31 32 - 4) Add NCQ support (easy to intermediate, once new-EH support appears) 33 34 5) Investigate problems with PCI Message Signalled Interrupts (MSI). 35 ··· 59 Target mode, for those without docs, is the ability to directly 60 connect two SATA controllers. 61 62 - 13) Verify that 7042 is fully supported. I only have a 6042. 63 - 64 */ 65 66 ··· 77 #include <linux/libata.h> 78 79 #define DRV_NAME "sata_mv" 80 - #define DRV_VERSION "1.01" 81 82 enum { 83 /* BAR's are enumerated in terms of pci_resource_start() terms */ ··· 111 112 /* CRQB needs alignment on a 1KB boundary. Size == 1KB 113 * CRPB needs alignment on a 256B boundary. Size == 256B 114 - * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB 115 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B 116 */ 117 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), 118 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), 119 - MV_MAX_SG_CT = 176, 120 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), 121 - MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ), 122 123 MV_PORTS_PER_HC = 4, 124 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ ··· 127 /* Host Flags */ 128 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 129 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 130 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 131 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | 132 ATA_FLAG_PIO_POLLING, ··· 175 176 PCIE_IRQ_CAUSE_OFS = 0x1900, 177 PCIE_IRQ_MASK_OFS = 0x1910, 178 - PCIE_UNMASK_ALL_IRQS = 0x70a, /* assorted bits */ 179 180 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60, 181 HC_MAIN_IRQ_MASK_OFS = 0x1d64, ··· 215 /* SATA registers */ 216 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ 217 SATA_ACTIVE_OFS = 0x350, 218 PHY_MODE3 = 0x310, 219 PHY_MODE4 = 0x314, 220 PHY_MODE2 = 0x330, ··· 228 229 /* Port registers */ 230 EDMA_CFG_OFS = 0, 231 - EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */ 232 - EDMA_CFG_NCQ = (1 << 5), 233 - EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ 234 - EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ 235 - EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ 236 237 EDMA_ERR_IRQ_CAUSE_OFS = 0x8, 238 EDMA_ERR_IRQ_MASK_OFS = 0xc, ··· 250 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ 251 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ 252 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ 253 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ 254 - EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), 255 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ 256 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ 257 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ 258 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ 259 EDMA_ERR_OVERRUN_5 = (1 << 5), 260 EDMA_ERR_UNDERRUN_5 = (1 << 6), 261 EDMA_EH_FREEZE = EDMA_ERR_D_PAR | 262 EDMA_ERR_PRD_PAR | 263 EDMA_ERR_DEV_DCON | ··· 336 337 /* Port private flags (pp_flags) */ 338 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ 339 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? 
*/ 340 }; 341 342 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) 343 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) 344 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) 345 346 enum { 347 /* DMA boundary 0xffff is required by the s/g splitting ··· 406 dma_addr_t crqb_dma; 407 struct mv_crpb *crpb; 408 dma_addr_t crpb_dma; 409 - struct mv_sg *sg_tbl; 410 - dma_addr_t sg_tbl_dma; 411 412 unsigned int req_idx; 413 unsigned int resp_idx; ··· 427 u32 irq_cause_ofs; 428 u32 irq_mask_ofs; 429 u32 unmask_all_irqs; 430 }; 431 432 struct mv_hw_ops { ··· 446 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio, 447 unsigned int n_hc); 448 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio); 449 - void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio); 450 }; 451 452 static void mv_irq_clear(struct ata_port *ap); ··· 460 static void mv_qc_prep_iie(struct ata_queued_cmd *qc); 461 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); 462 static void mv_error_handler(struct ata_port *ap); 463 - static void mv_post_int_cmd(struct ata_queued_cmd *qc); 464 static void mv_eh_freeze(struct ata_port *ap); 465 static void mv_eh_thaw(struct ata_port *ap); 466 - static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 467 468 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 469 unsigned int port); ··· 472 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 473 unsigned int n_hc); 474 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 475 - static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio); 476 477 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 478 unsigned int port); ··· 482 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 483 unsigned int n_hc); 484 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 485 - static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio); 486 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, 487 unsigned int port_no); 488 489 static struct scsi_host_template mv5_sht = { 490 .module = THIS_MODULE, 491 .name = DRV_NAME, ··· 516 .name = DRV_NAME, 517 .ioctl = ata_scsi_ioctl, 518 .queuecommand = ata_scsi_queuecmd, 519 - .can_queue = ATA_DEF_QUEUE, 520 .this_id = ATA_SHT_THIS_ID, 521 .sg_tablesize = MV_MAX_SG_CT / 2, 522 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, ··· 547 .irq_on = ata_irq_on, 548 549 .error_handler = mv_error_handler, 550 - .post_internal_cmd = mv_post_int_cmd, 551 .freeze = mv_eh_freeze, 552 .thaw = mv_eh_thaw, 553 ··· 558 }; 559 560 static const struct ata_port_operations mv6_ops = { 561 .tf_load = ata_tf_load, 562 .tf_read = ata_tf_read, 563 .check_status = ata_check_status, ··· 575 .irq_on = ata_irq_on, 576 577 .error_handler = mv_error_handler, 578 - .post_internal_cmd = mv_post_int_cmd, 579 .freeze = mv_eh_freeze, 580 .thaw = mv_eh_thaw, 581 582 .scr_read = mv_scr_read, 583 .scr_write = mv_scr_write, ··· 603 .irq_on = ata_irq_on, 604 605 .error_handler = mv_error_handler, 606 - .post_internal_cmd = mv_post_int_cmd, 607 .freeze = mv_eh_freeze, 608 .thaw = mv_eh_thaw, 609 610 .scr_read = mv_scr_read, 611 .scr_write = mv_scr_write, ··· 634 .port_ops = &mv5_ops, 635 }, 636 { /* chip_604x */ 637 - .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS, 638 .pio_mask = 0x1f, /* pio0-4 */ 639 .udma_mask = ATA_UDMA6, 640 .port_ops = &mv6_ops, 641 }, 642 { /* chip_608x */ 643 .flags = MV_COMMON_FLAGS | 
MV_6XXX_FLAGS | 644 - MV_FLAG_DUAL_HC, 645 .pio_mask = 0x1f, /* pio0-4 */ 646 .udma_mask = ATA_UDMA6, 647 .port_ops = &mv6_ops, 648 }, 649 { /* chip_6042 */ 650 - .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS, 651 .pio_mask = 0x1f, /* pio0-4 */ 652 .udma_mask = ATA_UDMA6, 653 .port_ops = &mv_iie_ops, 654 }, 655 { /* chip_7042 */ 656 - .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS, 657 .pio_mask = 0x1f, /* pio0-4 */ 658 .udma_mask = ATA_UDMA6, 659 .port_ops = &mv_iie_ops, ··· 693 { } /* terminate list */ 694 }; 695 696 - static struct pci_driver mv_pci_driver = { 697 - .name = DRV_NAME, 698 - .id_table = mv_pci_tbl, 699 - .probe = mv_init_one, 700 - .remove = ata_pci_remove_one, 701 - }; 702 - 703 static const struct mv_hw_ops mv5xxx_ops = { 704 .phy_errata = mv5_phy_errata, 705 .enable_leds = mv5_enable_leds, ··· 710 .reset_flash = mv6_reset_flash, 711 .reset_bus = mv_reset_pci_bus, 712 }; 713 - 714 - /* 715 - * module options 716 - */ 717 - static int msi; /* Use PCI msi; either zero (off, default) or non-zero */ 718 - 719 - 720 - /* move to PCI layer or libata core? */ 721 - static int pci_go_64(struct pci_dev *pdev) 722 - { 723 - int rc; 724 - 725 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 726 - rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 727 - if (rc) { 728 - rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 729 - if (rc) { 730 - dev_printk(KERN_ERR, &pdev->dev, 731 - "64-bit DMA enable failed\n"); 732 - return rc; 733 - } 734 - } 735 - } else { 736 - rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 737 - if (rc) { 738 - dev_printk(KERN_ERR, &pdev->dev, 739 - "32-bit DMA enable failed\n"); 740 - return rc; 741 - } 742 - rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 743 - if (rc) { 744 - dev_printk(KERN_ERR, &pdev->dev, 745 - "32-bit consistent DMA enable failed\n"); 746 - return rc; 747 - } 748 - } 749 - 750 - return rc; 751 - } 752 753 /* 754 * Functions ··· 814 * LOCKING: 815 * Inherited from caller. 816 */ 817 - static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv, 818 - struct mv_port_priv *pp) 819 { 820 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { 821 /* clear EDMA event indicators, if any */ 822 - writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS); 823 824 - mv_set_edma_ptrs(base, hpriv, pp); 825 826 - writelfl(EDMA_EN, base + EDMA_CMD_OFS); 827 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 828 } 829 - WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS))); 830 } 831 832 /** ··· 1029 return -EINVAL; 1030 } 1031 1032 - static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv, 1033 - void __iomem *port_mmio) 1034 { 1035 - u32 cfg = readl(port_mmio + EDMA_CFG_OFS); 1036 1037 /* set up non-NCQ EDMA configuration */ 1038 - cfg &= ~(1 << 9); /* disable eQue */ 1039 1040 - if (IS_GEN_I(hpriv)) { 1041 - cfg &= ~0x1f; /* clear queue depth */ 1042 cfg |= (1 << 8); /* enab config burst size mask */ 1043 - } 1044 1045 - else if (IS_GEN_II(hpriv)) { 1046 - cfg &= ~0x1f; /* clear queue depth */ 1047 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; 1048 - cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */ 1049 - } 1050 1051 else if (IS_GEN_IIE(hpriv)) { 1052 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ 1053 cfg |= (1 << 22); /* enab 4-entry host queue cache */ 1054 - cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) 
*/ 1055 cfg |= (1 << 18); /* enab early completion */ 1056 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ 1057 - cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */ 1058 - cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */ 1059 } 1060 1061 writelfl(cfg, port_mmio + EDMA_CFG_OFS); 1062 } 1063 1064 /** ··· 1115 struct mv_host_priv *hpriv = ap->host->private_data; 1116 struct mv_port_priv *pp; 1117 void __iomem *port_mmio = mv_ap_base(ap); 1118 - void *mem; 1119 - dma_addr_t mem_dma; 1120 unsigned long flags; 1121 - int rc; 1122 1123 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1124 if (!pp) 1125 return -ENOMEM; 1126 - 1127 - mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma, 1128 - GFP_KERNEL); 1129 - if (!mem) 1130 - return -ENOMEM; 1131 - memset(mem, 0, MV_PORT_PRIV_DMA_SZ); 1132 1133 rc = ata_pad_alloc(ap, dev); 1134 if (rc) 1135 return rc; 1136 1137 - /* First item in chunk of DMA memory: 1138 - * 32-slot command request table (CRQB), 32 bytes each in size 1139 - */ 1140 - pp->crqb = mem; 1141 - pp->crqb_dma = mem_dma; 1142 - mem += MV_CRQB_Q_SZ; 1143 - mem_dma += MV_CRQB_Q_SZ; 1144 1145 - /* Second item: 1146 - * 32-slot command response table (CRPB), 8 bytes each in size 1147 - */ 1148 - pp->crpb = mem; 1149 - pp->crpb_dma = mem_dma; 1150 - mem += MV_CRPB_Q_SZ; 1151 - mem_dma += MV_CRPB_Q_SZ; 1152 1153 - /* Third item: 1154 - * Table of scatter-gather descriptors (ePRD), 16 bytes each 1155 */ 1156 - pp->sg_tbl = mem; 1157 - pp->sg_tbl_dma = mem_dma; 1158 1159 spin_lock_irqsave(&ap->host->lock, flags); 1160 1161 - mv_edma_cfg(ap, hpriv, port_mmio); 1162 - 1163 mv_set_edma_ptrs(port_mmio, hpriv, pp); 1164 1165 spin_unlock_irqrestore(&ap->host->lock, flags); ··· 1164 * we'll be unable to send non-data, PIO, etc due to restricted access 1165 * to shadow regs. 
1166 */ 1167 - ap->private_data = pp; 1168 return 0; 1169 } 1170 1171 /** ··· 1183 static void mv_port_stop(struct ata_port *ap) 1184 { 1185 mv_stop_dma(ap); 1186 } 1187 1188 /** ··· 1202 struct mv_sg *mv_sg, *last_sg = NULL; 1203 unsigned int si; 1204 1205 - mv_sg = pp->sg_tbl; 1206 for_each_sg(qc->sg, sg, qc->n_elem, si) { 1207 dma_addr_t addr = sg_dma_address(sg); 1208 u32 sg_len = sg_dma_len(sg); ··· 1258 u16 flags = 0; 1259 unsigned in_index; 1260 1261 - if (qc->tf.protocol != ATA_PROT_DMA) 1262 return; 1263 1264 /* Fill in command request block ··· 1268 flags |= CRQB_FLAG_READ; 1269 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1270 flags |= qc->tag << CRQB_TAG_SHIFT; 1271 - flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/ 1272 1273 /* get current queue index from software */ 1274 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; 1275 1276 pp->crqb[in_index].sg_addr = 1277 - cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); 1278 pp->crqb[in_index].sg_addr_hi = 1279 - cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); 1280 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); 1281 1282 cw = &pp->crqb[in_index].ata_cmd[0]; ··· 1295 case ATA_CMD_WRITE_FUA_EXT: 1296 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); 1297 break; 1298 - #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */ 1299 case ATA_CMD_FPDMA_READ: 1300 case ATA_CMD_FPDMA_WRITE: 1301 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); 1302 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); 1303 break; 1304 - #endif /* FIXME: remove this line when NCQ added */ 1305 default: 1306 /* The only other commands EDMA supports in non-queued and 1307 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none ··· 1348 unsigned in_index; 1349 u32 flags = 0; 1350 1351 - if (qc->tf.protocol != ATA_PROT_DMA) 1352 return; 1353 1354 /* Fill in Gen IIE command request block ··· 1359 1360 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1361 flags |= qc->tag << CRQB_TAG_SHIFT; 1362 - flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really- 1363 - what we use as our tag */ 1364 1365 /* get current queue index from software */ 1366 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; 1367 1368 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; 1369 - crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); 1370 - crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); 1371 crqb->flags = cpu_to_le32(flags); 1372 1373 tf = &qc->tf; ··· 1413 struct ata_port *ap = qc->ap; 1414 void __iomem *port_mmio = mv_ap_base(ap); 1415 struct mv_port_priv *pp = ap->private_data; 1416 - struct mv_host_priv *hpriv = ap->host->private_data; 1417 u32 in_index; 1418 1419 - if (qc->tf.protocol != ATA_PROT_DMA) { 1420 /* We're about to send a non-EDMA capable command to the 1421 * port. Turn off EDMA so there won't be problems accessing 1422 * shadow block, etc registers. ··· 1425 return ata_qc_issue_prot(qc); 1426 } 1427 1428 - mv_start_dma(port_mmio, hpriv, pp); 1429 - 1430 - in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; 1431 - 1432 - /* until we do queuing, the queue should be empty at this point */ 1433 - WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) 1434 - >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); 1435 1436 pp->req_idx++; 1437 ··· 1493 ata_ehi_hotplugged(ehi); 1494 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? 
1495 "dev disconnect" : "dev connect"); 1496 } 1497 1498 if (IS_GEN_I(hpriv)) { ··· 1522 } 1523 1524 /* Clear EDMA now that SERR cleanup done */ 1525 - writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1526 1527 if (!err_mask) { 1528 err_mask = AC_ERR_OTHER; ··· 1595 * support for queueing. this works transparently for 1596 * queued and non-queued modes. 1597 */ 1598 - else if (IS_GEN_II(hpriv)) 1599 - tag = (le16_to_cpu(pp->crpb[out_index].id) 1600 - >> CRPB_IOID_SHIFT_6) & 0x3f; 1601 - 1602 - else /* IS_GEN_IIE */ 1603 - tag = (le16_to_cpu(pp->crpb[out_index].id) 1604 - >> CRPB_IOID_SHIFT_7) & 0x3f; 1605 1606 qc = ata_qc_from_tag(ap, tag); 1607 1608 - /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS 1609 - * bits (WARNING: might not necessarily be associated 1610 - * with this command), which -should- be clear 1611 - * if all is well 1612 */ 1613 status = le16_to_cpu(pp->crpb[out_index].flags); 1614 - if (unlikely(status & 0xff)) { 1615 mv_err_intr(ap, qc); 1616 return; 1617 } ··· 1766 struct ata_host *host = dev_instance; 1767 unsigned int hc, handled = 0, n_hcs; 1768 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; 1769 - u32 irq_stat; 1770 1771 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); 1772 1773 /* check the cases where we either have nothing pending or have read 1774 * a bogus register value which can indicate HW removal or PCI fault 1775 */ 1776 - if (!irq_stat || (0xffffffffU == irq_stat)) 1777 - return IRQ_NONE; 1778 1779 n_hcs = mv_get_hc_count(host->ports[0]->flags); 1780 - spin_lock(&host->lock); 1781 1782 - if (unlikely(irq_stat & PCI_ERR)) { 1783 mv_pci_error(host, mmio); 1784 handled = 1; 1785 goto out_unlock; /* skip all other HC irq handling */ ··· 1851 return -EINVAL; 1852 } 1853 1854 - static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio) 1855 { 1856 int early_5080; 1857 1858 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); ··· 1864 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 1865 } 1866 1867 - mv_reset_pci_bus(pdev, mmio); 1868 } 1869 1870 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) ··· 1988 1989 #undef ZERO 1990 #define ZERO(reg) writel(0, mmio + (reg)) 1991 - static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio) 1992 { 1993 - struct ata_host *host = dev_get_drvdata(&pdev->dev); 1994 struct mv_host_priv *hpriv = host->private_data; 1995 u32 tmp; 1996 ··· 2381 mv_hardreset, mv_postreset); 2382 } 2383 2384 - static void mv_post_int_cmd(struct ata_queued_cmd *qc) 2385 - { 2386 - mv_stop_dma(qc->ap); 2387 - } 2388 - 2389 static void mv_eh_freeze(struct ata_port *ap) 2390 { 2391 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; ··· 2474 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); 2475 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 2476 2477 - /* unmask all EDMA error interrupts */ 2478 - writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS); 2479 2480 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 2481 readl(port_mmio + EDMA_CFG_OFS), ··· 2633 static int mv_init_host(struct ata_host *host, unsigned int board_idx) 2634 { 2635 int rc = 0, n_hc, port, hc; 2636 - struct pci_dev *pdev = to_pci_dev(host->dev); 2637 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; 2638 struct mv_host_priv *hpriv = host->private_data; 2639 ··· 2653 goto done; 2654 2655 hpriv->ops->reset_flash(hpriv, mmio); 2656 - hpriv->ops->reset_bus(pdev, mmio); 2657 hpriv->ops->enable_leds(hpriv, mmio); 2658 2659 for (port = 0; port < host->n_ports; port++) { ··· 2676 2677 
mv_port_init(&ap->ioaddr, port_mmio); 2678 2679 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); 2680 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); 2681 } 2682 2683 for (hc = 0; hc < n_hc; hc++) { ··· 2713 readl(mmio + hpriv->irq_mask_ofs)); 2714 2715 done: 2716 return rc; 2717 } 2718 ··· 2805 "Gen-%s %u slots %u ports %s mode IRQ via %s\n", 2806 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, 2807 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); 2808 } 2809 2810 /** ··· 2872 if (rc) 2873 return rc; 2874 2875 /* initialize adapter */ 2876 rc = mv_init_host(host, board_idx); 2877 if (rc) ··· 2893 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, 2894 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht); 2895 } 2896 2897 static int __init mv_init(void) 2898 { 2899 - return pci_register_driver(&mv_pci_driver); 2900 } 2901 2902 static void __exit mv_exit(void) 2903 { 2904 pci_unregister_driver(&mv_pci_driver); 2905 } 2906 2907 MODULE_AUTHOR("Brett Russ"); ··· 2917 MODULE_DEVICE_TABLE(pci, mv_pci_tbl); 2918 MODULE_VERSION(DRV_VERSION); 2919 2920 module_param(msi, int, 0444); 2921 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); 2922 2923 module_init(mv_init); 2924 module_exit(mv_exit);
··· 29 I distinctly remember a couple workarounds (one related to PCI-X) 30 are still needed. 31 32 + 2) Improve/fix IRQ and error handling sequences. 33 + 34 + 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it). 35 + 36 + 4) Think about TCQ support here, and for libata in general 37 + with controllers that suppport it via host-queuing hardware 38 + (a software-only implementation could be a nightmare). 39 40 5) Investigate problems with PCI Message Signalled Interrupts (MSI). 41 ··· 53 Target mode, for those without docs, is the ability to directly 54 connect two SATA controllers. 55 56 */ 57 58 ··· 73 #include <linux/libata.h> 74 75 #define DRV_NAME "sata_mv" 76 + #define DRV_VERSION "1.20" 77 78 enum { 79 /* BAR's are enumerated in terms of pci_resource_start() terms */ ··· 107 108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB 109 * CRPB needs alignment on a 256B boundary. Size == 256B 110 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B 111 */ 112 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), 113 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), 114 + MV_MAX_SG_CT = 256, 115 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), 116 117 MV_PORTS_PER_HC = 4, 118 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ ··· 125 /* Host Flags */ 126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 128 + /* SoC integrated controllers, no PCI interface */ 129 + MV_FLAG_SOC = (1 << 28), 130 + 131 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 132 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | 133 ATA_FLAG_PIO_POLLING, ··· 170 171 PCIE_IRQ_CAUSE_OFS = 0x1900, 172 PCIE_IRQ_MASK_OFS = 0x1910, 173 + PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */ 174 175 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60, 176 HC_MAIN_IRQ_MASK_OFS = 0x1d64, ··· 210 /* SATA registers */ 211 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ 212 SATA_ACTIVE_OFS = 0x350, 213 + SATA_FIS_IRQ_CAUSE_OFS = 0x364, 214 PHY_MODE3 = 0x310, 215 PHY_MODE4 = 0x314, 216 PHY_MODE2 = 0x330, ··· 222 223 /* Port registers */ 224 EDMA_CFG_OFS = 0, 225 + EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */ 226 + EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */ 227 + EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ 228 + EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ 229 + EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ 230 231 EDMA_ERR_IRQ_CAUSE_OFS = 0x8, 232 EDMA_ERR_IRQ_MASK_OFS = 0xc, ··· 244 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ 245 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ 246 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ 247 + 248 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ 249 + EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */ 250 + EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */ 251 + EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */ 252 + EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */ 253 + 254 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ 255 + 256 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ 257 + EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */ 258 + EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */ 259 + EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */ 260 + EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */ 261 + EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */ 262 + 263 EDMA_ERR_LNK_DATA_TX = (0x1f << 
26), /* link data tx error */ 264 + 265 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ 266 EDMA_ERR_OVERRUN_5 = (1 << 5), 267 EDMA_ERR_UNDERRUN_5 = (1 << 6), 268 + 269 + EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | 270 + EDMA_ERR_LNK_CTRL_RX_1 | 271 + EDMA_ERR_LNK_CTRL_RX_3 | 272 + EDMA_ERR_LNK_CTRL_TX, 273 + 274 EDMA_EH_FREEZE = EDMA_ERR_D_PAR | 275 EDMA_ERR_PRD_PAR | 276 EDMA_ERR_DEV_DCON | ··· 311 312 /* Port private flags (pp_flags) */ 313 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ 314 + MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ 315 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */ 316 }; 317 318 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) 319 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) 320 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) 321 + #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC)) 322 323 enum { 324 /* DMA boundary 0xffff is required by the s/g splitting ··· 379 dma_addr_t crqb_dma; 380 struct mv_crpb *crpb; 381 dma_addr_t crpb_dma; 382 + struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH]; 383 + dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH]; 384 385 unsigned int req_idx; 386 unsigned int resp_idx; ··· 400 u32 irq_cause_ofs; 401 u32 irq_mask_ofs; 402 u32 unmask_all_irqs; 403 + /* 404 + * These consistent DMA memory pools give us guaranteed 405 + * alignment for hardware-accessed data structures, 406 + * and less memory waste in accomplishing the alignment. 407 + */ 408 + struct dma_pool *crqb_pool; 409 + struct dma_pool *crpb_pool; 410 + struct dma_pool *sg_tbl_pool; 411 }; 412 413 struct mv_hw_ops { ··· 411 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio, 412 unsigned int n_hc); 413 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio); 414 + void (*reset_bus)(struct ata_host *host, void __iomem *mmio); 415 }; 416 417 static void mv_irq_clear(struct ata_port *ap); ··· 425 static void mv_qc_prep_iie(struct ata_queued_cmd *qc); 426 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); 427 static void mv_error_handler(struct ata_port *ap); 428 static void mv_eh_freeze(struct ata_port *ap); 429 static void mv_eh_thaw(struct ata_port *ap); 430 + static void mv6_dev_config(struct ata_device *dev); 431 432 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 433 unsigned int port); ··· 438 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 439 unsigned int n_hc); 440 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 441 + static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio); 442 443 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 444 unsigned int port); ··· 448 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 449 unsigned int n_hc); 450 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 451 + static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); 452 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, 453 unsigned int port_no); 454 + static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv, 455 + void __iomem *port_mmio, int want_ncq); 456 + static int __mv_stop_dma(struct ata_port *ap); 457 458 + /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below 459 + * because we have to allow room for worst case splitting of 460 + * PRDs for 64K boundaries in mv_fill_sg(). 
461 + */ 462 static struct scsi_host_template mv5_sht = { 463 .module = THIS_MODULE, 464 .name = DRV_NAME, ··· 475 .name = DRV_NAME, 476 .ioctl = ata_scsi_ioctl, 477 .queuecommand = ata_scsi_queuecmd, 478 + .change_queue_depth = ata_scsi_change_queue_depth, 479 + .can_queue = MV_MAX_Q_DEPTH - 1, 480 .this_id = ATA_SHT_THIS_ID, 481 .sg_tablesize = MV_MAX_SG_CT / 2, 482 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, ··· 505 .irq_on = ata_irq_on, 506 507 .error_handler = mv_error_handler, 508 .freeze = mv_eh_freeze, 509 .thaw = mv_eh_thaw, 510 ··· 517 }; 518 519 static const struct ata_port_operations mv6_ops = { 520 + .dev_config = mv6_dev_config, 521 .tf_load = ata_tf_load, 522 .tf_read = ata_tf_read, 523 .check_status = ata_check_status, ··· 533 .irq_on = ata_irq_on, 534 535 .error_handler = mv_error_handler, 536 .freeze = mv_eh_freeze, 537 .thaw = mv_eh_thaw, 538 + .qc_defer = ata_std_qc_defer, 539 540 .scr_read = mv_scr_read, 541 .scr_write = mv_scr_write, ··· 561 .irq_on = ata_irq_on, 562 563 .error_handler = mv_error_handler, 564 .freeze = mv_eh_freeze, 565 .thaw = mv_eh_thaw, 566 + .qc_defer = ata_std_qc_defer, 567 568 .scr_read = mv_scr_read, 569 .scr_write = mv_scr_write, ··· 592 .port_ops = &mv5_ops, 593 }, 594 { /* chip_604x */ 595 + .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 596 + ATA_FLAG_NCQ, 597 .pio_mask = 0x1f, /* pio0-4 */ 598 .udma_mask = ATA_UDMA6, 599 .port_ops = &mv6_ops, 600 }, 601 { /* chip_608x */ 602 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 603 + ATA_FLAG_NCQ | MV_FLAG_DUAL_HC, 604 .pio_mask = 0x1f, /* pio0-4 */ 605 .udma_mask = ATA_UDMA6, 606 .port_ops = &mv6_ops, 607 }, 608 { /* chip_6042 */ 609 + .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 610 + ATA_FLAG_NCQ, 611 .pio_mask = 0x1f, /* pio0-4 */ 612 .udma_mask = ATA_UDMA6, 613 .port_ops = &mv_iie_ops, 614 }, 615 { /* chip_7042 */ 616 + .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 617 + ATA_FLAG_NCQ, 618 .pio_mask = 0x1f, /* pio0-4 */ 619 .udma_mask = ATA_UDMA6, 620 .port_ops = &mv_iie_ops, ··· 648 { } /* terminate list */ 649 }; 650 651 static const struct mv_hw_ops mv5xxx_ops = { 652 .phy_errata = mv5_phy_errata, 653 .enable_leds = mv5_enable_leds, ··· 672 .reset_flash = mv6_reset_flash, 673 .reset_bus = mv_reset_pci_bus, 674 }; 675 676 /* 677 * Functions ··· 815 * LOCKING: 816 * Inherited from caller. 
817 */ 818 + static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, 819 + struct mv_port_priv *pp, u8 protocol) 820 { 821 + int want_ncq = (protocol == ATA_PROT_NCQ); 822 + 823 + if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 824 + int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); 825 + if (want_ncq != using_ncq) 826 + __mv_stop_dma(ap); 827 + } 828 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { 829 + struct mv_host_priv *hpriv = ap->host->private_data; 830 + int hard_port = mv_hardport_from_port(ap->port_no); 831 + void __iomem *hc_mmio = mv_hc_base_from_port( 832 + ap->host->iomap[MV_PRIMARY_BAR], hard_port); 833 + u32 hc_irq_cause, ipending; 834 + 835 /* clear EDMA event indicators, if any */ 836 + writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 837 838 + /* clear EDMA interrupt indicator, if any */ 839 + hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); 840 + ipending = (DEV_IRQ << hard_port) | 841 + (CRPB_DMA_DONE << hard_port); 842 + if (hc_irq_cause & ipending) { 843 + writelfl(hc_irq_cause & ~ipending, 844 + hc_mmio + HC_IRQ_CAUSE_OFS); 845 + } 846 847 + mv_edma_cfg(pp, hpriv, port_mmio, want_ncq); 848 + 849 + /* clear FIS IRQ Cause */ 850 + writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); 851 + 852 + mv_set_edma_ptrs(port_mmio, hpriv, pp); 853 + 854 + writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS); 855 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 856 } 857 + WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); 858 } 859 860 /** ··· 1003 return -EINVAL; 1004 } 1005 1006 + static void mv6_dev_config(struct ata_device *adev) 1007 { 1008 + /* 1009 + * We don't have hob_nsect when doing NCQ commands on Gen-II. 1010 + * See mv_qc_prep() for more info. 1011 + */ 1012 + if (adev->flags & ATA_DFLAG_NCQ) 1013 + if (adev->max_sectors > ATA_MAX_SECTORS) 1014 + adev->max_sectors = ATA_MAX_SECTORS; 1015 + } 1016 + 1017 + static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv, 1018 + void __iomem *port_mmio, int want_ncq) 1019 + { 1020 + u32 cfg; 1021 1022 /* set up non-NCQ EDMA configuration */ 1023 + cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ 1024 1025 + if (IS_GEN_I(hpriv)) 1026 cfg |= (1 << 8); /* enab config burst size mask */ 1027 1028 + else if (IS_GEN_II(hpriv)) 1029 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; 1030 1031 else if (IS_GEN_IIE(hpriv)) { 1032 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ 1033 cfg |= (1 << 22); /* enab 4-entry host queue cache */ 1034 cfg |= (1 << 18); /* enab early completion */ 1035 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ 1036 } 1037 1038 + if (want_ncq) { 1039 + cfg |= EDMA_CFG_NCQ; 1040 + pp->pp_flags |= MV_PP_FLAG_NCQ_EN; 1041 + } else 1042 + pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN; 1043 + 1044 writelfl(cfg, port_mmio + EDMA_CFG_OFS); 1045 + } 1046 + 1047 + static void mv_port_free_dma_mem(struct ata_port *ap) 1048 + { 1049 + struct mv_host_priv *hpriv = ap->host->private_data; 1050 + struct mv_port_priv *pp = ap->private_data; 1051 + int tag; 1052 + 1053 + if (pp->crqb) { 1054 + dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma); 1055 + pp->crqb = NULL; 1056 + } 1057 + if (pp->crpb) { 1058 + dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma); 1059 + pp->crpb = NULL; 1060 + } 1061 + /* 1062 + * For GEN_I, there's no NCQ, so we have only a single sg_tbl. 1063 + * For later hardware, we have one unique sg_tbl per NCQ tag. 
1064 + */ 1065 + for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1066 + if (pp->sg_tbl[tag]) { 1067 + if (tag == 0 || !IS_GEN_I(hpriv)) 1068 + dma_pool_free(hpriv->sg_tbl_pool, 1069 + pp->sg_tbl[tag], 1070 + pp->sg_tbl_dma[tag]); 1071 + pp->sg_tbl[tag] = NULL; 1072 + } 1073 + } 1074 } 1075 1076 /** ··· 1051 struct mv_host_priv *hpriv = ap->host->private_data; 1052 struct mv_port_priv *pp; 1053 void __iomem *port_mmio = mv_ap_base(ap); 1054 unsigned long flags; 1055 + int tag, rc; 1056 1057 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1058 if (!pp) 1059 return -ENOMEM; 1060 + ap->private_data = pp; 1061 1062 rc = ata_pad_alloc(ap, dev); 1063 if (rc) 1064 return rc; 1065 1066 + pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); 1067 + if (!pp->crqb) 1068 + return -ENOMEM; 1069 + memset(pp->crqb, 0, MV_CRQB_Q_SZ); 1070 1071 + pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); 1072 + if (!pp->crpb) 1073 + goto out_port_free_dma_mem; 1074 + memset(pp->crpb, 0, MV_CRPB_Q_SZ); 1075 1076 + /* 1077 + * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. 1078 + * For later hardware, we need one unique sg_tbl per NCQ tag. 1079 */ 1080 + for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1081 + if (tag == 0 || !IS_GEN_I(hpriv)) { 1082 + pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, 1083 + GFP_KERNEL, &pp->sg_tbl_dma[tag]); 1084 + if (!pp->sg_tbl[tag]) 1085 + goto out_port_free_dma_mem; 1086 + } else { 1087 + pp->sg_tbl[tag] = pp->sg_tbl[0]; 1088 + pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; 1089 + } 1090 + } 1091 1092 spin_lock_irqsave(&ap->host->lock, flags); 1093 1094 + mv_edma_cfg(pp, hpriv, port_mmio, 0); 1095 mv_set_edma_ptrs(port_mmio, hpriv, pp); 1096 1097 spin_unlock_irqrestore(&ap->host->lock, flags); ··· 1104 * we'll be unable to send non-data, PIO, etc due to restricted access 1105 * to shadow regs. 
1106 */ 1107 return 0; 1108 + 1109 + out_port_free_dma_mem: 1110 + mv_port_free_dma_mem(ap); 1111 + return -ENOMEM; 1112 } 1113 1114 /** ··· 1120 static void mv_port_stop(struct ata_port *ap) 1121 { 1122 mv_stop_dma(ap); 1123 + mv_port_free_dma_mem(ap); 1124 } 1125 1126 /** ··· 1138 struct mv_sg *mv_sg, *last_sg = NULL; 1139 unsigned int si; 1140 1141 + mv_sg = pp->sg_tbl[qc->tag]; 1142 for_each_sg(qc->sg, sg, qc->n_elem, si) { 1143 dma_addr_t addr = sg_dma_address(sg); 1144 u32 sg_len = sg_dma_len(sg); ··· 1194 u16 flags = 0; 1195 unsigned in_index; 1196 1197 + if ((qc->tf.protocol != ATA_PROT_DMA) && 1198 + (qc->tf.protocol != ATA_PROT_NCQ)) 1199 return; 1200 1201 /* Fill in command request block ··· 1203 flags |= CRQB_FLAG_READ; 1204 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1205 flags |= qc->tag << CRQB_TAG_SHIFT; 1206 1207 /* get current queue index from software */ 1208 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; 1209 1210 pp->crqb[in_index].sg_addr = 1211 + cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); 1212 pp->crqb[in_index].sg_addr_hi = 1213 + cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); 1214 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); 1215 1216 cw = &pp->crqb[in_index].ata_cmd[0]; ··· 1231 case ATA_CMD_WRITE_FUA_EXT: 1232 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); 1233 break; 1234 case ATA_CMD_FPDMA_READ: 1235 case ATA_CMD_FPDMA_WRITE: 1236 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); 1237 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); 1238 break; 1239 default: 1240 /* The only other commands EDMA supports in non-queued and 1241 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none ··· 1286 unsigned in_index; 1287 u32 flags = 0; 1288 1289 + if ((qc->tf.protocol != ATA_PROT_DMA) && 1290 + (qc->tf.protocol != ATA_PROT_NCQ)) 1291 return; 1292 1293 /* Fill in Gen IIE command request block ··· 1296 1297 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1298 flags |= qc->tag << CRQB_TAG_SHIFT; 1299 + flags |= qc->tag << CRQB_HOSTQ_SHIFT; 1300 1301 /* get current queue index from software */ 1302 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; 1303 1304 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; 1305 + crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); 1306 + crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); 1307 crqb->flags = cpu_to_le32(flags); 1308 1309 tf = &qc->tf; ··· 1351 struct ata_port *ap = qc->ap; 1352 void __iomem *port_mmio = mv_ap_base(ap); 1353 struct mv_port_priv *pp = ap->private_data; 1354 u32 in_index; 1355 1356 + if ((qc->tf.protocol != ATA_PROT_DMA) && 1357 + (qc->tf.protocol != ATA_PROT_NCQ)) { 1358 /* We're about to send a non-EDMA capable command to the 1359 * port. Turn off EDMA so there won't be problems accessing 1360 * shadow block, etc registers. ··· 1363 return ata_qc_issue_prot(qc); 1364 } 1365 1366 + mv_start_dma(ap, port_mmio, pp, qc->tf.protocol); 1367 1368 pp->req_idx++; 1369 ··· 1437 ata_ehi_hotplugged(ehi); 1438 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? 1439 "dev disconnect" : "dev connect"); 1440 + action |= ATA_EH_HARDRESET; 1441 } 1442 1443 if (IS_GEN_I(hpriv)) { ··· 1465 } 1466 1467 /* Clear EDMA now that SERR cleanup done */ 1468 + writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1469 1470 if (!err_mask) { 1471 err_mask = AC_ERR_OTHER; ··· 1538 * support for queueing. this works transparently for 1539 * queued and non-queued modes. 
1540 */ 1541 + else 1542 + tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f; 1543 1544 qc = ata_qc_from_tag(ap, tag); 1545 1546 + /* For non-NCQ mode, the lower 8 bits of status 1547 + * are from EDMA_ERR_IRQ_CAUSE_OFS, 1548 + * which should be zero if all went well. 1549 */ 1550 status = le16_to_cpu(pp->crpb[out_index].flags); 1551 + if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) { 1552 mv_err_intr(ap, qc); 1553 return; 1554 } ··· 1715 struct ata_host *host = dev_instance; 1716 unsigned int hc, handled = 0, n_hcs; 1717 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; 1718 + u32 irq_stat, irq_mask; 1719 1720 + spin_lock(&host->lock); 1721 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); 1722 + irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS); 1723 1724 /* check the cases where we either have nothing pending or have read 1725 * a bogus register value which can indicate HW removal or PCI fault 1726 */ 1727 + if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat)) 1728 + goto out_unlock; 1729 1730 n_hcs = mv_get_hc_count(host->ports[0]->flags); 1731 1732 + if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) { 1733 mv_pci_error(host, mmio); 1734 handled = 1; 1735 goto out_unlock; /* skip all other HC irq handling */ ··· 1799 return -EINVAL; 1800 } 1801 1802 + static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) 1803 { 1804 + struct pci_dev *pdev = to_pci_dev(host->dev); 1805 int early_5080; 1806 1807 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); ··· 1811 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 1812 } 1813 1814 + mv_reset_pci_bus(host, mmio); 1815 } 1816 1817 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) ··· 1935 1936 #undef ZERO 1937 #define ZERO(reg) writel(0, mmio + (reg)) 1938 + static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) 1939 { 1940 struct mv_host_priv *hpriv = host->private_data; 1941 u32 tmp; 1942 ··· 2329 mv_hardreset, mv_postreset); 2330 } 2331 2332 static void mv_eh_freeze(struct ata_port *ap) 2333 { 2334 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; ··· 2427 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); 2428 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 2429 2430 + /* unmask all non-transient EDMA error interrupts */ 2431 + writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS); 2432 2433 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 2434 readl(port_mmio + EDMA_CFG_OFS), ··· 2586 static int mv_init_host(struct ata_host *host, unsigned int board_idx) 2587 { 2588 int rc = 0, n_hc, port, hc; 2589 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; 2590 struct mv_host_priv *hpriv = host->private_data; 2591 ··· 2607 goto done; 2608 2609 hpriv->ops->reset_flash(hpriv, mmio); 2610 + hpriv->ops->reset_bus(host, mmio); 2611 hpriv->ops->enable_leds(hpriv, mmio); 2612 2613 for (port = 0; port < host->n_ports; port++) { ··· 2630 2631 mv_port_init(&ap->ioaddr, port_mmio); 2632 2633 + #ifdef CONFIG_PCI 2634 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); 2635 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); 2636 + #endif 2637 } 2638 2639 for (hc = 0; hc < n_hc; hc++) { ··· 2665 readl(mmio + hpriv->irq_mask_ofs)); 2666 2667 done: 2668 + return rc; 2669 + } 2670 + 2671 + #ifdef CONFIG_PCI 2672 + static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 2673 + 2674 + static struct pci_driver mv_pci_driver = { 2675 + .name = DRV_NAME, 2676 + .id_table = mv_pci_tbl, 2677 + .probe = mv_init_one, 2678 + 
.remove = ata_pci_remove_one, 2679 + }; 2680 + 2681 + /* 2682 + * module options 2683 + */ 2684 + static int msi; /* Use PCI msi; either zero (off, default) or non-zero */ 2685 + 2686 + 2687 + /* move to PCI layer or libata core? */ 2688 + static int pci_go_64(struct pci_dev *pdev) 2689 + { 2690 + int rc; 2691 + 2692 + if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 2693 + rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 2694 + if (rc) { 2695 + rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 2696 + if (rc) { 2697 + dev_printk(KERN_ERR, &pdev->dev, 2698 + "64-bit DMA enable failed\n"); 2699 + return rc; 2700 + } 2701 + } 2702 + } else { 2703 + rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 2704 + if (rc) { 2705 + dev_printk(KERN_ERR, &pdev->dev, 2706 + "32-bit DMA enable failed\n"); 2707 + return rc; 2708 + } 2709 + rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 2710 + if (rc) { 2711 + dev_printk(KERN_ERR, &pdev->dev, 2712 + "32-bit consistent DMA enable failed\n"); 2713 + return rc; 2714 + } 2715 + } 2716 + 2717 return rc; 2718 } 2719 ··· 2708 "Gen-%s %u slots %u ports %s mode IRQ via %s\n", 2709 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, 2710 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); 2711 + } 2712 + 2713 + static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev) 2714 + { 2715 + hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, 2716 + MV_CRQB_Q_SZ, 0); 2717 + if (!hpriv->crqb_pool) 2718 + return -ENOMEM; 2719 + 2720 + hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ, 2721 + MV_CRPB_Q_SZ, 0); 2722 + if (!hpriv->crpb_pool) 2723 + return -ENOMEM; 2724 + 2725 + hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ, 2726 + MV_SG_TBL_SZ, 0); 2727 + if (!hpriv->sg_tbl_pool) 2728 + return -ENOMEM; 2729 + 2730 + return 0; 2731 } 2732 2733 /** ··· 2755 if (rc) 2756 return rc; 2757 2758 + rc = mv_create_dma_pools(hpriv, &pdev->dev); 2759 + if (rc) 2760 + return rc; 2761 + 2762 /* initialize adapter */ 2763 rc = mv_init_host(host, board_idx); 2764 if (rc) ··· 2772 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, 2773 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht); 2774 } 2775 + #endif 2776 2777 static int __init mv_init(void) 2778 { 2779 + int rc = -ENODEV; 2780 + #ifdef CONFIG_PCI 2781 + rc = pci_register_driver(&mv_pci_driver); 2782 + #endif 2783 + return rc; 2784 } 2785 2786 static void __exit mv_exit(void) 2787 { 2788 + #ifdef CONFIG_PCI 2789 pci_unregister_driver(&mv_pci_driver); 2790 + #endif 2791 } 2792 2793 MODULE_AUTHOR("Brett Russ"); ··· 2789 MODULE_DEVICE_TABLE(pci, mv_pci_tbl); 2790 MODULE_VERSION(DRV_VERSION); 2791 2792 + #ifdef CONFIG_PCI 2793 module_param(msi, int, 0444); 2794 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); 2795 + #endif 2796 2797 module_init(mv_init); 2798 module_exit(mv_exit);
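
The sata_mv changes above replace the driver's single dmam_alloc_coherent() chunk with managed DMA memory pools (crqb_pool, crpb_pool, sg_tbl_pool) so the CRQB/CRPB rings and per-tag SG tables each get guaranteed alignment with less wasted memory. A minimal sketch of that allocation pattern follows; the names example_alloc_table and EXAMPLE_BLK_SZ are illustrative only, not part of the driver:

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/string.h>

#define EXAMPLE_BLK_SZ	1024	/* size == align gives natural alignment, as with MV_CRQB_Q_SZ */

static int example_alloc_table(struct device *dev, void **cpu, dma_addr_t *dma)
{
	struct dma_pool *pool;

	/* Managed pool: released automatically when the device is unbound. */
	pool = dmam_pool_create("example_q", dev, EXAMPLE_BLK_SZ,
				EXAMPLE_BLK_SZ, 0);
	if (!pool)
		return -ENOMEM;

	/* Each block comes back aligned to EXAMPLE_BLK_SZ. */
	*cpu = dma_pool_alloc(pool, GFP_KERNEL, dma);
	if (!*cpu)
		return -ENOMEM;
	memset(*cpu, 0, EXAMPLE_BLK_SZ);

	/* Individual blocks are returned with dma_pool_free(pool, *cpu, *dma). */
	return 0;
}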
+12 -6
drivers/ata/sata_nv.c
···
 		}
 
 		if (status & (NV_ADMA_STAT_DONE |
-			      NV_ADMA_STAT_CPBERR)) {
-			u32 check_commands;
+			      NV_ADMA_STAT_CPBERR |
+			      NV_ADMA_STAT_CMD_COMPLETE)) {
+			u32 check_commands = notifier_clears[i];
 			int pos, error = 0;
 
-			if (ata_tag_valid(ap->link.active_tag))
-				check_commands = 1 << ap->link.active_tag;
-			else
-				check_commands = ap->link.sactive;
+			if (status & NV_ADMA_STAT_CPBERR) {
+				/* Check all active commands */
+				if (ata_tag_valid(ap->link.active_tag))
+					check_commands = 1 <<
+						ap->link.active_tag;
+				else
+					check_commands = ap->
+						link.sactive;
+			}
 
 			/** Check CPBs for completed commands */
 			while ((pos = ffs(check_commands)) && !error) {
+2 -1
drivers/pci/pci.c
···
 	dr = get_pci_dr(pdev);
 	if (unlikely(!dr))
 		return -ENOMEM;
-	WARN_ON(!!dr->enabled);
+	if (dr->enabled)
+		return 0;
 
 	rc = pci_enable_device(pdev);
 	if (!rc) {
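
With the pci.c change above, pcim_enable_device() becomes safe to call more than once on the same device: a repeated call simply returns 0 instead of tripping WARN_ON(). A hypothetical caller relying on that (my_probe is a made-up name, not from any driver):

#include <linux/pci.h>

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	/* First enable, done by the driver itself. */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* A second call, e.g. from a shared init helper that also wants
	 * the device enabled, now returns 0 because dr->enabled is set.
	 */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	return 0;
}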