Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev: (24 commits)
pci: allow multiple calls to pcim_enable_device()
Blackfin pata-bf54x driver: fix compiling bug - no ata_port struct in struct ata_device any more
Blackfin pata-bf54x driver: should cover all possible interrupt sources
Blackfin pata-bf54x driver: Add debug information
Blackfin pata-bf54x driver: Remove obsolete PM function
pata_sl82c105: dual channel support
ata_piix.c: make piix_merge_scr() static
sata_nv: fix for completion handling
sata_mv: Remove PCI dependency
sata_mv ncq Comments and version bump
sata_mv ncq Remove post internal cmd op
sata_mv ncq Enable NCQ operation
sata_mv ncq Introduce per-tag SG tables
ata_piix: IDE mode SATA patch for Intel ICH10 DeviceID's
ahci: RAID mode SATA patch for Intel ICH10 DeviceID's
sata_mv ncq Use DMA memory pools for hardware memory tables
sata_mv ncq Restrict max sectors to 8-bits on GenII NCQ
sata_mv ncq Ignore response status LSB on NCQ
sata_mv ncq Use hqtag instead of ioid
sata_mv ncq Add want ncq parameter for EDMA configuration
...

+389 -218
+1 -1
drivers/ata/Kconfig
@@ -69,7 +69,7 @@
 
 config SATA_MV
 	tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
-	depends on PCI && EXPERIMENTAL
+	depends on EXPERIMENTAL
 	help
 	  This option enables support for the Marvell Serial ATA family.
 	  Currently supports 88SX[56]0[48][01] chips.
+2
drivers/ata/ahci.c
@@ -475,6 +475,8 @@
 	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
 	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
 	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
+	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
+	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+9 -1
drivers/ata/ata_piix.c
@@ -267,6 +267,14 @@
 	{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
 	/* SATA Controller IDE (Tolapai) */
 	{ 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata_ahci },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 
 	{ }	/* terminate list */
 };
@@ -1076,7 +1068,7 @@
 	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
 }
 
-u32 piix_merge_scr(u32 val0, u32 val1, const int * const *merge_tbl)
+static u32 piix_merge_scr(u32 val0, u32 val1, const int * const *merge_tbl)
 {
 	u32 val = 0;
 	int i, mi;
+24 -29
drivers/ata/pata_bf54x.c
··· 299 299 */ 300 300 n6 = num_clocks_min(t6min, fsclk); 301 301 if (mode >= 0 && mode <= 4 && n6 >= 1) { 302 - pr_debug("set piomode: mode=%d, fsclk=%ud\n", mode, fsclk); 302 + dev_dbg(adev->link->ap->dev, "set piomode: mode=%d, fsclk=%ud\n", mode, fsclk); 303 303 /* calculate the timing values for register transfers. */ 304 304 while (mode > 0 && pio_fsclk[mode] > fsclk) 305 305 mode--; ··· 376 376 377 377 mode = adev->dma_mode - XFER_UDMA_0; 378 378 if (mode >= 0 && mode <= 5) { 379 - pr_debug("set udmamode: mode=%d\n", mode); 379 + dev_dbg(adev->link->ap->dev, "set udmamode: mode=%d\n", mode); 380 380 /* the most restrictive timing value is t6 and tc, 381 381 * the DIOW - data hold. If one SCLK pulse is longer 382 382 * than this minimum value then register ··· 433 433 434 434 mode = adev->dma_mode - XFER_MW_DMA_0; 435 435 if (mode >= 0 && mode <= 2) { 436 - pr_debug("set mdmamode: mode=%d\n", mode); 436 + dev_dbg(adev->link->ap->dev, "set mdmamode: mode=%d\n", mode); 437 437 /* the most restrictive timing value is tf, the DMACK to 438 438 * read data released. If one SCLK pulse is longer than 439 439 * this maximum value then the MDMA mode ··· 697 697 write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal); 698 698 write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam); 699 699 write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah); 700 - pr_debug("hob: feat 0x%X nsect 0x%X, lba 0x%X " 700 + dev_dbg(ap->dev, "hob: feat 0x%X nsect 0x%X, lba 0x%X " 701 701 "0x%X 0x%X\n", 702 702 tf->hob_feature, 703 703 tf->hob_nsect, ··· 711 711 write_atapi_register(base, ATA_REG_LBAL, tf->lbal); 712 712 write_atapi_register(base, ATA_REG_LBAM, tf->lbam); 713 713 write_atapi_register(base, ATA_REG_LBAH, tf->lbah); 714 - pr_debug("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", 714 + dev_dbg(ap->dev, "feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", 715 715 tf->feature, 716 716 tf->nsect, 717 717 tf->lbal, ··· 721 721 722 722 if (tf->flags & ATA_TFLAG_DEVICE) { 723 723 write_atapi_register(base, ATA_REG_DEVICE, tf->device); 724 - pr_debug("device 0x%X\n", tf->device); 724 + dev_dbg(ap->dev, "device 0x%X\n", tf->device); 725 725 } 726 726 727 727 ata_wait_idle(ap); ··· 782 782 const struct ata_taskfile *tf) 783 783 { 784 784 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 785 - pr_debug("ata%u: cmd 0x%X\n", ap->print_id, tf->command); 785 + dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command); 786 786 787 787 write_atapi_register(base, ATA_REG_CMD, tf->command); 788 788 ata_pause(ap); ··· 834 834 struct scatterlist *sg; 835 835 unsigned int si; 836 836 837 - pr_debug("in atapi dma setup\n"); 837 + dev_dbg(qc->ap->dev, "in atapi dma setup\n"); 838 838 /* Program the ATA_CTRL register with dir */ 839 839 if (qc->tf.flags & ATA_TFLAG_WRITE) { 840 840 /* fill the ATAPI DMA controller */ ··· 870 870 struct scatterlist *sg; 871 871 unsigned int si; 872 872 873 - pr_debug("in atapi dma start\n"); 873 + dev_dbg(qc->ap->dev, "in atapi dma start\n"); 874 874 if (!(ap->udma_mask || ap->mwdma_mask)) 875 875 return; 876 876 ··· 888 888 sg_dma_address(sg) + sg_dma_len(sg)); 889 889 } 890 890 enable_dma(CH_ATAPI_TX); 891 - pr_debug("enable udma write\n"); 891 + dev_dbg(qc->ap->dev, "enable udma write\n"); 892 892 893 893 /* Send ATA DMA write command */ 894 894 bfin_exec_command(ap, &qc->tf); ··· 898 898 | XFER_DIR)); 899 899 } else { 900 900 enable_dma(CH_ATAPI_RX); 901 - pr_debug("enable udma read\n"); 901 + dev_dbg(qc->ap->dev, "enable udma read\n"); 902 902 903 903 /* Send ATA DMA read command */ 904 
904 bfin_exec_command(ap, &qc->tf); ··· 936 936 struct scatterlist *sg; 937 937 unsigned int si; 938 938 939 - pr_debug("in atapi dma stop\n"); 939 + dev_dbg(qc->ap->dev, "in atapi dma stop\n"); 940 940 if (!(ap->udma_mask || ap->mwdma_mask)) 941 941 return; 942 942 ··· 1147 1147 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 1148 1148 unsigned short int_status = ATAPI_GET_INT_STATUS(base); 1149 1149 1150 - if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON)) { 1150 + if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON)) 1151 1151 host_stat |= ATA_DMA_ACTIVE; 1152 - } 1153 - if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT)) { 1152 + if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT| 1153 + ATAPI_DEV_INT)) 1154 1154 host_stat |= ATA_DMA_INTR; 1155 - } 1156 - if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT)) { 1157 - host_stat |= ATA_DMA_ERR; 1158 - } 1155 + if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT)) 1156 + host_stat |= ATA_DMA_ERR|ATA_DMA_INTR; 1157 + 1158 + dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat); 1159 1159 1160 1160 return host_stat; 1161 1161 } ··· 1213 1213 { 1214 1214 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 1215 1215 1216 - pr_debug("in atapi irq clear\n"); 1217 - 1216 + dev_dbg(ap->dev, "in atapi irq clear\n"); 1218 1217 ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT 1219 1218 | MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT 1220 1219 | MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT); ··· 1231 1232 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 1232 1233 u8 tmp; 1233 1234 1234 - pr_debug("in atapi irq on\n"); 1235 + dev_dbg(ap->dev, "in atapi irq on\n"); 1235 1236 ap->ctl &= ~ATA_NIEN; 1236 1237 ap->last_ctl = ap->ctl; 1237 1238 ··· 1254 1255 { 1255 1256 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 1256 1257 1257 - pr_debug("in atapi dma freeze\n"); 1258 + dev_dbg(ap->dev, "in atapi dma freeze\n"); 1258 1259 ap->ctl |= ATA_NIEN; 1259 1260 ap->last_ctl = ap->ctl; 1260 1261 ··· 1327 1328 1328 1329 static void bfin_port_stop(struct ata_port *ap) 1329 1330 { 1330 - pr_debug("in atapi port stop\n"); 1331 + dev_dbg(ap->dev, "in atapi port stop\n"); 1331 1332 if (ap->udma_mask != 0 || ap->mwdma_mask != 0) { 1332 1333 free_dma(CH_ATAPI_RX); 1333 1334 free_dma(CH_ATAPI_TX); ··· 1336 1337 1337 1338 static int bfin_port_start(struct ata_port *ap) 1338 1339 { 1339 - pr_debug("in atapi port start\n"); 1340 + dev_dbg(ap->dev, "in atapi port start\n"); 1340 1341 if (!(ap->udma_mask || ap->mwdma_mask)) 1341 1342 return 0; 1342 1343 ··· 1372 1373 .slave_configure = ata_scsi_slave_config, 1373 1374 .slave_destroy = ata_scsi_slave_destroy, 1374 1375 .bios_param = ata_std_bios_param, 1375 - #ifdef CONFIG_PM 1376 - .resume = ata_scsi_device_resume, 1377 - .suspend = ata_scsi_device_suspend, 1378 - #endif 1379 1376 }; 1380 1377 1381 1378 static const struct ata_port_operations bfin_pata_ops = {
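Most of the pata_bf54x changes above convert the driver's pr_debug() calls to dev_dbg(), so each message is attributed to the issuing device instead of being anonymous, and drop the obsolete per-device PM hooks. For reference, a minimal sketch of the logging difference (hypothetical demo_debug() helper; any valid struct device pointer will do):

#include <linux/device.h>
#include <linux/kernel.h>

static void demo_debug(struct device *dev, int mode)
{
	/* anonymous: only the format string identifies the source */
	pr_debug("set piomode: mode=%d\n", mode);

	/* same message, prefixed with the driver and device name */
	dev_dbg(dev, "set piomode: mode=%d\n", mode);
}

Both forms compile away unless DEBUG is defined, so the conversion only changes how the messages are attributed.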
+31 -2
drivers/ata/pata_sl82c105.c
@@ -26,7 +26,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_sl82c105"
-#define DRV_VERSION "0.3.2"
+#define DRV_VERSION "0.3.3"
 
 enum {
 	/*
@@ -206,6 +206,34 @@
 	sl82c105_set_piomode(ap, qc->dev);
 }
 
+/**
+ *	sl82c105_qc_defer	-	implement serialization
+ *	@qc: command
+ *
+ *	We must issue one command per host not per channel because
+ *	of the reset bug.
+ *
+ *	Q: is the scsi host lock sufficient ?
+ */
+
+static int sl82c105_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_host *host = qc->ap->host;
+	struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
+	int rc;
+
+	/* First apply the usual rules */
+	rc = ata_std_qc_defer(qc);
+	if (rc != 0)
+		return rc;
+
+	/* Now apply serialization rules. Only allow a command if the
+	   other channel state machine is idle */
+	if (alt && alt->qc_active)
+		return ATA_DEFER_PORT;
+	return 0;
+}
+
 static struct scsi_host_template sl82c105_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
@@ -273,6 +245,7 @@
 	.bmdma_stop	= sl82c105_bmdma_stop,
 	.bmdma_status	= ata_bmdma_status,
 
+	.qc_defer	= sl82c105_qc_defer,
 	.qc_prep	= ata_qc_prep,
 	.qc_issue	= ata_qc_issue_prot,
 
@@ -341,7 +312,7 @@
 	};
 	/* for now use only the first port */
 	const struct ata_port_info *ppi[] = { &info_early,
-					      &ata_dummy_port_info };
+					      NULL };
 	u32 val;
 	int rev;
 
+308 -178
drivers/ata/sata_mv.c
··· 29 29 I distinctly remember a couple workarounds (one related to PCI-X) 30 30 are still needed. 31 31 32 - 4) Add NCQ support (easy to intermediate, once new-EH support appears) 32 + 2) Improve/fix IRQ and error handling sequences. 33 + 34 + 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it). 35 + 36 + 4) Think about TCQ support here, and for libata in general 37 + with controllers that suppport it via host-queuing hardware 38 + (a software-only implementation could be a nightmare). 33 39 34 40 5) Investigate problems with PCI Message Signalled Interrupts (MSI). 35 41 ··· 59 53 Target mode, for those without docs, is the ability to directly 60 54 connect two SATA controllers. 61 55 62 - 13) Verify that 7042 is fully supported. I only have a 6042. 63 - 64 56 */ 65 57 66 58 ··· 77 73 #include <linux/libata.h> 78 74 79 75 #define DRV_NAME "sata_mv" 80 - #define DRV_VERSION "1.01" 76 + #define DRV_VERSION "1.20" 81 77 82 78 enum { 83 79 /* BAR's are enumerated in terms of pci_resource_start() terms */ ··· 111 107 112 108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB 113 109 * CRPB needs alignment on a 256B boundary. Size == 256B 114 - * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB 115 110 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B 116 111 */ 117 112 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), 118 113 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), 119 - MV_MAX_SG_CT = 176, 114 + MV_MAX_SG_CT = 256, 120 115 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), 121 - MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ), 122 116 123 117 MV_PORTS_PER_HC = 4, 124 118 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ ··· 127 125 /* Host Flags */ 128 126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 129 127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 128 + /* SoC integrated controllers, no PCI interface */ 129 + MV_FLAG_SOC = (1 << 28), 130 + 130 131 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 131 132 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | 132 133 ATA_FLAG_PIO_POLLING, ··· 175 170 176 171 PCIE_IRQ_CAUSE_OFS = 0x1900, 177 172 PCIE_IRQ_MASK_OFS = 0x1910, 178 - PCIE_UNMASK_ALL_IRQS = 0x70a, /* assorted bits */ 173 + PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */ 179 174 180 175 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60, 181 176 HC_MAIN_IRQ_MASK_OFS = 0x1d64, ··· 215 210 /* SATA registers */ 216 211 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ 217 212 SATA_ACTIVE_OFS = 0x350, 213 + SATA_FIS_IRQ_CAUSE_OFS = 0x364, 218 214 PHY_MODE3 = 0x310, 219 215 PHY_MODE4 = 0x314, 220 216 PHY_MODE2 = 0x330, ··· 228 222 229 223 /* Port registers */ 230 224 EDMA_CFG_OFS = 0, 231 - EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */ 232 - EDMA_CFG_NCQ = (1 << 5), 233 - EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ 234 - EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ 235 - EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ 225 + EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */ 226 + EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */ 227 + EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ 228 + EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ 229 + EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ 236 230 237 231 EDMA_ERR_IRQ_CAUSE_OFS = 0x8, 238 232 EDMA_ERR_IRQ_MASK_OFS = 0xc, ··· 250 244 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ 251 245 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ 252 246 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ 247 
+ 253 248 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ 254 - EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), 249 + EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */ 250 + EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */ 251 + EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */ 252 + EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */ 253 + 255 254 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ 255 + 256 256 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ 257 + EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */ 258 + EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */ 259 + EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */ 260 + EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */ 261 + EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */ 262 + 257 263 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ 264 + 258 265 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ 259 266 EDMA_ERR_OVERRUN_5 = (1 << 5), 260 267 EDMA_ERR_UNDERRUN_5 = (1 << 6), 268 + 269 + EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | 270 + EDMA_ERR_LNK_CTRL_RX_1 | 271 + EDMA_ERR_LNK_CTRL_RX_3 | 272 + EDMA_ERR_LNK_CTRL_TX, 273 + 261 274 EDMA_EH_FREEZE = EDMA_ERR_D_PAR | 262 275 EDMA_ERR_PRD_PAR | 263 276 EDMA_ERR_DEV_DCON | ··· 336 311 337 312 /* Port private flags (pp_flags) */ 338 313 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ 314 + MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ 339 315 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */ 340 316 }; 341 317 342 318 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) 343 319 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) 344 320 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) 321 + #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC)) 345 322 346 323 enum { 347 324 /* DMA boundary 0xffff is required by the s/g splitting ··· 406 379 dma_addr_t crqb_dma; 407 380 struct mv_crpb *crpb; 408 381 dma_addr_t crpb_dma; 409 - struct mv_sg *sg_tbl; 410 - dma_addr_t sg_tbl_dma; 382 + struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH]; 383 + dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH]; 411 384 412 385 unsigned int req_idx; 413 386 unsigned int resp_idx; ··· 427 400 u32 irq_cause_ofs; 428 401 u32 irq_mask_ofs; 429 402 u32 unmask_all_irqs; 403 + /* 404 + * These consistent DMA memory pools give us guaranteed 405 + * alignment for hardware-accessed data structures, 406 + * and less memory waste in accomplishing the alignment. 
407 + */ 408 + struct dma_pool *crqb_pool; 409 + struct dma_pool *crpb_pool; 410 + struct dma_pool *sg_tbl_pool; 430 411 }; 431 412 432 413 struct mv_hw_ops { ··· 446 411 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio, 447 412 unsigned int n_hc); 448 413 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio); 449 - void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio); 414 + void (*reset_bus)(struct ata_host *host, void __iomem *mmio); 450 415 }; 451 416 452 417 static void mv_irq_clear(struct ata_port *ap); ··· 460 425 static void mv_qc_prep_iie(struct ata_queued_cmd *qc); 461 426 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); 462 427 static void mv_error_handler(struct ata_port *ap); 463 - static void mv_post_int_cmd(struct ata_queued_cmd *qc); 464 428 static void mv_eh_freeze(struct ata_port *ap); 465 429 static void mv_eh_thaw(struct ata_port *ap); 466 - static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 430 + static void mv6_dev_config(struct ata_device *dev); 467 431 468 432 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 469 433 unsigned int port); ··· 472 438 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 473 439 unsigned int n_hc); 474 440 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 475 - static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio); 441 + static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio); 476 442 477 443 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 478 444 unsigned int port); ··· 482 448 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 483 449 unsigned int n_hc); 484 450 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 485 - static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio); 451 + static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); 486 452 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, 487 453 unsigned int port_no); 454 + static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv, 455 + void __iomem *port_mmio, int want_ncq); 456 + static int __mv_stop_dma(struct ata_port *ap); 488 457 458 + /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below 459 + * because we have to allow room for worst case splitting of 460 + * PRDs for 64K boundaries in mv_fill_sg(). 
461 + */ 489 462 static struct scsi_host_template mv5_sht = { 490 463 .module = THIS_MODULE, 491 464 .name = DRV_NAME, ··· 516 475 .name = DRV_NAME, 517 476 .ioctl = ata_scsi_ioctl, 518 477 .queuecommand = ata_scsi_queuecmd, 519 - .can_queue = ATA_DEF_QUEUE, 478 + .change_queue_depth = ata_scsi_change_queue_depth, 479 + .can_queue = MV_MAX_Q_DEPTH - 1, 520 480 .this_id = ATA_SHT_THIS_ID, 521 481 .sg_tablesize = MV_MAX_SG_CT / 2, 522 482 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, ··· 547 505 .irq_on = ata_irq_on, 548 506 549 507 .error_handler = mv_error_handler, 550 - .post_internal_cmd = mv_post_int_cmd, 551 508 .freeze = mv_eh_freeze, 552 509 .thaw = mv_eh_thaw, 553 510 ··· 558 517 }; 559 518 560 519 static const struct ata_port_operations mv6_ops = { 520 + .dev_config = mv6_dev_config, 561 521 .tf_load = ata_tf_load, 562 522 .tf_read = ata_tf_read, 563 523 .check_status = ata_check_status, ··· 575 533 .irq_on = ata_irq_on, 576 534 577 535 .error_handler = mv_error_handler, 578 - .post_internal_cmd = mv_post_int_cmd, 579 536 .freeze = mv_eh_freeze, 580 537 .thaw = mv_eh_thaw, 538 + .qc_defer = ata_std_qc_defer, 581 539 582 540 .scr_read = mv_scr_read, 583 541 .scr_write = mv_scr_write, ··· 603 561 .irq_on = ata_irq_on, 604 562 605 563 .error_handler = mv_error_handler, 606 - .post_internal_cmd = mv_post_int_cmd, 607 564 .freeze = mv_eh_freeze, 608 565 .thaw = mv_eh_thaw, 566 + .qc_defer = ata_std_qc_defer, 609 567 610 568 .scr_read = mv_scr_read, 611 569 .scr_write = mv_scr_write, ··· 634 592 .port_ops = &mv5_ops, 635 593 }, 636 594 { /* chip_604x */ 637 - .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS, 595 + .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 596 + ATA_FLAG_NCQ, 638 597 .pio_mask = 0x1f, /* pio0-4 */ 639 598 .udma_mask = ATA_UDMA6, 640 599 .port_ops = &mv6_ops, 641 600 }, 642 601 { /* chip_608x */ 643 602 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 644 - MV_FLAG_DUAL_HC, 603 + ATA_FLAG_NCQ | MV_FLAG_DUAL_HC, 645 604 .pio_mask = 0x1f, /* pio0-4 */ 646 605 .udma_mask = ATA_UDMA6, 647 606 .port_ops = &mv6_ops, 648 607 }, 649 608 { /* chip_6042 */ 650 - .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS, 609 + .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 610 + ATA_FLAG_NCQ, 651 611 .pio_mask = 0x1f, /* pio0-4 */ 652 612 .udma_mask = ATA_UDMA6, 653 613 .port_ops = &mv_iie_ops, 654 614 }, 655 615 { /* chip_7042 */ 656 - .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS, 616 + .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 617 + ATA_FLAG_NCQ, 657 618 .pio_mask = 0x1f, /* pio0-4 */ 658 619 .udma_mask = ATA_UDMA6, 659 620 .port_ops = &mv_iie_ops, ··· 693 648 { } /* terminate list */ 694 649 }; 695 650 696 - static struct pci_driver mv_pci_driver = { 697 - .name = DRV_NAME, 698 - .id_table = mv_pci_tbl, 699 - .probe = mv_init_one, 700 - .remove = ata_pci_remove_one, 701 - }; 702 - 703 651 static const struct mv_hw_ops mv5xxx_ops = { 704 652 .phy_errata = mv5_phy_errata, 705 653 .enable_leds = mv5_enable_leds, ··· 710 672 .reset_flash = mv6_reset_flash, 711 673 .reset_bus = mv_reset_pci_bus, 712 674 }; 713 - 714 - /* 715 - * module options 716 - */ 717 - static int msi; /* Use PCI msi; either zero (off, default) or non-zero */ 718 - 719 - 720 - /* move to PCI layer or libata core? 
*/ 721 - static int pci_go_64(struct pci_dev *pdev) 722 - { 723 - int rc; 724 - 725 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 726 - rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 727 - if (rc) { 728 - rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 729 - if (rc) { 730 - dev_printk(KERN_ERR, &pdev->dev, 731 - "64-bit DMA enable failed\n"); 732 - return rc; 733 - } 734 - } 735 - } else { 736 - rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 737 - if (rc) { 738 - dev_printk(KERN_ERR, &pdev->dev, 739 - "32-bit DMA enable failed\n"); 740 - return rc; 741 - } 742 - rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 743 - if (rc) { 744 - dev_printk(KERN_ERR, &pdev->dev, 745 - "32-bit consistent DMA enable failed\n"); 746 - return rc; 747 - } 748 - } 749 - 750 - return rc; 751 - } 752 675 753 676 /* 754 677 * Functions ··· 814 815 * LOCKING: 815 816 * Inherited from caller. 816 817 */ 817 - static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv, 818 - struct mv_port_priv *pp) 818 + static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, 819 + struct mv_port_priv *pp, u8 protocol) 819 820 { 821 + int want_ncq = (protocol == ATA_PROT_NCQ); 822 + 823 + if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 824 + int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); 825 + if (want_ncq != using_ncq) 826 + __mv_stop_dma(ap); 827 + } 820 828 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { 829 + struct mv_host_priv *hpriv = ap->host->private_data; 830 + int hard_port = mv_hardport_from_port(ap->port_no); 831 + void __iomem *hc_mmio = mv_hc_base_from_port( 832 + ap->host->iomap[MV_PRIMARY_BAR], hard_port); 833 + u32 hc_irq_cause, ipending; 834 + 821 835 /* clear EDMA event indicators, if any */ 822 - writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS); 836 + writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 823 837 824 - mv_set_edma_ptrs(base, hpriv, pp); 838 + /* clear EDMA interrupt indicator, if any */ 839 + hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); 840 + ipending = (DEV_IRQ << hard_port) | 841 + (CRPB_DMA_DONE << hard_port); 842 + if (hc_irq_cause & ipending) { 843 + writelfl(hc_irq_cause & ~ipending, 844 + hc_mmio + HC_IRQ_CAUSE_OFS); 845 + } 825 846 826 - writelfl(EDMA_EN, base + EDMA_CMD_OFS); 847 + mv_edma_cfg(pp, hpriv, port_mmio, want_ncq); 848 + 849 + /* clear FIS IRQ Cause */ 850 + writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); 851 + 852 + mv_set_edma_ptrs(port_mmio, hpriv, pp); 853 + 854 + writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS); 827 855 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 828 856 } 829 - WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS))); 857 + WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); 830 858 } 831 859 832 860 /** ··· 1029 1003 return -EINVAL; 1030 1004 } 1031 1005 1032 - static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv, 1033 - void __iomem *port_mmio) 1006 + static void mv6_dev_config(struct ata_device *adev) 1034 1007 { 1035 - u32 cfg = readl(port_mmio + EDMA_CFG_OFS); 1008 + /* 1009 + * We don't have hob_nsect when doing NCQ commands on Gen-II. 1010 + * See mv_qc_prep() for more info. 
1011 + */ 1012 + if (adev->flags & ATA_DFLAG_NCQ) 1013 + if (adev->max_sectors > ATA_MAX_SECTORS) 1014 + adev->max_sectors = ATA_MAX_SECTORS; 1015 + } 1016 + 1017 + static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv, 1018 + void __iomem *port_mmio, int want_ncq) 1019 + { 1020 + u32 cfg; 1036 1021 1037 1022 /* set up non-NCQ EDMA configuration */ 1038 - cfg &= ~(1 << 9); /* disable eQue */ 1023 + cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ 1039 1024 1040 - if (IS_GEN_I(hpriv)) { 1041 - cfg &= ~0x1f; /* clear queue depth */ 1025 + if (IS_GEN_I(hpriv)) 1042 1026 cfg |= (1 << 8); /* enab config burst size mask */ 1043 - } 1044 1027 1045 - else if (IS_GEN_II(hpriv)) { 1046 - cfg &= ~0x1f; /* clear queue depth */ 1028 + else if (IS_GEN_II(hpriv)) 1047 1029 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; 1048 - cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */ 1049 - } 1050 1030 1051 1031 else if (IS_GEN_IIE(hpriv)) { 1052 1032 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ 1053 1033 cfg |= (1 << 22); /* enab 4-entry host queue cache */ 1054 - cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */ 1055 1034 cfg |= (1 << 18); /* enab early completion */ 1056 1035 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ 1057 - cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */ 1058 - cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */ 1059 1036 } 1060 1037 1038 + if (want_ncq) { 1039 + cfg |= EDMA_CFG_NCQ; 1040 + pp->pp_flags |= MV_PP_FLAG_NCQ_EN; 1041 + } else 1042 + pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN; 1043 + 1061 1044 writelfl(cfg, port_mmio + EDMA_CFG_OFS); 1045 + } 1046 + 1047 + static void mv_port_free_dma_mem(struct ata_port *ap) 1048 + { 1049 + struct mv_host_priv *hpriv = ap->host->private_data; 1050 + struct mv_port_priv *pp = ap->private_data; 1051 + int tag; 1052 + 1053 + if (pp->crqb) { 1054 + dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma); 1055 + pp->crqb = NULL; 1056 + } 1057 + if (pp->crpb) { 1058 + dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma); 1059 + pp->crpb = NULL; 1060 + } 1061 + /* 1062 + * For GEN_I, there's no NCQ, so we have only a single sg_tbl. 1063 + * For later hardware, we have one unique sg_tbl per NCQ tag. 
1064 + */ 1065 + for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1066 + if (pp->sg_tbl[tag]) { 1067 + if (tag == 0 || !IS_GEN_I(hpriv)) 1068 + dma_pool_free(hpriv->sg_tbl_pool, 1069 + pp->sg_tbl[tag], 1070 + pp->sg_tbl_dma[tag]); 1071 + pp->sg_tbl[tag] = NULL; 1072 + } 1073 + } 1062 1074 } 1063 1075 1064 1076 /** ··· 1115 1051 struct mv_host_priv *hpriv = ap->host->private_data; 1116 1052 struct mv_port_priv *pp; 1117 1053 void __iomem *port_mmio = mv_ap_base(ap); 1118 - void *mem; 1119 - dma_addr_t mem_dma; 1120 1054 unsigned long flags; 1121 - int rc; 1055 + int tag, rc; 1122 1056 1123 1057 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1124 1058 if (!pp) 1125 1059 return -ENOMEM; 1126 - 1127 - mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma, 1128 - GFP_KERNEL); 1129 - if (!mem) 1130 - return -ENOMEM; 1131 - memset(mem, 0, MV_PORT_PRIV_DMA_SZ); 1060 + ap->private_data = pp; 1132 1061 1133 1062 rc = ata_pad_alloc(ap, dev); 1134 1063 if (rc) 1135 1064 return rc; 1136 1065 1137 - /* First item in chunk of DMA memory: 1138 - * 32-slot command request table (CRQB), 32 bytes each in size 1139 - */ 1140 - pp->crqb = mem; 1141 - pp->crqb_dma = mem_dma; 1142 - mem += MV_CRQB_Q_SZ; 1143 - mem_dma += MV_CRQB_Q_SZ; 1066 + pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); 1067 + if (!pp->crqb) 1068 + return -ENOMEM; 1069 + memset(pp->crqb, 0, MV_CRQB_Q_SZ); 1144 1070 1145 - /* Second item: 1146 - * 32-slot command response table (CRPB), 8 bytes each in size 1147 - */ 1148 - pp->crpb = mem; 1149 - pp->crpb_dma = mem_dma; 1150 - mem += MV_CRPB_Q_SZ; 1151 - mem_dma += MV_CRPB_Q_SZ; 1071 + pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); 1072 + if (!pp->crpb) 1073 + goto out_port_free_dma_mem; 1074 + memset(pp->crpb, 0, MV_CRPB_Q_SZ); 1152 1075 1153 - /* Third item: 1154 - * Table of scatter-gather descriptors (ePRD), 16 bytes each 1076 + /* 1077 + * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. 1078 + * For later hardware, we need one unique sg_tbl per NCQ tag. 1155 1079 */ 1156 - pp->sg_tbl = mem; 1157 - pp->sg_tbl_dma = mem_dma; 1080 + for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1081 + if (tag == 0 || !IS_GEN_I(hpriv)) { 1082 + pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, 1083 + GFP_KERNEL, &pp->sg_tbl_dma[tag]); 1084 + if (!pp->sg_tbl[tag]) 1085 + goto out_port_free_dma_mem; 1086 + } else { 1087 + pp->sg_tbl[tag] = pp->sg_tbl[0]; 1088 + pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; 1089 + } 1090 + } 1158 1091 1159 1092 spin_lock_irqsave(&ap->host->lock, flags); 1160 1093 1161 - mv_edma_cfg(ap, hpriv, port_mmio); 1162 - 1094 + mv_edma_cfg(pp, hpriv, port_mmio, 0); 1163 1095 mv_set_edma_ptrs(port_mmio, hpriv, pp); 1164 1096 1165 1097 spin_unlock_irqrestore(&ap->host->lock, flags); ··· 1164 1104 * we'll be unable to send non-data, PIO, etc due to restricted access 1165 1105 * to shadow regs. 
1166 1106 */ 1167 - ap->private_data = pp; 1168 1107 return 0; 1108 + 1109 + out_port_free_dma_mem: 1110 + mv_port_free_dma_mem(ap); 1111 + return -ENOMEM; 1169 1112 } 1170 1113 1171 1114 /** ··· 1183 1120 static void mv_port_stop(struct ata_port *ap) 1184 1121 { 1185 1122 mv_stop_dma(ap); 1123 + mv_port_free_dma_mem(ap); 1186 1124 } 1187 1125 1188 1126 /** ··· 1202 1138 struct mv_sg *mv_sg, *last_sg = NULL; 1203 1139 unsigned int si; 1204 1140 1205 - mv_sg = pp->sg_tbl; 1141 + mv_sg = pp->sg_tbl[qc->tag]; 1206 1142 for_each_sg(qc->sg, sg, qc->n_elem, si) { 1207 1143 dma_addr_t addr = sg_dma_address(sg); 1208 1144 u32 sg_len = sg_dma_len(sg); ··· 1258 1194 u16 flags = 0; 1259 1195 unsigned in_index; 1260 1196 1261 - if (qc->tf.protocol != ATA_PROT_DMA) 1197 + if ((qc->tf.protocol != ATA_PROT_DMA) && 1198 + (qc->tf.protocol != ATA_PROT_NCQ)) 1262 1199 return; 1263 1200 1264 1201 /* Fill in command request block ··· 1268 1203 flags |= CRQB_FLAG_READ; 1269 1204 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1270 1205 flags |= qc->tag << CRQB_TAG_SHIFT; 1271 - flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/ 1272 1206 1273 1207 /* get current queue index from software */ 1274 1208 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; 1275 1209 1276 1210 pp->crqb[in_index].sg_addr = 1277 - cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); 1211 + cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); 1278 1212 pp->crqb[in_index].sg_addr_hi = 1279 - cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); 1213 + cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); 1280 1214 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); 1281 1215 1282 1216 cw = &pp->crqb[in_index].ata_cmd[0]; ··· 1295 1231 case ATA_CMD_WRITE_FUA_EXT: 1296 1232 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); 1297 1233 break; 1298 - #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */ 1299 1234 case ATA_CMD_FPDMA_READ: 1300 1235 case ATA_CMD_FPDMA_WRITE: 1301 1236 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); 1302 1237 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); 1303 1238 break; 1304 - #endif /* FIXME: remove this line when NCQ added */ 1305 1239 default: 1306 1240 /* The only other commands EDMA supports in non-queued and 1307 1241 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none ··· 1348 1286 unsigned in_index; 1349 1287 u32 flags = 0; 1350 1288 1351 - if (qc->tf.protocol != ATA_PROT_DMA) 1289 + if ((qc->tf.protocol != ATA_PROT_DMA) && 1290 + (qc->tf.protocol != ATA_PROT_NCQ)) 1352 1291 return; 1353 1292 1354 1293 /* Fill in Gen IIE command request block ··· 1359 1296 1360 1297 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1361 1298 flags |= qc->tag << CRQB_TAG_SHIFT; 1362 - flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really- 1363 - what we use as our tag */ 1299 + flags |= qc->tag << CRQB_HOSTQ_SHIFT; 1364 1300 1365 1301 /* get current queue index from software */ 1366 1302 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; 1367 1303 1368 1304 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; 1369 - crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); 1370 - crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); 1305 + crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); 1306 + crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); 1371 1307 crqb->flags = cpu_to_le32(flags); 1372 1308 1373 1309 tf = &qc->tf; ··· 1413 1351 struct ata_port *ap = qc->ap; 1414 1352 void __iomem *port_mmio = mv_ap_base(ap); 1415 1353 struct mv_port_priv *pp = ap->private_data; 1416 - 
struct mv_host_priv *hpriv = ap->host->private_data; 1417 1354 u32 in_index; 1418 1355 1419 - if (qc->tf.protocol != ATA_PROT_DMA) { 1356 + if ((qc->tf.protocol != ATA_PROT_DMA) && 1357 + (qc->tf.protocol != ATA_PROT_NCQ)) { 1420 1358 /* We're about to send a non-EDMA capable command to the 1421 1359 * port. Turn off EDMA so there won't be problems accessing 1422 1360 * shadow block, etc registers. ··· 1425 1363 return ata_qc_issue_prot(qc); 1426 1364 } 1427 1365 1428 - mv_start_dma(port_mmio, hpriv, pp); 1429 - 1430 - in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; 1431 - 1432 - /* until we do queuing, the queue should be empty at this point */ 1433 - WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) 1434 - >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); 1366 + mv_start_dma(ap, port_mmio, pp, qc->tf.protocol); 1435 1367 1436 1368 pp->req_idx++; 1437 1369 ··· 1493 1437 ata_ehi_hotplugged(ehi); 1494 1438 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? 1495 1439 "dev disconnect" : "dev connect"); 1440 + action |= ATA_EH_HARDRESET; 1496 1441 } 1497 1442 1498 1443 if (IS_GEN_I(hpriv)) { ··· 1522 1465 } 1523 1466 1524 1467 /* Clear EDMA now that SERR cleanup done */ 1525 - writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1468 + writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1526 1469 1527 1470 if (!err_mask) { 1528 1471 err_mask = AC_ERR_OTHER; ··· 1595 1538 * support for queueing. this works transparently for 1596 1539 * queued and non-queued modes. 1597 1540 */ 1598 - else if (IS_GEN_II(hpriv)) 1599 - tag = (le16_to_cpu(pp->crpb[out_index].id) 1600 - >> CRPB_IOID_SHIFT_6) & 0x3f; 1601 - 1602 - else /* IS_GEN_IIE */ 1603 - tag = (le16_to_cpu(pp->crpb[out_index].id) 1604 - >> CRPB_IOID_SHIFT_7) & 0x3f; 1541 + else 1542 + tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f; 1605 1543 1606 1544 qc = ata_qc_from_tag(ap, tag); 1607 1545 1608 - /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS 1609 - * bits (WARNING: might not necessarily be associated 1610 - * with this command), which -should- be clear 1611 - * if all is well 1546 + /* For non-NCQ mode, the lower 8 bits of status 1547 + * are from EDMA_ERR_IRQ_CAUSE_OFS, 1548 + * which should be zero if all went well. 
1612 1549 */ 1613 1550 status = le16_to_cpu(pp->crpb[out_index].flags); 1614 - if (unlikely(status & 0xff)) { 1551 + if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) { 1615 1552 mv_err_intr(ap, qc); 1616 1553 return; 1617 1554 } ··· 1766 1715 struct ata_host *host = dev_instance; 1767 1716 unsigned int hc, handled = 0, n_hcs; 1768 1717 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; 1769 - u32 irq_stat; 1718 + u32 irq_stat, irq_mask; 1770 1719 1720 + spin_lock(&host->lock); 1771 1721 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); 1722 + irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS); 1772 1723 1773 1724 /* check the cases where we either have nothing pending or have read 1774 1725 * a bogus register value which can indicate HW removal or PCI fault 1775 1726 */ 1776 - if (!irq_stat || (0xffffffffU == irq_stat)) 1777 - return IRQ_NONE; 1727 + if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat)) 1728 + goto out_unlock; 1778 1729 1779 1730 n_hcs = mv_get_hc_count(host->ports[0]->flags); 1780 - spin_lock(&host->lock); 1781 1731 1782 - if (unlikely(irq_stat & PCI_ERR)) { 1732 + if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) { 1783 1733 mv_pci_error(host, mmio); 1784 1734 handled = 1; 1785 1735 goto out_unlock; /* skip all other HC irq handling */ ··· 1851 1799 return -EINVAL; 1852 1800 } 1853 1801 1854 - static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio) 1802 + static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) 1855 1803 { 1804 + struct pci_dev *pdev = to_pci_dev(host->dev); 1856 1805 int early_5080; 1857 1806 1858 1807 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); ··· 1864 1811 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 1865 1812 } 1866 1813 1867 - mv_reset_pci_bus(pdev, mmio); 1814 + mv_reset_pci_bus(host, mmio); 1868 1815 } 1869 1816 1870 1817 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) ··· 1988 1935 1989 1936 #undef ZERO 1990 1937 #define ZERO(reg) writel(0, mmio + (reg)) 1991 - static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio) 1938 + static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) 1992 1939 { 1993 - struct ata_host *host = dev_get_drvdata(&pdev->dev); 1994 1940 struct mv_host_priv *hpriv = host->private_data; 1995 1941 u32 tmp; 1996 1942 ··· 2381 2329 mv_hardreset, mv_postreset); 2382 2330 } 2383 2331 2384 - static void mv_post_int_cmd(struct ata_queued_cmd *qc) 2385 - { 2386 - mv_stop_dma(qc->ap); 2387 - } 2388 - 2389 2332 static void mv_eh_freeze(struct ata_port *ap) 2390 2333 { 2391 2334 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; ··· 2474 2427 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); 2475 2428 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 2476 2429 2477 - /* unmask all EDMA error interrupts */ 2478 - writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS); 2430 + /* unmask all non-transient EDMA error interrupts */ 2431 + writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS); 2479 2432 2480 2433 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 2481 2434 readl(port_mmio + EDMA_CFG_OFS), ··· 2633 2586 static int mv_init_host(struct ata_host *host, unsigned int board_idx) 2634 2587 { 2635 2588 int rc = 0, n_hc, port, hc; 2636 - struct pci_dev *pdev = to_pci_dev(host->dev); 2637 2589 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; 2638 2590 struct mv_host_priv *hpriv = host->private_data; 2639 2591 ··· 2653 2607 goto done; 2654 2608 2655 2609 hpriv->ops->reset_flash(hpriv, mmio); 
2656 - hpriv->ops->reset_bus(pdev, mmio); 2610 + hpriv->ops->reset_bus(host, mmio); 2657 2611 hpriv->ops->enable_leds(hpriv, mmio); 2658 2612 2659 2613 for (port = 0; port < host->n_ports; port++) { ··· 2676 2630 2677 2631 mv_port_init(&ap->ioaddr, port_mmio); 2678 2632 2633 + #ifdef CONFIG_PCI 2679 2634 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); 2680 2635 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); 2636 + #endif 2681 2637 } 2682 2638 2683 2639 for (hc = 0; hc < n_hc; hc++) { ··· 2713 2665 readl(mmio + hpriv->irq_mask_ofs)); 2714 2666 2715 2667 done: 2668 + return rc; 2669 + } 2670 + 2671 + #ifdef CONFIG_PCI 2672 + static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 2673 + 2674 + static struct pci_driver mv_pci_driver = { 2675 + .name = DRV_NAME, 2676 + .id_table = mv_pci_tbl, 2677 + .probe = mv_init_one, 2678 + .remove = ata_pci_remove_one, 2679 + }; 2680 + 2681 + /* 2682 + * module options 2683 + */ 2684 + static int msi; /* Use PCI msi; either zero (off, default) or non-zero */ 2685 + 2686 + 2687 + /* move to PCI layer or libata core? */ 2688 + static int pci_go_64(struct pci_dev *pdev) 2689 + { 2690 + int rc; 2691 + 2692 + if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 2693 + rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 2694 + if (rc) { 2695 + rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 2696 + if (rc) { 2697 + dev_printk(KERN_ERR, &pdev->dev, 2698 + "64-bit DMA enable failed\n"); 2699 + return rc; 2700 + } 2701 + } 2702 + } else { 2703 + rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 2704 + if (rc) { 2705 + dev_printk(KERN_ERR, &pdev->dev, 2706 + "32-bit DMA enable failed\n"); 2707 + return rc; 2708 + } 2709 + rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 2710 + if (rc) { 2711 + dev_printk(KERN_ERR, &pdev->dev, 2712 + "32-bit consistent DMA enable failed\n"); 2713 + return rc; 2714 + } 2715 + } 2716 + 2716 2717 return rc; 2717 2718 } 2718 2719 ··· 2805 2708 "Gen-%s %u slots %u ports %s mode IRQ via %s\n", 2806 2709 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, 2807 2710 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); 2711 + } 2712 + 2713 + static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev) 2714 + { 2715 + hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, 2716 + MV_CRQB_Q_SZ, 0); 2717 + if (!hpriv->crqb_pool) 2718 + return -ENOMEM; 2719 + 2720 + hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ, 2721 + MV_CRPB_Q_SZ, 0); 2722 + if (!hpriv->crpb_pool) 2723 + return -ENOMEM; 2724 + 2725 + hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ, 2726 + MV_SG_TBL_SZ, 0); 2727 + if (!hpriv->sg_tbl_pool) 2728 + return -ENOMEM; 2729 + 2730 + return 0; 2808 2731 } 2809 2732 2810 2733 /** ··· 2872 2755 if (rc) 2873 2756 return rc; 2874 2757 2758 + rc = mv_create_dma_pools(hpriv, &pdev->dev); 2759 + if (rc) 2760 + return rc; 2761 + 2875 2762 /* initialize adapter */ 2876 2763 rc = mv_init_host(host, board_idx); 2877 2764 if (rc) ··· 2893 2772 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, 2894 2773 IS_GEN_I(hpriv) ? 
&mv5_sht : &mv6_sht); 2895 2774 } 2775 + #endif 2896 2776 2897 2777 static int __init mv_init(void) 2898 2778 { 2899 - return pci_register_driver(&mv_pci_driver); 2779 + int rc = -ENODEV; 2780 + #ifdef CONFIG_PCI 2781 + rc = pci_register_driver(&mv_pci_driver); 2782 + #endif 2783 + return rc; 2900 2784 } 2901 2785 2902 2786 static void __exit mv_exit(void) 2903 2787 { 2788 + #ifdef CONFIG_PCI 2904 2789 pci_unregister_driver(&mv_pci_driver); 2790 + #endif 2905 2791 } 2906 2792 2907 2793 MODULE_AUTHOR("Brett Russ"); ··· 2917 2789 MODULE_DEVICE_TABLE(pci, mv_pci_tbl); 2918 2790 MODULE_VERSION(DRV_VERSION); 2919 2791 2792 + #ifdef CONFIG_PCI 2920 2793 module_param(msi, int, 0444); 2921 2794 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); 2795 + #endif 2922 2796 2923 2797 module_init(mv_init); 2924 2798 module_exit(mv_exit);
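Two themes in the sata_mv changes above are worth spelling out: per-port descriptor memory now comes from devres-managed DMA pools (crqb_pool, crpb_pool, sg_tbl_pool), and each NCQ tag gets its own scatter/gather table (sg_tbl[tag] / sg_tbl_dma[tag]) instead of every command sharing one. A minimal sketch of that allocation pattern, using hypothetical names (demo_host, demo_port, DEMO_SG_TBL_SZ, DEMO_MAX_TAGS) rather than the driver's real structures:

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

#define DEMO_SG_TBL_SZ	4096	/* block size == alignment, as in the pools above */
#define DEMO_MAX_TAGS	32

struct demo_host {
	struct dma_pool *sg_tbl_pool;	/* devres-managed, released with the device */
};

struct demo_port {
	void		*sg_tbl[DEMO_MAX_TAGS];
	dma_addr_t	sg_tbl_dma[DEMO_MAX_TAGS];
};

static int demo_host_init(struct demo_host *host, struct device *dev)
{
	/* name, device, block size, alignment, boundary (0 = none) */
	host->sg_tbl_pool = dmam_pool_create("demo_sg_tbl", dev,
					     DEMO_SG_TBL_SZ, DEMO_SG_TBL_SZ, 0);
	return host->sg_tbl_pool ? 0 : -ENOMEM;
}

static int demo_port_start(struct demo_host *host, struct demo_port *pp)
{
	int tag;

	/* one aligned hardware SG table per queued-command tag */
	for (tag = 0; tag < DEMO_MAX_TAGS; tag++) {
		pp->sg_tbl[tag] = dma_pool_alloc(host->sg_tbl_pool, GFP_KERNEL,
						 &pp->sg_tbl_dma[tag]);
		if (!pp->sg_tbl[tag])
			goto undo;
	}
	return 0;

undo:
	while (--tag >= 0)
		dma_pool_free(host->sg_tbl_pool, pp->sg_tbl[tag],
			      pp->sg_tbl_dma[tag]);
	return -ENOMEM;
}

Because each pool's block size equals its alignment, every table comes back naturally aligned with no padding waste, which is the guarantee the new hardware descriptor tables rely on; each queued command can then point its request block at sg_tbl_dma[tag].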
+12 -6
drivers/ata/sata_nv.c
@@ -1011,14 +1011,20 @@
 			}
 
 			if (status & (NV_ADMA_STAT_DONE |
-				      NV_ADMA_STAT_CPBERR)) {
-				u32 check_commands;
+				      NV_ADMA_STAT_CPBERR |
+				      NV_ADMA_STAT_CMD_COMPLETE)) {
+				u32 check_commands = notifier_clears[i];
 				int pos, error = 0;
 
-				if (ata_tag_valid(ap->link.active_tag))
-					check_commands = 1 << ap->link.active_tag;
-				else
-					check_commands = ap->link.sactive;
+				if (status & NV_ADMA_STAT_CPBERR) {
+					/* Check all active commands */
+					if (ata_tag_valid(ap->link.active_tag))
+						check_commands = 1 <<
+							ap->link.active_tag;
+					else
+						check_commands = ap->
+							link.sactive;
+				}
 
 				/** Check CPBs for completed commands */
 				while ((pos = ffs(check_commands)) && !error) {
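The fix above changes which tags get examined: check_commands now starts from this port's notifier bits and is only widened to every active command when NV_ADMA_STAT_CPBERR is set. The loop that consumes it simply walks the mask lowest bit first. A small self-contained illustration of that walk (plain C; hypothetical complete_tag() callback; ffs() here behaves like the kernel helper used above):

#include <stdio.h>
#include <strings.h>	/* ffs(): 1-based index of lowest set bit, 0 if none */

static void complete_tag(int tag)	/* stand-in for per-tag completion */
{
	printf("completing tag %d\n", tag);
}

static void complete_set(unsigned int check_commands)
{
	int pos;

	while ((pos = ffs(check_commands))) {
		complete_tag(pos - 1);			/* bit N means tag N */
		check_commands &= ~(1u << (pos - 1));	/* clear it, keep walking */
	}
}

int main(void)
{
	complete_set((1u << 0) | (1u << 3) | (1u << 7));	/* tags 0, 3, 7 */
	return 0;
}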
+2 -1
drivers/pci/pci.c
@@ -823,7 +823,8 @@
 	dr = get_pci_dr(pdev);
 	if (unlikely(!dr))
 		return -ENOMEM;
-	WARN_ON(!!dr->enabled);
+	if (dr->enabled)
+		return 0;
 
 	rc = pci_enable_device(pdev);
 	if (!rc) {
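The pci.c change above makes pcim_enable_device() safe to call more than once: a second call on an already-enabled managed device now returns 0 instead of tripping a WARN_ON. That matters when both a driver and a midlayer helper it uses want to enable the same device. A minimal sketch, with a hypothetical demo_probe() standing in for any managed PCI driver:

#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc;

	rc = pcim_enable_device(pdev);	/* driver enables the device early... */
	if (rc)
		return rc;

	/* ...and a helper library called later may enable it again;
	 * with the change above this is now a harmless no-op */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	return 0;
}

Previously the second call would have triggered the WARN_ON even though the device was already usable; now either caller can safely go first.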