/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember that a couple of workarounds (one related to
  PCI-X) are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Develop a low-power-consumption strategy, and implement it.

  8) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  9) [Experiment, low priority] Investigate interrupt coalescing.
  Especially with PCI Message Signalled Interrupts (MSI), the overhead
  saved by interrupt mitigation is often not worth the added latency cost.

  10) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"

enum {
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR = 0,	/* offset 0x10: memory space */
	MV_IO_BAR = 2,		/* offset 0x18: IO space */
	MV_MISC_BAR = 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ = 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ = 0x2000,	/* 8KB */

	MV_PCI_REG_BASE = 0,
	MV_IRQ_COAL_REG_BASE = 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE = 0x20000,
	MV_FLASH_CTL = 0x1046c,
	MV_GPIO_PORT_CTL = 0x104f0,
	MV_RESET_CFG = 0x180d8,

	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH = 32,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT = 256,
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),

	MV_PORTS_PER_HC = 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT = 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK = 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC = (1 << 30),		/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE = (1 << 29),	/* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC = (1 << 28),

	MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
			  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ = (1 << 0),
	CRQB_TAG_SHIFT = 1,
	CRQB_IOID_SHIFT = 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT = 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),

	CRPB_FLAG_STATUS_SHIFT = 8,
	CRPB_IOID_SHIFT_6 = 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7 = 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL = (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS = 0xc00,

	PCI_MAIN_CMD_STS_OFS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),

	MV_PCI_MODE = 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,

	PCI_IRQ_CAUSE_OFS = 0x1d58,
	PCI_IRQ_MASK_OFS = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS = 0x1900,
	PCIE_IRQ_MASK_OFS = 0x1910,
	PCIE_UNMASK_ALL_IRQS = 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	HC_MAIN_IRQ_MASK_OFS = 0x1d64,
	PORT0_ERR = (1 << 0),	/* shift by port # */
	PORT0_DONE = (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND = 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT = 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR = (1 << 18),
	TRAN_LO_DONE = (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE = (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE = (1 << 8),
	PORTS_4_7_COAL_DONE = (1 << 17),
	PORTS_0_7_COAL_DONE = (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5 = (0x1fff << 19),	/* bits 31-19 */
	HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
			       PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
			       HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				 HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS = 0,

	HC_IRQ_CAUSE_OFS = 0x14,
	CRPB_DMA_DONE = (1 << 0),	/* shift by port # */
	HC_IRQ_COAL = (1 << 4),		/* IRQ coalescing */
	DEV_IRQ = (1 << 8),		/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS = 0x100,
	SHD_CTL_AST_OFS = 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS = 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS = 0x350,
	SATA_FIS_IRQ_CAUSE_OFS = 0x364,
	PHY_MODE3 = 0x310,
	PHY_MODE4 = 0x314,
	PHY_MODE2 = 0x330,
	MV5_PHY_MODE = 0x74,
	MV5_LT_MODE = 0x30,
	MV5_PHY_CTL = 0x0C,
	SATA_INTERFACE_CTL = 0x050,

	MV_M2_PREAMP_MASK = 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS = 0,
	EDMA_CFG_Q_DEPTH = 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ = (1 << 5),		/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT = (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN = (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
	EDMA_ERR_IRQ_MASK_OFS = 0xc,
	EDMA_ERR_D_PAR = (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR = (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV = (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON = (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON = (1 << 4),	/* device connected */
	EDMA_ERR_SERR = (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS = (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5 = (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC = (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7 = (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR = (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR = (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR = (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY = (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX = (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX = (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX = (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO = (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5 = (1 << 5),
	EDMA_ERR_UNDERRUN_5 = (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
				 EDMA_ERR_LNK_CTRL_RX_1 |
				 EDMA_ERR_LNK_CTRL_RX_3 |
				 EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
			 EDMA_ERR_PRD_PAR |
			 EDMA_ERR_DEV_DCON |
			 EDMA_ERR_DEV_CON |
			 EDMA_ERR_SERR |
			 EDMA_ERR_SELF_DIS |
			 EDMA_ERR_CRQB_PAR |
			 EDMA_ERR_CRPB_PAR |
			 EDMA_ERR_INTRL_PAR |
			 EDMA_ERR_IORDY |
			 EDMA_ERR_LNK_CTRL_RX_2 |
			 EDMA_ERR_LNK_DATA_RX |
			 EDMA_ERR_LNK_DATA_TX |
			 EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
			   EDMA_ERR_PRD_PAR |
			   EDMA_ERR_DEV_DCON |
			   EDMA_ERR_DEV_CON |
			   EDMA_ERR_OVERRUN_5 |
			   EDMA_ERR_UNDERRUN_5 |
			   EDMA_ERR_SELF_DIS_5 |
			   EDMA_ERR_CRQB_PAR |
			   EDMA_ERR_CRPB_PAR |
			   EDMA_ERR_INTRL_PAR |
			   EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS = 0x10,
	EDMA_REQ_Q_IN_PTR_OFS = 0x14,	/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,

	EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS = 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS = 0x24,	/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT = 3,

	EDMA_CMD_OFS = 0x28,		/* EDMA command register */
	EDMA_EN = (1 << 0),		/* enable EDMA */
	EDMA_DS = (1 << 1),		/* disable EDMA; self-negated */
	ATA_RST = (1 << 2),		/* reset trans/link/phy */

	EDMA_IORDY_TMOUT = 0x34,
	EDMA_ARB_CFG = 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_ERRATA_XX42A0 = (1 << 5),
	MV_HP_GEN_I = (1 << 6),		/* Generation I: 50xx */
	MV_HP_GEN_II = (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE = (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE = (1 << 9),		/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN = (1 << 0),		/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN = (1 << 1),		/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET = (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY = 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};
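
/* Worked example of the two masks above: the CRQB queue is
 * MV_MAX_Q_DEPTH (32) entries of 32 bytes == 1KB and is allocated
 * 1KB-aligned, so bits 9:0 of its DMA address are always zero;
 * EDMA_REQ_Q_BASE_LO_MASK keeps exactly bits 31:10, leaving the low
 * bits of the same register free for the hardware queue pointer.
 * Likewise the CRPB queue is 32 entries of 8 bytes == 256B,
 * 256B-aligned, hence the 0xffffff00 mask.
 */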

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32 sg_addr;
	__le32 sg_addr_hi;
	__le16 ctrl_flags;
	__le16 ata_cmd[11];
};

struct mv_crqb_iie {
	__le32 addr;
	__le32 addr_hi;
	__le32 flags;
	__le32 len;
	__le32 ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16 id;
	__le16 flags;
	__le32 tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32 addr;
	__le32 flags_size;
	__le32 addr_hi;
	__le32 reserved;
};

struct mv_port_priv {
	struct mv_crqb *crqb;
	dma_addr_t crqb_dma;
	struct mv_crpb *crpb;
	dma_addr_t crpb_dma;
	struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int req_idx;
	unsigned int resp_idx;

	u32 pp_flags;
};

struct mv_port_signal {
	u32 amps;
	u32 pre;
};

struct mv_host_priv {
	u32 hp_flags;
	struct mv_port_signal signal[8];
	const struct mv_hw_ops *ops;
	u32 irq_cause_ofs;
	u32 irq_mask_ofs;
	u32 unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool *crqb_pool;
	struct dma_pool *crpb_pool;
	struct dma_pool *sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
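/* In other words: mv_fill_sg() may emit up to two ePRDs for a single
 * scatterlist entry (one up to the next 64K boundary, one for the
 * remainder -- see the splitting loop there), so advertising
 * MV_MAX_SG_CT / 2 == 128 entries ensures the 256-entry ePRD table
 * can never overflow.
 */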
static struct scsi_host_template mv5_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = 1,
	.proc_name = DRV_NAME,
	.dma_boundary = MV_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.change_queue_depth = ata_scsi_change_queue_depth,
	.can_queue = MV_MAX_Q_DEPTH - 1,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = 1,
	.proc_name = DRV_NAME,
	.dma_boundary = MV_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.cable_detect = ata_cable_sata,

	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.irq_clear = mv_irq_clear,
	.irq_on = ata_irq_on,

	.error_handler = mv_error_handler,
	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,

	.scr_read = mv5_scr_read,
	.scr_write = mv5_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.dev_config = mv6_dev_config,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.cable_detect = ata_cable_sata,

	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.irq_clear = mv_irq_clear,
	.irq_on = ata_irq_on,

	.error_handler = mv_error_handler,
	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,
	.qc_defer = ata_std_qc_defer,

	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.cable_detect = ata_cable_sata,

	.qc_prep = mv_qc_prep_iie,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.irq_clear = mv_irq_clear,
	.irq_on = ata_irq_on,

	.error_handler = mv_error_handler,
	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,
	.qc_defer = ata_std_qc_defer,

	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags = MV_COMMON_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_508x */
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_5080 */
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_604x */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_NCQ,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_608x */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_6042 */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_NCQ,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_NCQ,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata = mv5_phy_errata,
	.enable_leds = mv5_enable_leds,
	.read_preamp = mv5_read_preamp,
	.reset_hc = mv5_reset_hc,
	.reset_flash = mv5_reset_flash,
	.reset_bus = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv6_enable_leds,
	.read_preamp = mv6_read_preamp,
	.reset_hc = mv6_reset_hc,
	.reset_flash = mv6_reset_flash,
	.reset_bus = mv_reset_pci_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
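
/* Usage note: the trailing read makes the preceding write non-posted.
 * For example,
 *
 *	writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
 *
 * is guaranteed to have reached the controller before the CPU executes
 * the next instruction, at the cost of one extra bus read.  Plain
 * writel() is used below wherever that ordering is not needed.
 */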

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
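
/* Worked example of the port math above: for global port 6,
 * mv_hc_from_port(6) == 6 >> 2 == 1 and mv_hardport_from_port(6)
 * == 6 & 3 == 2, i.e. the third port of the second host controller.
 * Its register block then lives at
 *
 *	base + 0x20000 + 1 * 0x10000	(hc 1 base)
 *	     + 0x2000			(skip the arbiter block)
 *	     + 2 * 0x2000		(hard port 2)
 *	     == base + 0x36000
 *
 * as computed by mv_port_base() below.
 */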

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
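
/* Illustration of the pointer encoding used above: with
 * MV_MAX_Q_DEPTH_MASK == 0x1f and EDMA_REQ_Q_PTR_SHIFT == 5, a req_idx
 * of 33 wraps to slot 1 and yields index == 1 << 5 == 0x20, which is
 * also the byte offset of CRQB slot 1 (32B entries) within the 1KB
 * queue.  The same register thus holds the queue base (bits 31:10)
 * and the in-pointer (bits 9:5) at once.
 */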

/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel being started
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
				(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (port < 0) {
		start_hc = start_port = 0;
		num_ports = 8;	/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
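
/* Example mapping, given libata's SCR numbering (SCR_STATUS == 0,
 * SCR_ERROR == 1, SCR_CONTROL == 2):
 *
 *	mv_scr_offset(SCR_STATUS)  == 0x300
 *	mv_scr_offset(SCR_ERROR)   == 0x304
 *	mv_scr_offset(SCR_CONTROL) == 0x308
 *	mv_scr_offset(SCR_ACTIVE)  == 0x350	(separate register)
 */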

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}

static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag, rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc. due to restricted
	 * access to the shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + sg_len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
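
/* Worked example of the splitting loop: a DMA segment at address
 * 0x10f000 with length 0x3000 has offset 0xf000 within its 64K page,
 * so 0xf000 + 0x3000 > 0x10000 and the first ePRD is clipped to
 * len == 0x10000 - 0xf000 == 0x1000.  The loop then emits a second
 * ePRD at 0x110000 for the remaining 0x2000 bytes.  This is why
 * .sg_tablesize is MV_MAX_SG_CT / 2 (see the comment above the
 * scsi_host_templates).
 */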

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
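
/* Example of the resulting CRQB command word: packing ATA_CMD_READ
 * (0xc8) into the command register slot as the last word,
 * mv_crqb_pack_cmd(cw, 0xc8, ATA_REG_CMD, 1), with ATA_REG_CMD == 7,
 * produces
 *
 *	0xc8 | (7 << 8) | CRQB_CMD_CS | CRQB_CMD_LAST == 0x97c8
 *
 * (0xc8 | 0x700 | 0x1000 | 0x8000), stored little-endian.
 */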

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;
	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 command words...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected queued command, if any (may be NULL)
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read-only main interrupt cause register to determine
 * if any host controllers have pending interrupts.  If so, call
 * the lower-level routine to handle them.  Also check for PCI
 * errors, which are only reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);	/* note: sets every bit except bit 0 */
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}


#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @hpriv: host private data
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused here)
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This was coded to be safe to call at
 * interrupt level, i.e. not to sleep; note, however, that the
 * current implementation does call msleep() while waiting.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl: DET = 1 starts the reset
	 * sequence, DET = 0 releases it (IPM = 3 in both writes
	 * keeps partial/slumber transitions disabled).
	 */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;
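
	/* For reference, the SStatus values tested above decode as
	 * DET = bits 3:0, SPD = bits 7:4, IPM = bits 11:8:
	 *
	 *	0x000: no device, phy offline
	 *	0x113: device present, Gen1 (1.5 Gbps) link active
	 *	0x123: device present, Gen2 (3.0 Gbps) link active
	 *
	 * Anything else on a Gen-II part is treated as a failed
	 * COMRESET and retried.
	 */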

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}

static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_link_online(link))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}

static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}

static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}

static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
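
/*
 * Worked example for the mask above: each port owns an err/done bit
 * pair in the main IRQ mask, and the second HC's pairs sit one extra
 * bit up, past HC0's coalescing bit.  So for port 5 (on hc 1):
 * shift = 5 * 2 + 1 = 11, mask = 0x3 << 11, i.e. bits 12:11.
 */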

static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
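			/*
			 * Worked example (hypothetical drive): a 500 GB
			 * disk of 976,773,168 sectors gives
			 *
			 *	976773168 & ~0xfffff = 976224256
			 *
			 * so the BIOS keeps its metadata from sector
			 * 976224256 onward, clobbering roughly the
			 * last 280 MB of the drive.
			 */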
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through: 7042 shares the 6042 (Gen IIE) setup */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}

#ifdef CONFIG_PCI
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the class code to report whether the chip presents
	 * itself as a SCSI or a RAID controller.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
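
/*
 * Design note: each pool above is created with align == size, so every
 * allocation comes back naturally aligned and a queue never straddles
 * an alignment boundary.  This matters because (as best we can tell
 * from the register layout) the EDMA base-address registers only store
 * the upper address bits of the request/response queues.
 */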

/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to legacy INTx if MSI was
	 * requested but could not be enabled.
	 */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);