Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] mvsas: split driver into multiple files

Split the mvsas driver into multiple source files, based on the split
and function distribution found in Marvell's mvsas update.

Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>

authored by

Jeff Garzik and committed by
James Bottomley
dd4969a8 2ad52f47

+2278 -2117
+3 -2
drivers/scsi/mvsas/Makefile
··· 22 22 # USA 23 23 24 24 obj-$(CONFIG_SCSI_MVSAS) += mvsas.o 25 - mvsas-y += mv_sas.o 26 - 25 + mvsas-y += mv_init.o \ 26 + mv_sas.o \ 27 + mv_64xx.o
+184
drivers/scsi/mvsas/mv_64xx.c
··· 1 + /* 2 + mv_64xx.c - Marvell 88SE6440 SAS/SATA support 3 + 4 + Copyright 2007 Red Hat, Inc. 5 + Copyright 2008 Marvell. <kewei@marvell.com> 6 + 7 + This program is free software; you can redistribute it and/or 8 + modify it under the terms of the GNU General Public License as 9 + published by the Free Software Foundation; either version 2, 10 + or (at your option) any later version. 11 + 12 + This program is distributed in the hope that it will be useful, 13 + but WITHOUT ANY WARRANTY; without even the implied warranty 14 + of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 15 + See the GNU General Public License for more details. 16 + 17 + You should have received a copy of the GNU General Public 18 + License along with this program; see the file COPYING. If not, 19 + write to the Free Software Foundation, 675 Mass Ave, Cambridge, 20 + MA 02139, USA. 21 + 22 + */ 23 + 24 + #include "mv_sas.h" 25 + #include "mv_64xx.h" 26 + #include "mv_chips.h" 27 + 28 + void mvs_detect_porttype(struct mvs_info *mvi, int i) 29 + { 30 + void __iomem *regs = mvi->regs; 31 + u32 reg; 32 + struct mvs_phy *phy = &mvi->phy[i]; 33 + 34 + /* TODO check & save device type */ 35 + reg = mr32(GBL_PORT_TYPE); 36 + 37 + if (reg & MODE_SAS_SATA & (1 << i)) 38 + phy->phy_type |= PORT_TYPE_SAS; 39 + else 40 + phy->phy_type |= PORT_TYPE_SATA; 41 + } 42 + 43 + void mvs_enable_xmt(struct mvs_info *mvi, int PhyId) 44 + { 45 + void __iomem *regs = mvi->regs; 46 + u32 tmp; 47 + 48 + tmp = mr32(PCS); 49 + if (mvi->chip->n_phy <= 4) 50 + tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT); 51 + else 52 + tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2); 53 + mw32(PCS, tmp); 54 + } 55 + 56 + void __devinit mvs_phy_hacks(struct mvs_info *mvi) 57 + { 58 + void __iomem *regs = mvi->regs; 59 + u32 tmp; 60 + 61 + /* workaround for SATA R-ERR, to ignore phy glitch */ 62 + tmp = mvs_cr32(regs, CMD_PHY_TIMER); 63 + tmp &= ~(1 << 9); 64 + tmp |= (1 << 10); 65 + mvs_cw32(regs, CMD_PHY_TIMER, tmp); 66 + 67 + /* 
enable retry 127 times */ 68 + mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f); 69 + 70 + /* extend open frame timeout to max */ 71 + tmp = mvs_cr32(regs, CMD_SAS_CTL0); 72 + tmp &= ~0xffff; 73 + tmp |= 0x3fff; 74 + mvs_cw32(regs, CMD_SAS_CTL0, tmp); 75 + 76 + /* workaround for WDTIMEOUT , set to 550 ms */ 77 + mvs_cw32(regs, CMD_WD_TIMER, 0x86470); 78 + 79 + /* not to halt for different port op during wideport link change */ 80 + mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d); 81 + 82 + /* workaround for Seagate disk not-found OOB sequence, recv 83 + * COMINIT before sending out COMWAKE */ 84 + tmp = mvs_cr32(regs, CMD_PHY_MODE_21); 85 + tmp &= 0x0000ffff; 86 + tmp |= 0x00fa0000; 87 + mvs_cw32(regs, CMD_PHY_MODE_21, tmp); 88 + 89 + tmp = mvs_cr32(regs, CMD_PHY_TIMER); 90 + tmp &= 0x1fffffff; 91 + tmp |= (2U << 29); /* 8 ms retry */ 92 + mvs_cw32(regs, CMD_PHY_TIMER, tmp); 93 + 94 + /* TEST - for phy decoding error, adjust voltage levels */ 95 + mw32(P0_VSR_ADDR + 0, 0x8); 96 + mw32(P0_VSR_DATA + 0, 0x2F0); 97 + 98 + mw32(P0_VSR_ADDR + 8, 0x8); 99 + mw32(P0_VSR_DATA + 8, 0x2F0); 100 + 101 + mw32(P0_VSR_ADDR + 16, 0x8); 102 + mw32(P0_VSR_DATA + 16, 0x2F0); 103 + 104 + mw32(P0_VSR_ADDR + 24, 0x8); 105 + mw32(P0_VSR_DATA + 24, 0x2F0); 106 + 107 + } 108 + 109 + void mvs_hba_interrupt_enable(struct mvs_info *mvi) 110 + { 111 + void __iomem *regs = mvi->regs; 112 + u32 tmp; 113 + 114 + tmp = mr32(GBL_CTL); 115 + 116 + mw32(GBL_CTL, tmp | INT_EN); 117 + } 118 + 119 + void mvs_hba_interrupt_disable(struct mvs_info *mvi) 120 + { 121 + void __iomem *regs = mvi->regs; 122 + u32 tmp; 123 + 124 + tmp = mr32(GBL_CTL); 125 + 126 + mw32(GBL_CTL, tmp & ~INT_EN); 127 + } 128 + 129 + void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port) 130 + { 131 + void __iomem *regs = mvi->regs; 132 + u32 tmp, offs; 133 + u8 *tfs = &port->taskfileset; 134 + 135 + if (*tfs == MVS_ID_NOT_MAPPED) 136 + return; 137 + 138 + offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); 139 + if (*tfs < 16) { 
140 + tmp = mr32(PCS); 141 + mw32(PCS, tmp & ~offs); 142 + } else { 143 + tmp = mr32(CTL); 144 + mw32(CTL, tmp & ~offs); 145 + } 146 + 147 + tmp = mr32(INT_STAT_SRS) & (1U << *tfs); 148 + if (tmp) 149 + mw32(INT_STAT_SRS, tmp); 150 + 151 + *tfs = MVS_ID_NOT_MAPPED; 152 + } 153 + 154 + u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port) 155 + { 156 + int i; 157 + u32 tmp, offs; 158 + void __iomem *regs = mvi->regs; 159 + 160 + if (port->taskfileset != MVS_ID_NOT_MAPPED) 161 + return 0; 162 + 163 + tmp = mr32(PCS); 164 + 165 + for (i = 0; i < mvi->chip->srs_sz; i++) { 166 + if (i == 16) 167 + tmp = mr32(CTL); 168 + offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); 169 + if (!(tmp & offs)) { 170 + port->taskfileset = i; 171 + 172 + if (i < 16) 173 + mw32(PCS, tmp | offs); 174 + else 175 + mw32(CTL, tmp | offs); 176 + tmp = mr32(INT_STAT_SRS) & (1U << i); 177 + if (tmp) 178 + mw32(INT_STAT_SRS, tmp); 179 + return 0; 180 + } 181 + } 182 + return MVS_ID_NOT_MAPPED; 183 + } 184 +
+92
drivers/scsi/mvsas/mv_64xx.h
··· 1 + #ifndef _MVS64XX_REG_H_ 2 + #define _MVS64XX_REG_H_ 3 + 4 + /* enhanced mode registers (BAR4) */ 5 + enum hw_registers { 6 + MVS_GBL_CTL = 0x04, /* global control */ 7 + MVS_GBL_INT_STAT = 0x08, /* global irq status */ 8 + MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ 9 + MVS_GBL_PORT_TYPE = 0xa0, /* port type */ 10 + 11 + MVS_CTL = 0x100, /* SAS/SATA port configuration */ 12 + MVS_PCS = 0x104, /* SAS/SATA port control/status */ 13 + MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ 14 + MVS_CMD_LIST_HI = 0x10C, 15 + MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ 16 + MVS_RX_FIS_HI = 0x114, 17 + 18 + MVS_TX_CFG = 0x120, /* TX configuration */ 19 + MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ 20 + MVS_TX_HI = 0x128, 21 + 22 + MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ 23 + MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ 24 + MVS_RX_CFG = 0x134, /* RX configuration */ 25 + MVS_RX_LO = 0x138, /* RX (completion) ring addr */ 26 + MVS_RX_HI = 0x13C, 27 + MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ 28 + 29 + MVS_INT_COAL = 0x148, /* Int coalescing config */ 30 + MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ 31 + MVS_INT_STAT = 0x150, /* Central int status */ 32 + MVS_INT_MASK = 0x154, /* Central int enable */ 33 + MVS_INT_STAT_SRS = 0x158, /* SATA register set status */ 34 + MVS_INT_MASK_SRS = 0x15C, 35 + 36 + /* ports 1-3 follow after this */ 37 + MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ 38 + MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ 39 + MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */ 40 + MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */ 41 + 42 + /* ports 1-3 follow after this */ 43 + MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ 44 + MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ 45 + 46 + MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ 47 + MVS_CMD_DATA = 0x1BC, /* Command register port (data) */ 48 + 49 + /* ports 1-3 follow after this 
*/ 50 + MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ 51 + MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ 52 + MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */ 53 + MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */ 54 + 55 + /* ports 1-3 follow after this */ 56 + MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ 57 + MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ 58 + MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */ 59 + MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */ 60 + }; 61 + 62 + enum pci_cfg_registers { 63 + PCR_PHY_CTL = 0x40, 64 + PCR_PHY_CTL2 = 0x90, 65 + PCR_DEV_CTRL = 0xE8, 66 + }; 67 + 68 + /* SAS/SATA Vendor Specific Port Registers */ 69 + enum sas_sata_vsp_regs { 70 + VSR_PHY_STAT = 0x00, /* Phy Status */ 71 + VSR_PHY_MODE1 = 0x01, /* phy tx */ 72 + VSR_PHY_MODE2 = 0x02, /* tx scc */ 73 + VSR_PHY_MODE3 = 0x03, /* pll */ 74 + VSR_PHY_MODE4 = 0x04, /* VCO */ 75 + VSR_PHY_MODE5 = 0x05, /* Rx */ 76 + VSR_PHY_MODE6 = 0x06, /* CDR */ 77 + VSR_PHY_MODE7 = 0x07, /* Impedance */ 78 + VSR_PHY_MODE8 = 0x08, /* Voltage */ 79 + VSR_PHY_MODE9 = 0x09, /* Test */ 80 + VSR_PHY_MODE10 = 0x0A, /* Power */ 81 + VSR_PHY_MODE11 = 0x0B, /* Phy Mode */ 82 + VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */ 83 + VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */ 84 + }; 85 + 86 + struct mvs_prd { 87 + __le64 addr; /* 64-bit buffer address */ 88 + __le32 reserved; 89 + __le32 len; /* 16-bit length */ 90 + }; 91 + 92 + #endif
+118
drivers/scsi/mvsas/mv_chips.h
··· 1 + #ifndef _MV_CHIPS_H_ 2 + #define _MV_CHIPS_H_ 3 + 4 + #define mr32(reg) readl(regs + MVS_##reg) 5 + #define mw32(reg,val) writel((val), regs + MVS_##reg) 6 + #define mw32_f(reg,val) do { \ 7 + writel((val), regs + MVS_##reg); \ 8 + readl(regs + MVS_##reg); \ 9 + } while (0) 10 + 11 + static inline u32 mvs_cr32(void __iomem *regs, u32 addr) 12 + { 13 + mw32(CMD_ADDR, addr); 14 + return mr32(CMD_DATA); 15 + } 16 + 17 + static inline void mvs_cw32(void __iomem *regs, u32 addr, u32 val) 18 + { 19 + mw32(CMD_ADDR, addr); 20 + mw32(CMD_DATA, val); 21 + } 22 + 23 + static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) 24 + { 25 + void __iomem *regs = mvi->regs; 26 + return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4): 27 + mr32(P4_SER_CTLSTAT + (port - 4) * 4); 28 + } 29 + 30 + static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) 31 + { 32 + void __iomem *regs = mvi->regs; 33 + if (port < 4) 34 + mw32(P0_SER_CTLSTAT + port * 4, val); 35 + else 36 + mw32(P4_SER_CTLSTAT + (port - 4) * 4, val); 37 + } 38 + 39 + static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port) 40 + { 41 + void __iomem *regs = mvi->regs + off; 42 + void __iomem *regs2 = mvi->regs + off2; 43 + return (port < 4)?readl(regs + port * 8): 44 + readl(regs2 + (port - 4) * 8); 45 + } 46 + 47 + static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, 48 + u32 port, u32 val) 49 + { 50 + void __iomem *regs = mvi->regs + off; 51 + void __iomem *regs2 = mvi->regs + off2; 52 + if (port < 4) 53 + writel(val, regs + port * 8); 54 + else 55 + writel(val, regs2 + (port - 4) * 8); 56 + } 57 + 58 + static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) 59 + { 60 + return mvs_read_port(mvi, MVS_P0_CFG_DATA, 61 + MVS_P4_CFG_DATA, port); 62 + } 63 + 64 + static inline void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val) 65 + { 66 + mvs_write_port(mvi, MVS_P0_CFG_DATA, 67 + MVS_P4_CFG_DATA, port, val); 68 
+ } 69 + 70 + static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr) 71 + { 72 + mvs_write_port(mvi, MVS_P0_CFG_ADDR, 73 + MVS_P4_CFG_ADDR, port, addr); 74 + } 75 + 76 + static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) 77 + { 78 + return mvs_read_port(mvi, MVS_P0_VSR_DATA, 79 + MVS_P4_VSR_DATA, port); 80 + } 81 + 82 + static inline void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val) 83 + { 84 + mvs_write_port(mvi, MVS_P0_VSR_DATA, 85 + MVS_P4_VSR_DATA, port, val); 86 + } 87 + 88 + static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr) 89 + { 90 + mvs_write_port(mvi, MVS_P0_VSR_ADDR, 91 + MVS_P4_VSR_ADDR, port, addr); 92 + } 93 + 94 + static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) 95 + { 96 + return mvs_read_port(mvi, MVS_P0_INT_STAT, 97 + MVS_P4_INT_STAT, port); 98 + } 99 + 100 + static inline void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val) 101 + { 102 + mvs_write_port(mvi, MVS_P0_INT_STAT, 103 + MVS_P4_INT_STAT, port, val); 104 + } 105 + 106 + static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) 107 + { 108 + return mvs_read_port(mvi, MVS_P0_INT_MASK, 109 + MVS_P4_INT_MASK, port); 110 + } 111 + 112 + static inline void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val) 113 + { 114 + mvs_write_port(mvi, MVS_P0_INT_MASK, 115 + MVS_P4_INT_MASK, port, val); 116 + } 117 + 118 + #endif
+441
drivers/scsi/mvsas/mv_defs.h
/*
 * mv_defs.h - Marvell 88SE6440 SAS/SATA support
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; see the file COPYING.  If not,
 * write to the Free Software Foundation, 675 Mass Ave, Cambridge,
 * MA 02139, USA.
 */

#ifndef _MV_DEFS_H_
#define _MV_DEFS_H_

/* driver compile-time configuration */
enum driver_configuration {
	MVS_TX_RING_SZ		= 1024,	/* TX ring size (12-bit) */
	MVS_RX_RING_SZ		= 1024,	/* RX ring size (12-bit) */
					/* software requires power-of-2
					   ring size */

	MVS_SLOTS		= 512,	/* command slots */
	MVS_SLOT_BUF_SZ		= 8192,	/* cmd tbl + IU + status + PRD */
	MVS_SSP_CMD_SZ		= 64,	/* SSP command table buffer size */
	MVS_ATA_CMD_SZ		= 96,	/* SATA command table buffer size */
	MVS_OAF_SZ		= 64,	/* Open address frame buffer size */

	MVS_RX_FIS_COUNT	= 17,	/* Optional rx'd FISs (max 17) */

	MVS_QUEUE_SIZE		= 30,	/* Support Queue depth */
	MVS_CAN_QUEUE		= MVS_SLOTS - 1, /* SCSI Queue depth */
};

/* unchangeable hardware details */
enum hardware_details {
	MVS_MAX_PHYS		= 8,	/* max. possible phys */
	MVS_MAX_PORTS		= 8,	/* max. possible ports */
	MVS_RX_FISL_SZ		= 0x400 + (MVS_RX_FIS_COUNT * 0x100),
};

/* peripheral registers (BAR2) */
enum peripheral_registers {
	SPI_CTL			= 0x10,	/* EEPROM control */
	SPI_CMD			= 0x14,	/* EEPROM command */
	SPI_DATA		= 0x18,	/* EEPROM data */
};

enum peripheral_register_bits {
	TWSI_RDY		= (1U << 7),	/* EEPROM interface ready */
	TWSI_RD			= (1U << 4),	/* EEPROM read access */

	SPI_ADDR_MASK		= 0x3ffff,	/* bits 17:0 */
};

enum hw_register_bits {
	/* MVS_GBL_CTL */
	INT_EN			= (1U << 1),	/* Global int enable */
	HBA_RST			= (1U << 0),	/* HBA reset */

	/* MVS_GBL_INT_STAT */
	INT_XOR			= (1U << 4),	/* XOR engine event */
	INT_SAS_SATA		= (1U << 0),	/* SAS/SATA event */

	/* MVS_GBL_PORT_TYPE */			/* shl for ports 1-3 */
	SATA_TARGET		= (1U << 16),	/* port0 SATA target enable */
	MODE_AUTO_DET_PORT7	= (1U << 15),	/* port0 SAS/SATA autodetect */
	MODE_AUTO_DET_PORT6	= (1U << 14),
	MODE_AUTO_DET_PORT5	= (1U << 13),
	MODE_AUTO_DET_PORT4	= (1U << 12),
	MODE_AUTO_DET_PORT3	= (1U << 11),
	MODE_AUTO_DET_PORT2	= (1U << 10),
	MODE_AUTO_DET_PORT1	= (1U << 9),
	MODE_AUTO_DET_PORT0	= (1U << 8),
	MODE_AUTO_DET_EN	= MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
				  MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
				  MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
				  MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
	MODE_SAS_PORT7_MASK	= (1U << 7),	/* port0 SAS(1), SATA(0) mode */
	MODE_SAS_PORT6_MASK	= (1U << 6),
	MODE_SAS_PORT5_MASK	= (1U << 5),
	MODE_SAS_PORT4_MASK	= (1U << 4),
	MODE_SAS_PORT3_MASK	= (1U << 3),
	MODE_SAS_PORT2_MASK	= (1U << 2),
	MODE_SAS_PORT1_MASK	= (1U << 1),
	MODE_SAS_PORT0_MASK	= (1U << 0),
	MODE_SAS_SATA		= MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
				  MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
				  MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
				  MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,

				/* SAS_MODE value may be
				 * dictated (in hw) by values
				 * of SATA_TARGET & AUTO_DET
				 */

	/* MVS_TX_CFG */
	TX_EN			= (1U << 16),	/* Enable TX */
	TX_RING_SZ_MASK		= 0xfff,	/* TX ring size, bits 11:0 */

	/* MVS_RX_CFG */
	RX_EN			= (1U << 16),	/* Enable RX */
	RX_RING_SZ_MASK		= 0xfff,	/* RX ring size, bits 11:0 */

	/* MVS_INT_COAL */
	COAL_EN			= (1U << 16),	/* Enable int coalescing */

	/* MVS_INT_STAT, MVS_INT_MASK */
	CINT_I2C		= (1U << 31),	/* I2C event */
	CINT_SW0		= (1U << 30),	/* software event 0 */
	CINT_SW1		= (1U << 29),	/* software event 1 */
	CINT_PRD_BC		= (1U << 28),	/* PRD BC err for read cmd */
	CINT_DMA_PCIE		= (1U << 27),	/* DMA to PCIE timeout */
	CINT_MEM		= (1U << 26),	/* int mem parity err */
	CINT_I2C_SLAVE		= (1U << 25),	/* slave I2C event */
	CINT_SRS		= (1U << 3),	/* SRS event */
	CINT_CI_STOP		= (1U << 1),	/* cmd issue stopped */
	CINT_DONE		= (1U << 0),	/* cmd completion */

						/* shl for ports 1-3 */
	CINT_PORT_STOPPED	= (1U << 16),	/* port0 stopped */
	CINT_PORT		= (1U << 8),	/* port0 event */
	CINT_PORT_MASK_OFFSET	= 8,
	CINT_PORT_MASK		= (0xFF << CINT_PORT_MASK_OFFSET),

	/* TX (delivery) ring bits */
	TXQ_CMD_SHIFT		= 29,
	TXQ_CMD_SSP		= 1,		/* SSP protocol */
	TXQ_CMD_SMP		= 2,		/* SMP protocol */
	TXQ_CMD_STP		= 3,		/* STP/SATA protocol */
	TXQ_CMD_SSP_FREE_LIST	= 4,		/* add to SSP targ free list */
	TXQ_CMD_SLOT_RESET	= 7,		/* reset command slot */
	TXQ_MODE_I		= (1U << 28),	/* mode: 0=target,1=initiator */
	TXQ_PRIO_HI		= (1U << 27),	/* priority: 0=normal, 1=high */
	TXQ_SRS_SHIFT		= 20,		/* SATA register set */
	TXQ_SRS_MASK		= 0x7f,
	TXQ_PHY_SHIFT		= 12,		/* PHY bitmap */
	TXQ_PHY_MASK		= 0xff,
	TXQ_SLOT_MASK		= 0xfff,	/* slot number */

	/* RX (completion) ring bits */
	RXQ_GOOD		= (1U << 23),	/* Response good */
	RXQ_SLOT_RESET		= (1U << 21),	/* Slot reset complete */
	RXQ_CMD_RX		= (1U << 20),	/* target cmd received */
	RXQ_ATTN		= (1U << 19),	/* attention */
	RXQ_RSP			= (1U << 18),	/* response frame xfer'd */
	RXQ_ERR			= (1U << 17),	/* err info rec xfer'd */
	RXQ_DONE		= (1U << 16),	/* cmd complete */
	RXQ_SLOT_MASK		= 0xfff,	/* slot number */

	/* mvs_cmd_hdr bits */
	MCH_PRD_LEN_SHIFT	= 16,		/* 16-bit PRD table len */
	MCH_SSP_FR_TYPE_SHIFT	= 13,		/* SSP frame type */

	/* SSP initiator only */
	MCH_SSP_FR_CMD		= 0x0,		/* COMMAND frame */

	/* SSP initiator or target */
	MCH_SSP_FR_TASK		= 0x1,		/* TASK frame */

	/* SSP target only */
	MCH_SSP_FR_XFER_RDY	= 0x4,		/* XFER_RDY frame */
	MCH_SSP_FR_RESP		= 0x5,		/* RESPONSE frame */
	MCH_SSP_FR_READ		= 0x6,		/* Read DATA frame(s) */
	MCH_SSP_FR_READ_RESP	= 0x7,		/* ditto, plus RESPONSE */

	MCH_PASSTHRU		= (1U << 12),	/* pass-through (SSP) */
	MCH_FBURST		= (1U << 11),	/* first burst (SSP) */
	MCH_CHK_LEN		= (1U << 10),	/* chk xfer len (SSP) */
	MCH_RETRY		= (1U << 9),	/* tport layer retry (SSP) */
	MCH_PROTECTION		= (1U << 8),	/* protection info rec (SSP) */
	MCH_RESET		= (1U << 7),	/* Reset (STP/SATA) */
	MCH_FPDMA		= (1U << 6),	/* First party DMA (STP/SATA) */
	MCH_ATAPI		= (1U << 5),	/* ATAPI (STP/SATA) */
	MCH_BIST		= (1U << 4),	/* BIST activate (STP/SATA) */
	MCH_PMP_MASK		= 0xf,		/* PMP from cmd FIS (STP/SATA)*/

	CCTL_RST		= (1U << 5),	/* port logic reset */

						/* 0(LSB first), 1(MSB first) */
	CCTL_ENDIAN_DATA	= (1U << 3),	/* PRD data */
	CCTL_ENDIAN_RSP		= (1U << 2),	/* response frame */
	CCTL_ENDIAN_OPEN	= (1U << 1),	/* open address frame */
	CCTL_ENDIAN_CMD		= (1U << 0),	/* command table */

	/* MVS_Px_SER_CTLSTAT (per-phy control) */
	PHY_SSP_RST		= (1U << 3),	/* reset SSP link layer */
	PHY_BCAST_CHG		= (1U << 2),	/* broadcast(change) notif */
	PHY_RST_HARD		= (1U << 1),	/* hard reset + phy reset */
	PHY_RST			= (1U << 0),	/* phy reset */
	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
			(0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
	PHY_READY_MASK		= (1U << 20),

	/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
	PHYEV_DEC_ERR		= (1U << 24),	/* Phy Decoding Error */
	PHYEV_UNASSOC_FIS	= (1U << 19),	/* unassociated FIS rx'd */
	PHYEV_AN		= (1U << 18),	/* SATA async notification */
	PHYEV_BIST_ACT		= (1U << 17),	/* BIST activate FIS */
	PHYEV_SIG_FIS		= (1U << 16),	/* signature FIS */
	PHYEV_POOF		= (1U << 12),	/* phy ready from 1 -> 0 */
	PHYEV_IU_BIG		= (1U << 11),	/* IU too long err */
	PHYEV_IU_SMALL		= (1U << 10),	/* IU too short err */
	PHYEV_UNK_TAG		= (1U << 9),	/* unknown tag */
	PHYEV_BROAD_CH		= (1U << 8),	/* broadcast(CHANGE) */
	PHYEV_COMWAKE		= (1U << 7),	/* COMWAKE rx'd */
	PHYEV_PORT_SEL		= (1U << 6),	/* port selector present */
	PHYEV_HARD_RST		= (1U << 5),	/* hard reset rx'd */
	PHYEV_ID_TMOUT		= (1U << 4),	/* identify timeout */
	PHYEV_ID_FAIL		= (1U << 3),	/* identify failed */
	PHYEV_ID_DONE		= (1U << 2),	/* identify done */
	PHYEV_HARD_RST_DONE	= (1U << 1),	/* hard reset done */
	PHYEV_RDY_CH		= (1U << 0),	/* phy ready changed state */

	/* MVS_PCS */
	PCS_EN_SATA_REG_SHIFT	= (16),		/* Enable SATA Register Set */
	PCS_EN_PORT_XMT_SHIFT	= (12),		/* Enable Port Transmit */
	PCS_EN_PORT_XMT_SHIFT2	= (8),		/* For 6480 */
	PCS_SATA_RETRY		= (1U << 8),	/* retry ctl FIS on R_ERR */
	PCS_RSP_RX_EN		= (1U << 7),	/* raw response rx */
	PCS_SELF_CLEAR		= (1U << 5),	/* self-clearing int mode */
	PCS_FIS_RX_EN		= (1U << 4),	/* FIS rx enable */
	PCS_CMD_STOP_ERR	= (1U << 3),	/* cmd stop-on-err enable */
	PCS_CMD_RST		= (1U << 1),	/* reset cmd issue */
	PCS_CMD_EN		= (1U << 0),	/* enable cmd issue */

	/* Port n Attached Device Info */
	PORT_DEV_SSP_TRGT	= (1U << 19),
	PORT_DEV_SMP_TRGT	= (1U << 18),
	PORT_DEV_STP_TRGT	= (1U << 17),
	PORT_DEV_SSP_INIT	= (1U << 11),
	PORT_DEV_SMP_INIT	= (1U << 10),
	PORT_DEV_STP_INIT	= (1U << 9),
	PORT_PHY_ID_MASK	= (0xFFU << 24),
	PORT_DEV_TRGT_MASK	= (0x7U << 17),
	PORT_DEV_INIT_MASK	= (0x7U << 9),
	PORT_DEV_TYPE_MASK	= (0x7U << 0),

	/* Port n PHY Status */
	PHY_RDY			= (1U << 2),
	PHY_DW_SYNC		= (1U << 1),
	PHY_OOB_DTCTD		= (1U << 0),

	/* VSR */
	/* PHYMODE 6 (CDB) */
	PHY_MODE6_LATECLK	= (1U << 29),	/* Lock Clock */
	PHY_MODE6_DTL_SPEED	= (1U << 27),	/* Digital Loop Speed */
	PHY_MODE6_FC_ORDER	= (1U << 26),	/* Fibre Channel Mode Order*/
	PHY_MODE6_MUCNT_EN	= (1U << 24),	/* u Count Enable */
	PHY_MODE6_SEL_MUCNT_LEN	= (1U << 22),	/* Training Length Select */
	PHY_MODE6_SELMUPI	= (1U << 20),	/* Phase Multi Select (init) */
	PHY_MODE6_SELMUPF	= (1U << 18),	/* Phase Multi Select (final) */
	PHY_MODE6_SELMUFF	= (1U << 16),	/* Freq Loop Multi Sel(final) */
	PHY_MODE6_SELMUFI	= (1U << 14),	/* Freq Loop Multi Sel(init) */
	PHY_MODE6_FREEZE_LOOP	= (1U << 12),	/* Freeze Rx CDR Loop */
	PHY_MODE6_INT_RXFOFFS	= (1U << 3),	/* Rx CDR Freq Loop Enable */
	PHY_MODE6_FRC_RXFOFFS	= (1U << 2),	/* Initial Rx CDR Offset */
	PHY_MODE6_STAU_0D8	= (1U << 1),	/* Rx CDR Freq Loop Saturate */
	PHY_MODE6_RXSAT_DIS	= (1U << 0),	/* Saturate Ctl */
};

/* SAS/SATA configuration port registers, aka phy registers */
enum sas_sata_config_port_regs {
	PHYR_IDENTIFY		= 0x00,	/* info for IDENTIFY frame */
	PHYR_ADDR_LO		= 0x04,	/* my SAS address (low) */
	PHYR_ADDR_HI		= 0x08,	/* my SAS address (high) */
	PHYR_ATT_DEV_INFO	= 0x0C,	/* attached device info */
	PHYR_ATT_ADDR_LO	= 0x10,	/* attached dev SAS addr (low) */
	PHYR_ATT_ADDR_HI	= 0x14,	/* attached dev SAS addr (high) */
	PHYR_SATA_CTL		= 0x18,	/* SATA control */
	PHYR_PHY_STAT		= 0x1C,	/* PHY status */
	PHYR_SATA_SIG0		= 0x20,	/*port SATA signature FIS(Byte 0-3) */
	PHYR_SATA_SIG1		= 0x24,	/*port SATA signature FIS(Byte 4-7) */
	PHYR_SATA_SIG2		= 0x28,	/*port SATA signature FIS(Byte 8-11) */
	PHYR_SATA_SIG3		= 0x2c,	/*port SATA signature FIS(Byte 12-15) */
	PHYR_R_ERR_COUNT	= 0x30,	/* port R_ERR count register */
	PHYR_CRC_ERR_COUNT	= 0x34,	/* port CRC error count register */
	PHYR_WIDE_PORT		= 0x38,	/* wide port participating */
	PHYR_CURRENT0		= 0x80,	/* current connection info 0 */
	PHYR_CURRENT1		= 0x84,	/* current connection info 1 */
	PHYR_CURRENT2		= 0x88,	/* current connection info 2 */
};

enum mvs_info_flags {
	MVF_MSI			= (1U << 0),	/* MSI is enabled */
	MVF_PHY_PWR_FIX		= (1U << 1),	/* bug workaround */
};

enum sas_cmd_port_registers {
	CMD_CMRST_OOB_DET	= 0x100, /* COMRESET OOB detect register */
	CMD_CMWK_OOB_DET	= 0x104, /* COMWAKE OOB detect register */
	CMD_CMSAS_OOB_DET	= 0x108, /* COMSAS OOB detect register */
	CMD_BRST_OOB_DET	= 0x10c, /* burst OOB detect register */
	CMD_OOB_SPACE		= 0x110, /* OOB space control register */
	CMD_OOB_BURST		= 0x114, /* OOB burst control register */
	CMD_PHY_TIMER		= 0x118, /* PHY timer control register */
	CMD_PHY_CONFIG0		= 0x11c, /* PHY config register 0 */
	CMD_PHY_CONFIG1		= 0x120, /* PHY config register 1 */
	CMD_SAS_CTL0		= 0x124, /* SAS control register 0 */
	CMD_SAS_CTL1		= 0x128, /* SAS control register 1 */
	CMD_SAS_CTL2		= 0x12c, /* SAS control register 2 */
	CMD_SAS_CTL3		= 0x130, /* SAS control register 3 */
	CMD_ID_TEST		= 0x134, /* ID test register */
	CMD_PL_TIMER		= 0x138, /* PL timer register */
	CMD_WD_TIMER		= 0x13c, /* WD timer register */
	CMD_PORT_SEL_COUNT	= 0x140, /* port selector count register */
	CMD_APP_MEM_CTL		= 0x144, /* Application Memory Control */
	CMD_XOR_MEM_CTL		= 0x148, /* XOR Block Memory Control */
	CMD_DMA_MEM_CTL		= 0x14c, /* DMA Block Memory Control */
	CMD_PORT_MEM_CTL0	= 0x150, /* Port Memory Control 0 */
	CMD_PORT_MEM_CTL1	= 0x154, /* Port Memory Control 1 */
	CMD_SATA_PORT_MEM_CTL0	= 0x158, /* SATA Port Memory Control 0 */
	CMD_SATA_PORT_MEM_CTL1	= 0x15c, /* SATA Port Memory Control 1 */
	CMD_XOR_MEM_BIST_CTL	= 0x160, /* XOR Memory BIST Control */
	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
	CMD_DMA_MEM_BIST_CTL	= 0x168, /* DMA Memory BIST Control */
	CMD_DMA_MEM_BIST_STAT	= 0x16c, /* DMA Memory BIST Status */
	CMD_PORT_MEM_BIST_CTL	= 0x170, /* Port Memory BIST Control */
	CMD_PORT_MEM_BIST_STAT0	= 0x174, /* Port Memory BIST Status 0 */
	CMD_PORT_MEM_BIST_STAT1	= 0x178, /* Port Memory BIST Status 1 */
	CMD_STP_MEM_BIST_CTL	= 0x17c, /* STP Memory BIST Control */
	CMD_STP_MEM_BIST_STAT0	= 0x180, /* STP Memory BIST Status 0 */
	CMD_STP_MEM_BIST_STAT1	= 0x184, /* STP Memory BIST Status 1 */
	CMD_RESET_COUNT		= 0x188, /* Reset Count */
	CMD_MONTR_DATA_SEL	= 0x18C, /* Monitor Data/Select */
	CMD_PLL_PHY_CONFIG	= 0x190, /* PLL/PHY Configuration */
	CMD_PHY_CTL		= 0x194, /* PHY Control and Status */
	CMD_PHY_TEST_COUNT0	= 0x198, /* Phy Test Count 0 */
	CMD_PHY_TEST_COUNT1	= 0x19C, /* Phy Test Count 1 */
	CMD_PHY_TEST_COUNT2	= 0x1A0, /* Phy Test Count 2 */
	CMD_APP_ERR_CONFIG	= 0x1A4, /* Application Error Configuration */
	CMD_PND_FIFO_CTL0	= 0x1A8, /* Pending FIFO Control 0 */
	CMD_HOST_CTL		= 0x1AC, /* Host Control Status */
	CMD_HOST_WR_DATA	= 0x1B0, /* Host Write Data */
	CMD_HOST_RD_DATA	= 0x1B4, /* Host Read Data */
	CMD_PHY_MODE_21		= 0x1B8, /* Phy Mode 21 */
	CMD_SL_MODE0		= 0x1BC, /* SL Mode 0 */
	CMD_SL_MODE1		= 0x1C0, /* SL Mode 1 */
	CMD_PND_FIFO_CTL1	= 0x1C4, /* Pending FIFO Control 1 */
};

enum pci_cfg_register_bits {
	PCTL_PWR_ON	= (0xFU << 24),
	PCTL_OFF	= (0xFU << 12),
	PRD_REQ_SIZE	= (0x4000),
	PRD_REQ_MASK	= (0x00007000),
};

enum nvram_layout_offsets {
	NVR_SIG		= 0x00,		/* 0xAA, 0x55 */
	NVR_SAS_ADDR	= 0x02,		/* 8-byte SAS address */
};

enum chip_flavors {
	chip_6320,
	chip_6440,
	chip_6480,
};

enum port_type {
	PORT_TYPE_SAS	= (1L << 1),
	PORT_TYPE_SATA	= (1L << 0),
};

/* Command Table Format */
enum ct_format {
	/* SSP */
	SSP_F_H		= 0x00,
	SSP_F_IU	= 0x18,
	SSP_F_MAX	= 0x4D,
	/* STP */
	STP_CMD_FIS	= 0x00,
	STP_ATAPI_CMD	= 0x40,
	STP_F_MAX	= 0x10,
	/* SMP */
	SMP_F_T		= 0x00,
	SMP_F_DEP	= 0x01,
	SMP_F_MAX	= 0x101,
};

enum status_buffer {
	SB_EIR_OFF	= 0x00,		/* Error Information Record */
	SB_RFB_OFF	= 0x08,		/* Response Frame Buffer */
	SB_RFB_MAX	= 0x400,	/* RFB size*/
};

enum error_info_rec {
	CMD_ISS_STPD	= (1U << 31),	/* Cmd Issue Stopped */
	CMD_PI_ERR	= (1U << 30),	/* Protection info error.  see flags2 */
	RSP_OVER	= (1U << 29),	/* rsp buffer overflow */
	RETRY_LIM	= (1U << 28),	/* FIS/frame retry limit exceeded */
	UNK_FIS		= (1U << 27),	/* unknown FIS */
	DMA_TERM	= (1U << 26),	/* DMA terminate primitive rx'd */
	SYNC_ERR	= (1U << 25),	/* SYNC rx'd during frame xmit */
	TFILE_ERR	= (1U << 24),	/* SATA taskfile Error bit set */
	R_ERR		= (1U << 23),	/* SATA returned R_ERR prim */
	RD_OFS		= (1U << 20),	/* Read DATA frame invalid offset */
	XFER_RDY_OFS	= (1U << 19),	/* XFER_RDY offset error */
	UNEXP_XFER_RDY	= (1U << 18),	/* unexpected XFER_RDY error */
	DATA_OVER_UNDER	= (1U << 16),	/* data overflow/underflow */
	INTERLOCK	= (1U << 15),	/* interlock error */
	NAK		= (1U << 14),	/* NAK rx'd */
	ACK_NAK_TO	= (1U << 13),	/* ACK/NAK timeout */
	CXN_CLOSED	= (1U << 12),	/* cxn closed w/out ack/nak */
	OPEN_TO		= (1U << 11),	/* I_T nexus lost, open cxn timeout */
	PATH_BLOCKED	= (1U << 10),	/* I_T nexus lost, pathway blocked */
	NO_DEST		= (1U << 9),	/* I_T nexus lost, no destination */
	STP_RES_BSY	= (1U << 8),	/* STP resources busy */
	BREAK		= (1U << 7),	/* break received */
	BAD_DEST	= (1U << 6),	/* bad destination */
	BAD_PROTO	= (1U << 5),	/* protocol not supported */
	BAD_RATE	= (1U << 4),	/* cxn rate not supported */
	WRONG_DEST	= (1U << 3),	/* wrong destination error */
	CREDIT_TO	= (1U << 2),	/* credit timeout */
	WDOG_TO		= (1U << 1),	/* watchdog timeout */
	BUF_PAR		= (1U << 0),	/* buffer parity error */
};

enum error_info_rec_2 {
	SLOT_BSY_ERR	= (1U << 31),	/* Slot Busy Error */
	GRD_CHK_ERR	= (1U << 14),	/* Guard Check Error */
	APP_CHK_ERR	= (1U << 13),	/* Application Check error */
	REF_CHK_ERR	= (1U << 12),	/* Reference Check Error */
	USR_BLK_NM	= (1U << 0),	/* User Block Number */
};

#endif
+524
drivers/scsi/mvsas/mv_init.c
··· 1 + /* 2 + mv_init.c - Marvell 88SE6440 SAS/SATA init support 3 + 4 + Copyright 2007 Red Hat, Inc. 5 + Copyright 2008 Marvell. <kewei@marvell.com> 6 + 7 + This program is free software; you can redistribute it and/or 8 + modify it under the terms of the GNU General Public License as 9 + published by the Free Software Foundation; either version 2, 10 + or (at your option) any later version. 11 + 12 + This program is distributed in the hope that it will be useful, 13 + but WITHOUT ANY WARRANTY; without even the implied warranty 14 + of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 15 + See the GNU General Public License for more details. 16 + 17 + You should have received a copy of the GNU General Public 18 + License along with this program; see the file COPYING. If not, 19 + write to the Free Software Foundation, 675 Mass Ave, Cambridge, 20 + MA 02139, USA. 21 + 22 + */ 23 + 24 + #include "mv_sas.h" 25 + #include "mv_64xx.h" 26 + #include "mv_chips.h" 27 + 28 + static struct scsi_transport_template *mvs_stt; 29 + 30 + static const struct mvs_chip_info mvs_chips[] = { 31 + [chip_6320] = { 2, 16, 9 }, 32 + [chip_6440] = { 4, 16, 9 }, 33 + [chip_6480] = { 8, 32, 10 }, 34 + }; 35 + 36 + static struct scsi_host_template mvs_sht = { 37 + .module = THIS_MODULE, 38 + .name = DRV_NAME, 39 + .queuecommand = sas_queuecommand, 40 + .target_alloc = sas_target_alloc, 41 + .slave_configure = mvs_slave_configure, 42 + .slave_destroy = sas_slave_destroy, 43 + .scan_finished = mvs_scan_finished, 44 + .scan_start = mvs_scan_start, 45 + .change_queue_depth = sas_change_queue_depth, 46 + .change_queue_type = sas_change_queue_type, 47 + .bios_param = sas_bios_param, 48 + .can_queue = 1, 49 + .cmd_per_lun = 1, 50 + .this_id = -1, 51 + .sg_tablesize = SG_ALL, 52 + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 53 + .use_clustering = ENABLE_CLUSTERING, 54 + .eh_device_reset_handler = sas_eh_device_reset_handler, 55 + .eh_bus_reset_handler = sas_eh_bus_reset_handler, 56 + .slave_alloc = 
sas_slave_alloc, 57 + .target_destroy = sas_target_destroy, 58 + .ioctl = sas_ioctl, 59 + }; 60 + 61 + static struct sas_domain_function_template mvs_transport_ops = { 62 + .lldd_execute_task = mvs_task_exec, 63 + .lldd_control_phy = mvs_phy_control, 64 + .lldd_abort_task = mvs_task_abort, 65 + .lldd_port_formed = mvs_port_formed, 66 + .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, 67 + }; 68 + 69 + static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) 70 + { 71 + struct mvs_phy *phy = &mvi->phy[phy_id]; 72 + struct asd_sas_phy *sas_phy = &phy->sas_phy; 73 + 74 + sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0; 75 + sas_phy->class = SAS; 76 + sas_phy->iproto = SAS_PROTOCOL_ALL; 77 + sas_phy->tproto = 0; 78 + sas_phy->type = PHY_TYPE_PHYSICAL; 79 + sas_phy->role = PHY_ROLE_INITIATOR; 80 + sas_phy->oob_mode = OOB_NOT_CONNECTED; 81 + sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 82 + 83 + sas_phy->id = phy_id; 84 + sas_phy->sas_addr = &mvi->sas_addr[0]; 85 + sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 86 + sas_phy->ha = &mvi->sas; 87 + sas_phy->lldd_phy = phy; 88 + } 89 + 90 + static void mvs_free(struct mvs_info *mvi) 91 + { 92 + int i; 93 + 94 + if (!mvi) 95 + return; 96 + 97 + for (i = 0; i < MVS_SLOTS; i++) { 98 + struct mvs_slot_info *slot = &mvi->slot_info[i]; 99 + 100 + if (slot->buf) 101 + dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ, 102 + slot->buf, slot->buf_dma); 103 + } 104 + 105 + if (mvi->tx) 106 + dma_free_coherent(&mvi->pdev->dev, 107 + sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, 108 + mvi->tx, mvi->tx_dma); 109 + if (mvi->rx_fis) 110 + dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, 111 + mvi->rx_fis, mvi->rx_fis_dma); 112 + if (mvi->rx) 113 + dma_free_coherent(&mvi->pdev->dev, 114 + sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), 115 + mvi->rx, mvi->rx_dma); 116 + if (mvi->slot) 117 + dma_free_coherent(&mvi->pdev->dev, 118 + sizeof(*mvi->slot) * MVS_SLOTS, 119 + mvi->slot, mvi->slot_dma); 120 + #ifdef MVS_ENABLE_PERI 121 + if 
(mvi->peri_regs) 122 + iounmap(mvi->peri_regs); 123 + #endif 124 + if (mvi->regs) 125 + iounmap(mvi->regs); 126 + if (mvi->shost) 127 + scsi_host_put(mvi->shost); 128 + kfree(mvi->sas.sas_port); 129 + kfree(mvi->sas.sas_phy); 130 + kfree(mvi); 131 + } 132 + 133 + #ifdef MVS_USE_TASKLET 134 + static void mvs_tasklet(unsigned long data) 135 + { 136 + struct mvs_info *mvi = (struct mvs_info *) data; 137 + unsigned long flags; 138 + 139 + spin_lock_irqsave(&mvi->lock, flags); 140 + 141 + #ifdef MVS_DISABLE_MSI 142 + mvs_int_full(mvi); 143 + #else 144 + mvs_int_rx(mvi, true); 145 + #endif 146 + spin_unlock_irqrestore(&mvi->lock, flags); 147 + } 148 + #endif 149 + 150 + static irqreturn_t mvs_interrupt(int irq, void *opaque) 151 + { 152 + struct mvs_info *mvi = opaque; 153 + void __iomem *regs = mvi->regs; 154 + u32 stat; 155 + 156 + stat = mr32(GBL_INT_STAT); 157 + 158 + if (stat == 0 || stat == 0xffffffff) 159 + return IRQ_NONE; 160 + 161 + /* clear CMD_CMPLT ASAP */ 162 + mw32_f(INT_STAT, CINT_DONE); 163 + 164 + #ifndef MVS_USE_TASKLET 165 + spin_lock(&mvi->lock); 166 + 167 + mvs_int_full(mvi); 168 + 169 + spin_unlock(&mvi->lock); 170 + #else 171 + tasklet_schedule(&mvi->tasklet); 172 + #endif 173 + return IRQ_HANDLED; 174 + } 175 + 176 + static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev, 177 + const struct pci_device_id *ent) 178 + { 179 + struct mvs_info *mvi; 180 + unsigned long res_start, res_len, res_flag; 181 + struct asd_sas_phy **arr_phy; 182 + struct asd_sas_port **arr_port; 183 + const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data]; 184 + int i; 185 + 186 + /* 187 + * alloc and init our per-HBA mvs_info struct 188 + */ 189 + 190 + mvi = kzalloc(sizeof(*mvi), GFP_KERNEL); 191 + if (!mvi) 192 + return NULL; 193 + 194 + spin_lock_init(&mvi->lock); 195 + #ifdef MVS_USE_TASKLET 196 + tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi); 197 + #endif 198 + mvi->pdev = pdev; 199 + mvi->chip = chip; 200 + 201 + if (pdev->device == 
0x6440 && pdev->revision == 0) 202 + mvi->flags |= MVF_PHY_PWR_FIX; 203 + 204 + /* 205 + * alloc and init SCSI, SAS glue 206 + */ 207 + 208 + mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); 209 + if (!mvi->shost) 210 + goto err_out; 211 + 212 + arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); 213 + arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); 214 + if (!arr_phy || !arr_port) 215 + goto err_out; 216 + 217 + for (i = 0; i < MVS_MAX_PHYS; i++) { 218 + mvs_phy_init(mvi, i); 219 + arr_phy[i] = &mvi->phy[i].sas_phy; 220 + arr_port[i] = &mvi->port[i].sas_port; 221 + mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED; 222 + mvi->port[i].wide_port_phymap = 0; 223 + mvi->port[i].port_attached = 0; 224 + INIT_LIST_HEAD(&mvi->port[i].list); 225 + } 226 + 227 + SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; 228 + mvi->shost->transportt = mvs_stt; 229 + mvi->shost->max_id = 21; 230 + mvi->shost->max_lun = ~0; 231 + mvi->shost->max_channel = 0; 232 + mvi->shost->max_cmd_len = 16; 233 + 234 + mvi->sas.sas_ha_name = DRV_NAME; 235 + mvi->sas.dev = &pdev->dev; 236 + mvi->sas.lldd_module = THIS_MODULE; 237 + mvi->sas.sas_addr = &mvi->sas_addr[0]; 238 + mvi->sas.sas_phy = arr_phy; 239 + mvi->sas.sas_port = arr_port; 240 + mvi->sas.num_phys = chip->n_phy; 241 + mvi->sas.lldd_max_execute_num = 1; 242 + mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE; 243 + mvi->shost->can_queue = MVS_CAN_QUEUE; 244 + mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys; 245 + mvi->sas.lldd_ha = mvi; 246 + mvi->sas.core.shost = mvi->shost; 247 + 248 + mvs_tag_init(mvi); 249 + 250 + /* 251 + * ioremap main and peripheral registers 252 + */ 253 + 254 + #ifdef MVS_ENABLE_PERI 255 + res_start = pci_resource_start(pdev, 2); 256 + res_len = pci_resource_len(pdev, 2); 257 + if (!res_start || !res_len) 258 + goto err_out; 259 + 260 + mvi->peri_regs = ioremap_nocache(res_start, res_len); 261 + if (!mvi->peri_regs) 262 + goto err_out; 263 + #endif 264 + 265 + res_start = 
pci_resource_start(pdev, 4); 266 + res_len = pci_resource_len(pdev, 4); 267 + if (!res_start || !res_len) 268 + goto err_out; 269 + 270 + res_flag = pci_resource_flags(pdev, 4); 271 + if (res_flag & IORESOURCE_CACHEABLE) 272 + mvi->regs = ioremap(res_start, res_len); 273 + else 274 + mvi->regs = ioremap_nocache(res_start, res_len); 275 + 276 + if (!mvi->regs) 277 + goto err_out; 278 + 279 + /* 280 + * alloc and init our DMA areas 281 + */ 282 + 283 + mvi->tx = dma_alloc_coherent(&pdev->dev, 284 + sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, 285 + &mvi->tx_dma, GFP_KERNEL); 286 + if (!mvi->tx) 287 + goto err_out; 288 + memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); 289 + 290 + mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ, 291 + &mvi->rx_fis_dma, GFP_KERNEL); 292 + if (!mvi->rx_fis) 293 + goto err_out; 294 + memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); 295 + 296 + mvi->rx = dma_alloc_coherent(&pdev->dev, 297 + sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), 298 + &mvi->rx_dma, GFP_KERNEL); 299 + if (!mvi->rx) 300 + goto err_out; 301 + memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); 302 + 303 + mvi->rx[0] = cpu_to_le32(0xfff); 304 + mvi->rx_cons = 0xfff; 305 + 306 + mvi->slot = dma_alloc_coherent(&pdev->dev, 307 + sizeof(*mvi->slot) * MVS_SLOTS, 308 + &mvi->slot_dma, GFP_KERNEL); 309 + if (!mvi->slot) 310 + goto err_out; 311 + memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS); 312 + 313 + for (i = 0; i < MVS_SLOTS; i++) { 314 + struct mvs_slot_info *slot = &mvi->slot_info[i]; 315 + 316 + slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, 317 + &slot->buf_dma, GFP_KERNEL); 318 + if (!slot->buf) 319 + goto err_out; 320 + memset(slot->buf, 0, MVS_SLOT_BUF_SZ); 321 + } 322 + 323 + /* finally, read NVRAM to get our SAS address */ 324 + if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) 325 + goto err_out; 326 + return mvi; 327 + 328 + err_out: 329 + mvs_free(mvi); 330 + return NULL; 331 + } 332 + 333 + /* move to PCI layer or libata 
core? */ 334 + static int pci_go_64(struct pci_dev *pdev) 335 + { 336 + int rc; 337 + 338 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 339 + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 340 + if (rc) { 341 + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 342 + if (rc) { 343 + dev_printk(KERN_ERR, &pdev->dev, 344 + "64-bit DMA enable failed\n"); 345 + return rc; 346 + } 347 + } 348 + } else { 349 + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 350 + if (rc) { 351 + dev_printk(KERN_ERR, &pdev->dev, 352 + "32-bit DMA enable failed\n"); 353 + return rc; 354 + } 355 + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 356 + if (rc) { 357 + dev_printk(KERN_ERR, &pdev->dev, 358 + "32-bit consistent DMA enable failed\n"); 359 + return rc; 360 + } 361 + } 362 + 363 + return rc; 364 + } 365 + 366 + static int __devinit mvs_pci_init(struct pci_dev *pdev, 367 + const struct pci_device_id *ent) 368 + { 369 + int rc; 370 + struct mvs_info *mvi; 371 + irq_handler_t irq_handler = mvs_interrupt; 372 + 373 + rc = pci_enable_device(pdev); 374 + if (rc) 375 + return rc; 376 + 377 + pci_set_master(pdev); 378 + 379 + rc = pci_request_regions(pdev, DRV_NAME); 380 + if (rc) 381 + goto err_out_disable; 382 + 383 + rc = pci_go_64(pdev); 384 + if (rc) 385 + goto err_out_regions; 386 + 387 + mvi = mvs_alloc(pdev, ent); 388 + if (!mvi) { 389 + rc = -ENOMEM; 390 + goto err_out_regions; 391 + } 392 + 393 + rc = mvs_hw_init(mvi); 394 + if (rc) 395 + goto err_out_mvi; 396 + 397 + #ifndef MVS_DISABLE_MSI 398 + if (!pci_enable_msi(pdev)) { 399 + u32 tmp; 400 + void __iomem *regs = mvi->regs; 401 + mvi->flags |= MVF_MSI; 402 + irq_handler = mvs_msi_interrupt; 403 + tmp = mr32(PCS); 404 + mw32(PCS, tmp | PCS_SELF_CLEAR); 405 + } 406 + #endif 407 + 408 + rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi); 409 + if (rc) 410 + goto err_out_msi; 411 + 412 + rc = scsi_add_host(mvi->shost, &pdev->dev); 413 + if (rc) 414 + goto err_out_irq; 415 + 416 + 
rc = sas_register_ha(&mvi->sas); 417 + if (rc) 418 + goto err_out_shost; 419 + 420 + pci_set_drvdata(pdev, mvi); 421 + 422 + mvs_print_info(mvi); 423 + 424 + mvs_hba_interrupt_enable(mvi); 425 + 426 + scsi_scan_host(mvi->shost); 427 + 428 + return 0; 429 + 430 + err_out_shost: 431 + scsi_remove_host(mvi->shost); 432 + err_out_irq: 433 + free_irq(pdev->irq, mvi); 434 + err_out_msi: 435 + if (mvi->flags |= MVF_MSI) 436 + pci_disable_msi(pdev); 437 + err_out_mvi: 438 + mvs_free(mvi); 439 + err_out_regions: 440 + pci_release_regions(pdev); 441 + err_out_disable: 442 + pci_disable_device(pdev); 443 + return rc; 444 + } 445 + 446 + static void __devexit mvs_pci_remove(struct pci_dev *pdev) 447 + { 448 + struct mvs_info *mvi = pci_get_drvdata(pdev); 449 + 450 + pci_set_drvdata(pdev, NULL); 451 + 452 + if (mvi) { 453 + sas_unregister_ha(&mvi->sas); 454 + mvs_hba_interrupt_disable(mvi); 455 + sas_remove_host(mvi->shost); 456 + scsi_remove_host(mvi->shost); 457 + 458 + free_irq(pdev->irq, mvi); 459 + if (mvi->flags & MVF_MSI) 460 + pci_disable_msi(pdev); 461 + mvs_free(mvi); 462 + pci_release_regions(pdev); 463 + } 464 + pci_disable_device(pdev); 465 + } 466 + 467 + static struct pci_device_id __devinitdata mvs_pci_table[] = { 468 + { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, 469 + { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, 470 + { 471 + .vendor = PCI_VENDOR_ID_MARVELL, 472 + .device = 0x6440, 473 + .subvendor = PCI_ANY_ID, 474 + .subdevice = 0x6480, 475 + .class = 0, 476 + .class_mask = 0, 477 + .driver_data = chip_6480, 478 + }, 479 + { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, 480 + { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 }, 481 + 482 + { } /* terminate list */ 483 + }; 484 + 485 + static struct pci_driver mvs_pci_driver = { 486 + .name = DRV_NAME, 487 + .id_table = mvs_pci_table, 488 + .probe = mvs_pci_init, 489 + .remove = __devexit_p(mvs_pci_remove), 490 + }; 491 + 492 + static int __init mvs_init(void) 493 + { 494 + int rc; 495 + 496 + mvs_stt = 
sas_domain_attach_transport(&mvs_transport_ops); 497 + if (!mvs_stt) 498 + return -ENOMEM; 499 + 500 + rc = pci_register_driver(&mvs_pci_driver); 501 + if (rc) 502 + goto err_out; 503 + 504 + return 0; 505 + 506 + err_out: 507 + sas_release_transport(mvs_stt); 508 + return rc; 509 + } 510 + 511 + static void __exit mvs_exit(void) 512 + { 513 + pci_unregister_driver(&mvs_pci_driver); 514 + sas_release_transport(mvs_stt); 515 + } 516 + 517 + module_init(mvs_init); 518 + module_exit(mvs_exit); 519 + 520 + MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); 521 + MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); 522 + MODULE_VERSION(DRV_VERSION); 523 + MODULE_LICENSE("GPL"); 524 + MODULE_DEVICE_TABLE(pci, mvs_pci_table);
+711 -2115
drivers/scsi/mvsas/mv_sas.c
··· 1 1 /* 2 - mvsas.c - Marvell 88SE6440 SAS/SATA support 2 + mv_sas.c - Marvell 88SE6440 SAS/SATA support 3 3 4 4 Copyright 2007 Red Hat, Inc. 5 5 Copyright 2008 Marvell. <kewei@marvell.com> ··· 28 28 29 29 */ 30 30 31 - #include <linux/kernel.h> 32 - #include <linux/module.h> 33 - #include <linux/pci.h> 34 - #include <linux/interrupt.h> 35 - #include <linux/spinlock.h> 36 - #include <linux/delay.h> 37 - #include <linux/dma-mapping.h> 38 - #include <linux/ctype.h> 39 - #include <scsi/libsas.h> 40 - #include <scsi/scsi_tcq.h> 41 - #include <scsi/sas_ata.h> 42 - #include <asm/io.h> 43 - 44 - #define DRV_NAME "mvsas" 45 - #define DRV_VERSION "0.5.2" 46 - #define _MV_DUMP 0 47 - #define MVS_DISABLE_NVRAM 48 - #define MVS_DISABLE_MSI 49 - 50 - #define mr32(reg) readl(regs + MVS_##reg) 51 - #define mw32(reg,val) writel((val), regs + MVS_##reg) 52 - #define mw32_f(reg,val) do { \ 53 - writel((val), regs + MVS_##reg); \ 54 - readl(regs + MVS_##reg); \ 55 - } while (0) 56 - 57 - #define MVS_ID_NOT_MAPPED 0x7f 58 - #define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) 31 + #include "mv_sas.h" 32 + #include "mv_64xx.h" 33 + #include "mv_chips.h" 59 34 60 35 /* offset for D2H FIS in the Received FIS List Structure */ 61 36 #define SATA_RECEIVED_D2H_FIS(reg_set) \ ··· 40 65 #define UNASSOC_D2H_FIS(id) \ 41 66 ((void *) mvi->rx_fis + 0x100 * id) 42 67 43 - #define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \ 44 - for ((__mc) = (__lseq_mask), (__lseq) = 0; \ 45 - (__mc) != 0 && __rest; \ 46 - (++__lseq), (__mc) >>= 1) 47 - 48 - /* driver compile-time configuration */ 49 - enum driver_configuration { 50 - MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ 51 - MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ 52 - /* software requires power-of-2 53 - ring size */ 54 - 55 - MVS_SLOTS = 512, /* command slots */ 56 - MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ 57 - MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ 58 - MVS_ATA_CMD_SZ = 96, /* SATA 
command table buffer size */ 59 - MVS_OAF_SZ = 64, /* Open address frame buffer size */ 60 - 61 - MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */ 62 - 63 - MVS_QUEUE_SIZE = 30, /* Support Queue depth */ 64 - MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */ 68 + struct mvs_task_exec_info { 69 + struct sas_task *task; 70 + struct mvs_cmd_hdr *hdr; 71 + struct mvs_port *port; 72 + u32 tag; 73 + int n_elem; 65 74 }; 66 75 67 - /* unchangeable hardware details */ 68 - enum hardware_details { 69 - MVS_MAX_PHYS = 8, /* max. possible phys */ 70 - MVS_MAX_PORTS = 8, /* max. possible ports */ 71 - MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100), 72 - }; 73 - 74 - /* peripheral registers (BAR2) */ 75 - enum peripheral_registers { 76 - SPI_CTL = 0x10, /* EEPROM control */ 77 - SPI_CMD = 0x14, /* EEPROM command */ 78 - SPI_DATA = 0x18, /* EEPROM data */ 79 - }; 80 - 81 - enum peripheral_register_bits { 82 - TWSI_RDY = (1U << 7), /* EEPROM interface ready */ 83 - TWSI_RD = (1U << 4), /* EEPROM read access */ 84 - 85 - SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */ 86 - }; 87 - 88 - /* enhanced mode registers (BAR4) */ 89 - enum hw_registers { 90 - MVS_GBL_CTL = 0x04, /* global control */ 91 - MVS_GBL_INT_STAT = 0x08, /* global irq status */ 92 - MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ 93 - MVS_GBL_PORT_TYPE = 0xa0, /* port type */ 94 - 95 - MVS_CTL = 0x100, /* SAS/SATA port configuration */ 96 - MVS_PCS = 0x104, /* SAS/SATA port control/status */ 97 - MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ 98 - MVS_CMD_LIST_HI = 0x10C, 99 - MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ 100 - MVS_RX_FIS_HI = 0x114, 101 - 102 - MVS_TX_CFG = 0x120, /* TX configuration */ 103 - MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ 104 - MVS_TX_HI = 0x128, 105 - 106 - MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ 107 - MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ 108 - MVS_RX_CFG = 0x134, /* RX configuration */ 109 - MVS_RX_LO = 0x138, /* RX (completion) ring 
addr */ 110 - MVS_RX_HI = 0x13C, 111 - MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ 112 - 113 - MVS_INT_COAL = 0x148, /* Int coalescing config */ 114 - MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ 115 - MVS_INT_STAT = 0x150, /* Central int status */ 116 - MVS_INT_MASK = 0x154, /* Central int enable */ 117 - MVS_INT_STAT_SRS = 0x158, /* SATA register set status */ 118 - MVS_INT_MASK_SRS = 0x15C, 119 - 120 - /* ports 1-3 follow after this */ 121 - MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ 122 - MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ 123 - MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */ 124 - MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */ 125 - 126 - /* ports 1-3 follow after this */ 127 - MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ 128 - MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ 129 - 130 - MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ 131 - MVS_CMD_DATA = 0x1BC, /* Command register port (data) */ 132 - 133 - /* ports 1-3 follow after this */ 134 - MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ 135 - MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ 136 - MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */ 137 - MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */ 138 - 139 - /* ports 1-3 follow after this */ 140 - MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ 141 - MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ 142 - MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */ 143 - MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */ 144 - }; 145 - 146 - enum hw_register_bits { 147 - /* MVS_GBL_CTL */ 148 - INT_EN = (1U << 1), /* Global int enable */ 149 - HBA_RST = (1U << 0), /* HBA reset */ 150 - 151 - /* MVS_GBL_INT_STAT */ 152 - INT_XOR = (1U << 4), /* XOR engine event */ 153 - INT_SAS_SATA = (1U << 0), /* SAS/SATA event */ 154 - 155 - /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */ 156 - SATA_TARGET = (1U << 16), /* port0 SATA target enable */ 157 - 
MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */ 158 - MODE_AUTO_DET_PORT6 = (1U << 14), 159 - MODE_AUTO_DET_PORT5 = (1U << 13), 160 - MODE_AUTO_DET_PORT4 = (1U << 12), 161 - MODE_AUTO_DET_PORT3 = (1U << 11), 162 - MODE_AUTO_DET_PORT2 = (1U << 10), 163 - MODE_AUTO_DET_PORT1 = (1U << 9), 164 - MODE_AUTO_DET_PORT0 = (1U << 8), 165 - MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 | 166 - MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 | 167 - MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 | 168 - MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7, 169 - MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */ 170 - MODE_SAS_PORT6_MASK = (1U << 6), 171 - MODE_SAS_PORT5_MASK = (1U << 5), 172 - MODE_SAS_PORT4_MASK = (1U << 4), 173 - MODE_SAS_PORT3_MASK = (1U << 3), 174 - MODE_SAS_PORT2_MASK = (1U << 2), 175 - MODE_SAS_PORT1_MASK = (1U << 1), 176 - MODE_SAS_PORT0_MASK = (1U << 0), 177 - MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK | 178 - MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK | 179 - MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK | 180 - MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK, 181 - 182 - /* SAS_MODE value may be 183 - * dictated (in hw) by values 184 - * of SATA_TARGET & AUTO_DET 185 - */ 186 - 187 - /* MVS_TX_CFG */ 188 - TX_EN = (1U << 16), /* Enable TX */ 189 - TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */ 190 - 191 - /* MVS_RX_CFG */ 192 - RX_EN = (1U << 16), /* Enable RX */ 193 - RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */ 194 - 195 - /* MVS_INT_COAL */ 196 - COAL_EN = (1U << 16), /* Enable int coalescing */ 197 - 198 - /* MVS_INT_STAT, MVS_INT_MASK */ 199 - CINT_I2C = (1U << 31), /* I2C event */ 200 - CINT_SW0 = (1U << 30), /* software event 0 */ 201 - CINT_SW1 = (1U << 29), /* software event 1 */ 202 - CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */ 203 - CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ 204 - CINT_MEM = (1U << 26), /* int mem parity err */ 205 - CINT_I2C_SLAVE = (1U << 25), /* 
slave I2C event */ 206 - CINT_SRS = (1U << 3), /* SRS event */ 207 - CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ 208 - CINT_DONE = (1U << 0), /* cmd completion */ 209 - 210 - /* shl for ports 1-3 */ 211 - CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */ 212 - CINT_PORT = (1U << 8), /* port0 event */ 213 - CINT_PORT_MASK_OFFSET = 8, 214 - CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), 215 - 216 - /* TX (delivery) ring bits */ 217 - TXQ_CMD_SHIFT = 29, 218 - TXQ_CMD_SSP = 1, /* SSP protocol */ 219 - TXQ_CMD_SMP = 2, /* SMP protocol */ 220 - TXQ_CMD_STP = 3, /* STP/SATA protocol */ 221 - TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ 222 - TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ 223 - TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ 224 - TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */ 225 - TXQ_SRS_SHIFT = 20, /* SATA register set */ 226 - TXQ_SRS_MASK = 0x7f, 227 - TXQ_PHY_SHIFT = 12, /* PHY bitmap */ 228 - TXQ_PHY_MASK = 0xff, 229 - TXQ_SLOT_MASK = 0xfff, /* slot number */ 230 - 231 - /* RX (completion) ring bits */ 232 - RXQ_GOOD = (1U << 23), /* Response good */ 233 - RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */ 234 - RXQ_CMD_RX = (1U << 20), /* target cmd received */ 235 - RXQ_ATTN = (1U << 19), /* attention */ 236 - RXQ_RSP = (1U << 18), /* response frame xfer'd */ 237 - RXQ_ERR = (1U << 17), /* err info rec xfer'd */ 238 - RXQ_DONE = (1U << 16), /* cmd complete */ 239 - RXQ_SLOT_MASK = 0xfff, /* slot number */ 240 - 241 - /* mvs_cmd_hdr bits */ 242 - MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */ 243 - MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */ 244 - 245 - /* SSP initiator only */ 246 - MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */ 247 - 248 - /* SSP initiator or target */ 249 - MCH_SSP_FR_TASK = 0x1, /* TASK frame */ 250 - 251 - /* SSP target only */ 252 - MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */ 253 - MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */ 254 - MCH_SSP_FR_READ = 0x6, /* Read 
DATA frame(s) */ 255 - MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ 256 - 257 - MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ 258 - MCH_FBURST = (1U << 11), /* first burst (SSP) */ 259 - MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ 260 - MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */ 261 - MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */ 262 - MCH_RESET = (1U << 7), /* Reset (STP/SATA) */ 263 - MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */ 264 - MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */ 265 - MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */ 266 - MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/ 267 - 268 - CCTL_RST = (1U << 5), /* port logic reset */ 269 - 270 - /* 0(LSB first), 1(MSB first) */ 271 - CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */ 272 - CCTL_ENDIAN_RSP = (1U << 2), /* response frame */ 273 - CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */ 274 - CCTL_ENDIAN_CMD = (1U << 0), /* command table */ 275 - 276 - /* MVS_Px_SER_CTLSTAT (per-phy control) */ 277 - PHY_SSP_RST = (1U << 3), /* reset SSP link layer */ 278 - PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ 279 - PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ 280 - PHY_RST = (1U << 0), /* phy reset */ 281 - PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), 282 - PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), 283 - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), 284 - PHY_NEG_SPP_PHYS_LINK_RATE_MASK = 285 - (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), 286 - PHY_READY_MASK = (1U << 20), 287 - 288 - /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ 289 - PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */ 290 - PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ 291 - PHYEV_AN = (1U << 18), /* SATA async notification */ 292 - PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ 293 - PHYEV_SIG_FIS = (1U << 16), /* signature FIS */ 294 - PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */ 295 - PHYEV_IU_BIG 
= (1U << 11), /* IU too long err */ 296 - PHYEV_IU_SMALL = (1U << 10), /* IU too short err */ 297 - PHYEV_UNK_TAG = (1U << 9), /* unknown tag */ 298 - PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */ 299 - PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */ 300 - PHYEV_PORT_SEL = (1U << 6), /* port selector present */ 301 - PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */ 302 - PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */ 303 - PHYEV_ID_FAIL = (1U << 3), /* identify failed */ 304 - PHYEV_ID_DONE = (1U << 2), /* identify done */ 305 - PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */ 306 - PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */ 307 - 308 - /* MVS_PCS */ 309 - PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */ 310 - PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ 311 - PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */ 312 - PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ 313 - PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ 314 - PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ 315 - PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ 316 - PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ 317 - PCS_CMD_RST = (1U << 1), /* reset cmd issue */ 318 - PCS_CMD_EN = (1U << 0), /* enable cmd issue */ 319 - 320 - /* Port n Attached Device Info */ 321 - PORT_DEV_SSP_TRGT = (1U << 19), 322 - PORT_DEV_SMP_TRGT = (1U << 18), 323 - PORT_DEV_STP_TRGT = (1U << 17), 324 - PORT_DEV_SSP_INIT = (1U << 11), 325 - PORT_DEV_SMP_INIT = (1U << 10), 326 - PORT_DEV_STP_INIT = (1U << 9), 327 - PORT_PHY_ID_MASK = (0xFFU << 24), 328 - PORT_DEV_TRGT_MASK = (0x7U << 17), 329 - PORT_DEV_INIT_MASK = (0x7U << 9), 330 - PORT_DEV_TYPE_MASK = (0x7U << 0), 331 - 332 - /* Port n PHY Status */ 333 - PHY_RDY = (1U << 2), 334 - PHY_DW_SYNC = (1U << 1), 335 - PHY_OOB_DTCTD = (1U << 0), 336 - 337 - /* VSR */ 338 - /* PHYMODE 6 (CDB) */ 339 - PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */ 340 - PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop 
Speed */ 341 - PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/ 342 - PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */ 343 - PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */ 344 - PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */ 345 - PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */ 346 - PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */ 347 - PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */ 348 - PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */ 349 - PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */ 350 - PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */ 351 - PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */ 352 - PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */ 353 - }; 354 - 355 - enum mvs_info_flags { 356 - MVF_MSI = (1U << 0), /* MSI is enabled */ 357 - MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ 358 - }; 359 - 360 - enum sas_cmd_port_registers { 361 - CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */ 362 - CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */ 363 - CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */ 364 - CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */ 365 - CMD_OOB_SPACE = 0x110, /* OOB space control register */ 366 - CMD_OOB_BURST = 0x114, /* OOB burst control register */ 367 - CMD_PHY_TIMER = 0x118, /* PHY timer control register */ 368 - CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ 369 - CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ 370 - CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */ 371 - CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */ 372 - CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */ 373 - CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */ 374 - CMD_ID_TEST = 0x134, /* ID test register */ 375 - CMD_PL_TIMER = 0x138, /* PL timer register */ 376 - CMD_WD_TIMER = 0x13c, /* WD timer register */ 377 - 
CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */ 378 - CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ 379 - CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ 380 - CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ 381 - CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */ 382 - CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */ 383 - CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */ 384 - CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */ 385 - CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */ 386 - CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */ 387 - CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */ 388 - CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */ 389 - CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */ 390 - CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */ 391 - CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */ 392 - CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */ 393 - CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */ 394 - CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */ 395 - CMD_RESET_COUNT = 0x188, /* Reset Count */ 396 - CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */ 397 - CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */ 398 - CMD_PHY_CTL = 0x194, /* PHY Control and Status */ 399 - CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */ 400 - CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */ 401 - CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */ 402 - CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */ 403 - CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */ 404 - CMD_HOST_CTL = 0x1AC, /* Host Control Status */ 405 - CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */ 406 - CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */ 407 - CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */ 408 - CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */ 409 - 
CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */ 410 - CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ 411 - }; 412 - 413 - /* SAS/SATA configuration port registers, aka phy registers */ 414 - enum sas_sata_config_port_regs { 415 - PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */ 416 - PHYR_ADDR_LO = 0x04, /* my SAS address (low) */ 417 - PHYR_ADDR_HI = 0x08, /* my SAS address (high) */ 418 - PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */ 419 - PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */ 420 - PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ 421 - PHYR_SATA_CTL = 0x18, /* SATA control */ 422 - PHYR_PHY_STAT = 0x1C, /* PHY status */ 423 - PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ 424 - PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ 425 - PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ 426 - PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ 427 - PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ 428 - PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ 429 - PHYR_WIDE_PORT = 0x38, /* wide port participating */ 430 - PHYR_CURRENT0 = 0x80, /* current connection info 0 */ 431 - PHYR_CURRENT1 = 0x84, /* current connection info 1 */ 432 - PHYR_CURRENT2 = 0x88, /* current connection info 2 */ 433 - }; 434 - 435 - /* SAS/SATA Vendor Specific Port Registers */ 436 - enum sas_sata_vsp_regs { 437 - VSR_PHY_STAT = 0x00, /* Phy Status */ 438 - VSR_PHY_MODE1 = 0x01, /* phy tx */ 439 - VSR_PHY_MODE2 = 0x02, /* tx scc */ 440 - VSR_PHY_MODE3 = 0x03, /* pll */ 441 - VSR_PHY_MODE4 = 0x04, /* VCO */ 442 - VSR_PHY_MODE5 = 0x05, /* Rx */ 443 - VSR_PHY_MODE6 = 0x06, /* CDR */ 444 - VSR_PHY_MODE7 = 0x07, /* Impedance */ 445 - VSR_PHY_MODE8 = 0x08, /* Voltage */ 446 - VSR_PHY_MODE9 = 0x09, /* Test */ 447 - VSR_PHY_MODE10 = 0x0A, /* Power */ 448 - VSR_PHY_MODE11 = 0x0B, /* Phy Mode */ 449 - VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */ 450 - VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 
*/ 451 - }; 452 - 453 - enum pci_cfg_registers { 454 - PCR_PHY_CTL = 0x40, 455 - PCR_PHY_CTL2 = 0x90, 456 - PCR_DEV_CTRL = 0xE8, 457 - }; 458 - 459 - enum pci_cfg_register_bits { 460 - PCTL_PWR_ON = (0xFU << 24), 461 - PCTL_OFF = (0xFU << 12), 462 - PRD_REQ_SIZE = (0x4000), 463 - PRD_REQ_MASK = (0x00007000), 464 - }; 465 - 466 - enum nvram_layout_offsets { 467 - NVR_SIG = 0x00, /* 0xAA, 0x55 */ 468 - NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */ 469 - }; 470 - 471 - enum chip_flavors { 472 - chip_6320, 473 - chip_6440, 474 - chip_6480, 475 - }; 476 - 477 - enum port_type { 478 - PORT_TYPE_SAS = (1L << 1), 479 - PORT_TYPE_SATA = (1L << 0), 480 - }; 481 - 482 - /* Command Table Format */ 483 - enum ct_format { 484 - /* SSP */ 485 - SSP_F_H = 0x00, 486 - SSP_F_IU = 0x18, 487 - SSP_F_MAX = 0x4D, 488 - /* STP */ 489 - STP_CMD_FIS = 0x00, 490 - STP_ATAPI_CMD = 0x40, 491 - STP_F_MAX = 0x10, 492 - /* SMP */ 493 - SMP_F_T = 0x00, 494 - SMP_F_DEP = 0x01, 495 - SMP_F_MAX = 0x101, 496 - }; 497 - 498 - enum status_buffer { 499 - SB_EIR_OFF = 0x00, /* Error Information Record */ 500 - SB_RFB_OFF = 0x08, /* Response Frame Buffer */ 501 - SB_RFB_MAX = 0x400, /* RFB size*/ 502 - }; 503 - 504 - enum error_info_rec { 505 - CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */ 506 - CMD_PI_ERR = (1U << 30), /* Protection info error. 
see flags2 */ 507 - RSP_OVER = (1U << 29), /* rsp buffer overflow */ 508 - RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */ 509 - UNK_FIS = (1U << 27), /* unknown FIS */ 510 - DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */ 511 - SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */ 512 - TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */ 513 - R_ERR = (1U << 23), /* SATA returned R_ERR prim */ 514 - RD_OFS = (1U << 20), /* Read DATA frame invalid offset */ 515 - XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */ 516 - UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */ 517 - DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */ 518 - INTERLOCK = (1U << 15), /* interlock error */ 519 - NAK = (1U << 14), /* NAK rx'd */ 520 - ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */ 521 - CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */ 522 - OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */ 523 - PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */ 524 - NO_DEST = (1U << 9), /* I_T nexus lost, no destination */ 525 - STP_RES_BSY = (1U << 8), /* STP resources busy */ 526 - BREAK = (1U << 7), /* break received */ 527 - BAD_DEST = (1U << 6), /* bad destination */ 528 - BAD_PROTO = (1U << 5), /* protocol not supported */ 529 - BAD_RATE = (1U << 4), /* cxn rate not supported */ 530 - WRONG_DEST = (1U << 3), /* wrong destination error */ 531 - CREDIT_TO = (1U << 2), /* credit timeout */ 532 - WDOG_TO = (1U << 1), /* watchdog timeout */ 533 - BUF_PAR = (1U << 0), /* buffer parity error */ 534 - }; 535 - 536 - enum error_info_rec_2 { 537 - SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */ 538 - GRD_CHK_ERR = (1U << 14), /* Guard Check Error */ 539 - APP_CHK_ERR = (1U << 13), /* Application Check error */ 540 - REF_CHK_ERR = (1U << 12), /* Reference Check Error */ 541 - USR_BLK_NM = (1U << 0), /* User Block Number */ 542 - }; 543 - 544 - struct mvs_chip_info { 545 - u32 n_phy; 546 - u32 srs_sz; 547 - u32 
slot_width; 548 - }; 549 - 550 - struct mvs_err_info { 551 - __le32 flags; 552 - __le32 flags2; 553 - }; 554 - 555 - struct mvs_prd { 556 - __le64 addr; /* 64-bit buffer address */ 557 - __le32 reserved; 558 - __le32 len; /* 16-bit length */ 559 - }; 560 - 561 - struct mvs_cmd_hdr { 562 - __le32 flags; /* PRD tbl len; SAS, SATA ctl */ 563 - __le32 lens; /* cmd, max resp frame len */ 564 - __le32 tags; /* targ port xfer tag; tag */ 565 - __le32 data_len; /* data xfer len */ 566 - __le64 cmd_tbl; /* command table address */ 567 - __le64 open_frame; /* open addr frame address */ 568 - __le64 status_buf; /* status buffer address */ 569 - __le64 prd_tbl; /* PRD tbl address */ 570 - __le32 reserved[4]; 571 - }; 572 - 573 - struct mvs_port { 574 - struct asd_sas_port sas_port; 575 - u8 port_attached; 576 - u8 taskfileset; 577 - u8 wide_port_phymap; 578 - struct list_head list; 579 - }; 580 - 581 - struct mvs_phy { 582 - struct mvs_port *port; 583 - struct asd_sas_phy sas_phy; 584 - struct sas_identify identify; 585 - struct scsi_device *sdev; 586 - u64 dev_sas_addr; 587 - u64 att_dev_sas_addr; 588 - u32 att_dev_info; 589 - u32 dev_info; 590 - u32 phy_type; 591 - u32 phy_status; 592 - u32 irq_status; 593 - u32 frame_rcvd_size; 594 - u8 frame_rcvd[32]; 595 - u8 phy_attached; 596 - enum sas_linkrate minimum_linkrate; 597 - enum sas_linkrate maximum_linkrate; 598 - }; 599 - 600 - struct mvs_slot_info { 601 - struct list_head list; 602 - struct sas_task *task; 603 - u32 n_elem; 604 - u32 tx; 605 - 606 - /* DMA buffer for storing cmd tbl, open addr frame, status buffer, 607 - * and PRD table 608 - */ 609 - void *buf; 610 - dma_addr_t buf_dma; 611 - #if _MV_DUMP 612 - u32 cmd_size; 613 - #endif 614 - 615 - void *response; 616 - struct mvs_port *port; 617 - }; 618 - 619 - struct mvs_info { 620 - unsigned long flags; 621 - 622 - spinlock_t lock; /* host-wide lock */ 623 - struct pci_dev *pdev; /* our device */ 624 - void __iomem *regs; /* enhanced mode registers */ 625 - void 
__iomem *peri_regs; /* peripheral registers */ 626 - 627 - u8 sas_addr[SAS_ADDR_SIZE]; 628 - struct sas_ha_struct sas; /* SCSI/SAS glue */ 629 - struct Scsi_Host *shost; 630 - 631 - __le32 *tx; /* TX (delivery) DMA ring */ 632 - dma_addr_t tx_dma; 633 - u32 tx_prod; /* cached next-producer idx */ 634 - 635 - __le32 *rx; /* RX (completion) DMA ring */ 636 - dma_addr_t rx_dma; 637 - u32 rx_cons; /* RX consumer idx */ 638 - 639 - __le32 *rx_fis; /* RX'd FIS area */ 640 - dma_addr_t rx_fis_dma; 641 - 642 - struct mvs_cmd_hdr *slot; /* DMA command header slots */ 643 - dma_addr_t slot_dma; 644 - 645 - const struct mvs_chip_info *chip; 646 - 647 - u8 tags[MVS_SLOTS]; 648 - struct mvs_slot_info slot_info[MVS_SLOTS]; 649 - /* further per-slot information */ 650 - struct mvs_phy phy[MVS_MAX_PHYS]; 651 - struct mvs_port port[MVS_MAX_PHYS]; 652 - #ifdef MVS_USE_TASKLET 653 - struct tasklet_struct tasklet; 654 - #endif 655 - }; 656 - 657 - static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, 658 - void *funcdata); 659 - static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port); 660 - static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val); 661 - static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port); 662 - static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val); 663 - static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val); 664 - static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port); 665 - 666 - static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); 667 - static void mvs_detect_porttype(struct mvs_info *mvi, int i); 668 - static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); 669 76 static void mvs_release_task(struct mvs_info *mvi, int phy_no); 77 + static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); 78 + static void mvs_update_phyinfo(struct mvs_info *mvi, int i, 79 + int get_st); 80 + static int mvs_int_rx(struct mvs_info *mvi, bool 
self_clear); 81 + static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, 82 + u32 slot_idx); 670 83 671 - static int mvs_scan_finished(struct Scsi_Host *, unsigned long); 672 - static void mvs_scan_start(struct Scsi_Host *); 673 - static int mvs_slave_configure(struct scsi_device *sdev); 84 + static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) 85 + { 86 + if (task->lldd_task) { 87 + struct mvs_slot_info *slot; 88 + slot = (struct mvs_slot_info *) task->lldd_task; 89 + *tag = slot - mvi->slot_info; 90 + return 1; 91 + } 92 + return 0; 93 + } 674 94 675 - static struct scsi_transport_template *mvs_stt; 95 + static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) 96 + { 97 + void *bitmap = (void *) &mvi->tags; 98 + clear_bit(tag, bitmap); 99 + } 676 100 677 - static const struct mvs_chip_info mvs_chips[] = { 678 - [chip_6320] = { 2, 16, 9 }, 679 - [chip_6440] = { 4, 16, 9 }, 680 - [chip_6480] = { 8, 32, 10 }, 681 - }; 101 + static void mvs_tag_free(struct mvs_info *mvi, u32 tag) 102 + { 103 + mvs_tag_clear(mvi, tag); 104 + } 682 105 683 - static struct scsi_host_template mvs_sht = { 684 - .module = THIS_MODULE, 685 - .name = DRV_NAME, 686 - .queuecommand = sas_queuecommand, 687 - .target_alloc = sas_target_alloc, 688 - .slave_configure = mvs_slave_configure, 689 - .slave_destroy = sas_slave_destroy, 690 - .scan_finished = mvs_scan_finished, 691 - .scan_start = mvs_scan_start, 692 - .change_queue_depth = sas_change_queue_depth, 693 - .change_queue_type = sas_change_queue_type, 694 - .bios_param = sas_bios_param, 695 - .can_queue = 1, 696 - .cmd_per_lun = 1, 697 - .this_id = -1, 698 - .sg_tablesize = SG_ALL, 699 - .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 700 - .use_clustering = ENABLE_CLUSTERING, 701 - .eh_device_reset_handler = sas_eh_device_reset_handler, 702 - .eh_bus_reset_handler = sas_eh_bus_reset_handler, 703 - .slave_alloc = sas_slave_alloc, 704 - .target_destroy = sas_target_destroy, 705 - .ioctl = sas_ioctl, 706 - 
}; 106 + static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) 107 + { 108 + void *bitmap = (void *) &mvi->tags; 109 + set_bit(tag, bitmap); 110 + } 111 + 112 + static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) 113 + { 114 + unsigned int index, tag; 115 + void *bitmap = (void *) &mvi->tags; 116 + 117 + index = find_first_zero_bit(bitmap, MVS_SLOTS); 118 + tag = index; 119 + if (tag >= MVS_SLOTS) 120 + return -SAS_QUEUE_FULL; 121 + mvs_tag_set(mvi, tag); 122 + *tag_out = tag; 123 + return 0; 124 + } 125 + 126 + void mvs_tag_init(struct mvs_info *mvi) 127 + { 128 + int i; 129 + for (i = 0; i < MVS_SLOTS; ++i) 130 + mvs_tag_clear(mvi, i); 131 + } 707 132 708 133 static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) 709 134 { ··· 223 848 #endif 224 849 } 225 850 226 - static void mvs_hba_interrupt_enable(struct mvs_info *mvi) 851 + /* FIXME: locking? */ 852 + int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) 227 853 { 228 - void __iomem *regs = mvi->regs; 854 + struct mvs_info *mvi = sas_phy->ha->lldd_ha; 855 + int rc = 0, phy_id = sas_phy->id; 229 856 u32 tmp; 230 857 231 - tmp = mr32(GBL_CTL); 858 + tmp = mvs_read_phy_ctl(mvi, phy_id); 232 859 233 - mw32(GBL_CTL, tmp | INT_EN); 234 - } 860 + switch (func) { 861 + case PHY_FUNC_SET_LINK_RATE:{ 862 + struct sas_phy_linkrates *rates = funcdata; 863 + u32 lrmin = 0, lrmax = 0; 235 864 236 - static void mvs_hba_interrupt_disable(struct mvs_info *mvi) 237 - { 238 - void __iomem *regs = mvi->regs; 239 - u32 tmp; 865 + lrmin = (rates->minimum_linkrate << 8); 866 + lrmax = (rates->maximum_linkrate << 12); 240 867 241 - tmp = mr32(GBL_CTL); 242 - 243 - mw32(GBL_CTL, tmp & ~INT_EN); 244 - } 245 - 246 - static int mvs_int_rx(struct mvs_info *mvi, bool self_clear); 247 - 248 - /* move to PCI layer or libata core? 
*/ 249 - static int pci_go_64(struct pci_dev *pdev) 250 - { 251 - int rc; 252 - 253 - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 254 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 255 - if (rc) { 256 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 257 - if (rc) { 258 - dev_printk(KERN_ERR, &pdev->dev, 259 - "64-bit DMA enable failed\n"); 260 - return rc; 868 + if (lrmin) { 869 + tmp &= ~(0xf << 8); 870 + tmp |= lrmin; 261 871 } 872 + if (lrmax) { 873 + tmp &= ~(0xf << 12); 874 + tmp |= lrmax; 875 + } 876 + mvs_write_phy_ctl(mvi, phy_id, tmp); 877 + break; 262 878 } 263 - } else { 264 - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 265 - if (rc) { 266 - dev_printk(KERN_ERR, &pdev->dev, 267 - "32-bit DMA enable failed\n"); 268 - return rc; 269 - } 270 - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 271 - if (rc) { 272 - dev_printk(KERN_ERR, &pdev->dev, 273 - "32-bit consistent DMA enable failed\n"); 274 - return rc; 275 - } 879 + 880 + case PHY_FUNC_HARD_RESET: 881 + if (tmp & PHY_RST_HARD) 882 + break; 883 + mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); 884 + break; 885 + 886 + case PHY_FUNC_LINK_RESET: 887 + mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); 888 + break; 889 + 890 + case PHY_FUNC_DISABLE: 891 + case PHY_FUNC_RELEASE_SPINUP_HOLD: 892 + default: 893 + rc = -EOPNOTSUPP; 276 894 } 277 895 278 896 return rc; 279 - } 280 - 281 - static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) 282 - { 283 - if (task->lldd_task) { 284 - struct mvs_slot_info *slot; 285 - slot = (struct mvs_slot_info *) task->lldd_task; 286 - *tag = slot - mvi->slot_info; 287 - return 1; 288 - } 289 - return 0; 290 - } 291 - 292 - static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) 293 - { 294 - void *bitmap = (void *) &mvi->tags; 295 - clear_bit(tag, bitmap); 296 - } 297 - 298 - static void mvs_tag_free(struct mvs_info *mvi, u32 tag) 299 - { 300 - mvs_tag_clear(mvi, tag); 301 - } 302 - 303 - static void 
mvs_tag_set(struct mvs_info *mvi, unsigned int tag) 304 - { 305 - void *bitmap = (void *) &mvi->tags; 306 - set_bit(tag, bitmap); 307 - } 308 - 309 - static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) 310 - { 311 - unsigned int index, tag; 312 - void *bitmap = (void *) &mvi->tags; 313 - 314 - index = find_first_zero_bit(bitmap, MVS_SLOTS); 315 - tag = index; 316 - if (tag >= MVS_SLOTS) 317 - return -SAS_QUEUE_FULL; 318 - mvs_tag_set(mvi, tag); 319 - *tag_out = tag; 320 - return 0; 321 - } 322 - 323 - static void mvs_tag_init(struct mvs_info *mvi) 324 - { 325 - int i; 326 - for (i = 0; i < MVS_SLOTS; ++i) 327 - mvs_tag_clear(mvi, i); 328 - } 329 - 330 - #ifndef MVS_DISABLE_NVRAM 331 - static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data) 332 - { 333 - int timeout = 1000; 334 - 335 - if (addr & ~SPI_ADDR_MASK) 336 - return -EINVAL; 337 - 338 - writel(addr, regs + SPI_CMD); 339 - writel(TWSI_RD, regs + SPI_CTL); 340 - 341 - while (timeout-- > 0) { 342 - if (readl(regs + SPI_CTL) & TWSI_RDY) { 343 - *data = readl(regs + SPI_DATA); 344 - return 0; 345 - } 346 - 347 - udelay(10); 348 - } 349 - 350 - return -EBUSY; 351 - } 352 - 353 - static int mvs_eep_read_buf(void __iomem *regs, u32 addr, 354 - void *buf, u32 buflen) 355 - { 356 - u32 addr_end, tmp_addr, i, j; 357 - u32 tmp = 0; 358 - int rc; 359 - u8 *tmp8, *buf8 = buf; 360 - 361 - addr_end = addr + buflen; 362 - tmp_addr = ALIGN(addr, 4); 363 - if (addr > 0xff) 364 - return -EINVAL; 365 - 366 - j = addr & 0x3; 367 - if (j) { 368 - rc = mvs_eep_read(regs, tmp_addr, &tmp); 369 - if (rc) 370 - return rc; 371 - 372 - tmp8 = (u8 *)&tmp; 373 - for (i = j; i < 4; i++) 374 - *buf8++ = tmp8[i]; 375 - 376 - tmp_addr += 4; 377 - } 378 - 379 - for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { 380 - rc = mvs_eep_read(regs, tmp_addr, &tmp); 381 - if (rc) 382 - return rc; 383 - 384 - memcpy(buf8, &tmp, 4); 385 - buf8 += 4; 386 - } 387 - 388 - if (tmp_addr < addr_end) { 389 - rc = mvs_eep_read(regs, 
tmp_addr, &tmp); 390 - if (rc) 391 - return rc; 392 - 393 - tmp8 = (u8 *)&tmp; 394 - j = addr_end - tmp_addr; 395 - for (i = 0; i < j; i++) 396 - *buf8++ = tmp8[i]; 397 - 398 - tmp_addr += 4; 399 - } 400 - 401 - return 0; 402 - } 403 - #endif 404 - 405 - static int mvs_nvram_read(struct mvs_info *mvi, u32 addr, 406 - void *buf, u32 buflen) 407 - { 408 - #ifndef MVS_DISABLE_NVRAM 409 - void __iomem *regs = mvi->regs; 410 - int rc, i; 411 - u32 sum; 412 - u8 hdr[2], *tmp; 413 - const char *msg; 414 - 415 - rc = mvs_eep_read_buf(regs, addr, &hdr, 2); 416 - if (rc) { 417 - msg = "nvram hdr read failed"; 418 - goto err_out; 419 - } 420 - rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); 421 - if (rc) { 422 - msg = "nvram read failed"; 423 - goto err_out; 424 - } 425 - 426 - if (hdr[0] != 0x5A) { 427 - /* entry id */ 428 - msg = "invalid nvram entry id"; 429 - rc = -ENOENT; 430 - goto err_out; 431 - } 432 - 433 - tmp = buf; 434 - sum = ((u32)hdr[0]) + ((u32)hdr[1]); 435 - for (i = 0; i < buflen; i++) 436 - sum += ((u32)tmp[i]); 437 - 438 - if (sum) { 439 - msg = "nvram checksum failure"; 440 - rc = -EILSEQ; 441 - goto err_out; 442 - } 443 - 444 - return 0; 445 - 446 - err_out: 447 - dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg); 448 - return rc; 449 - #else 450 - /* FIXME , For SAS target mode */ 451 - memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); 452 - return 0; 453 - #endif 454 897 } 455 898 456 899 static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) ··· 304 1111 PORTE_BYTES_DMAED); 305 1112 } 306 1113 307 - static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) 308 - { 309 - /* give the phy enabling interrupt event time to come in (1s 310 - * is empirically about all it takes) */ 311 - if (time < HZ) 312 - return 0; 313 - /* Wait for discovery to finish */ 314 - scsi_flush_work(shost); 315 - return 1; 316 - } 317 - 318 - static void mvs_scan_start(struct Scsi_Host *shost) 319 - { 320 - int i; 321 - struct mvs_info *mvi = 
SHOST_TO_SAS_HA(shost)->lldd_ha; 322 - 323 - for (i = 0; i < mvi->chip->n_phy; ++i) { 324 - mvs_bytes_dmaed(mvi, i); 325 - } 326 - } 327 - 328 - static int mvs_slave_configure(struct scsi_device *sdev) 1114 + int mvs_slave_configure(struct scsi_device *sdev) 329 1115 { 330 1116 struct domain_device *dev = sdev_to_domain_dev(sdev); 331 1117 int ret = sas_slave_configure(sdev); ··· 323 1151 return 0; 324 1152 } 325 1153 326 - static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) 1154 + void mvs_scan_start(struct Scsi_Host *shost) 327 1155 { 328 - struct pci_dev *pdev = mvi->pdev; 329 - struct sas_ha_struct *sas_ha = &mvi->sas; 330 - struct mvs_phy *phy = &mvi->phy[phy_no]; 331 - struct asd_sas_phy *sas_phy = &phy->sas_phy; 332 - 333 - phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no); 334 - /* 335 - * events is port event now , 336 - * we need check the interrupt status which belongs to per port. 337 - */ 338 - dev_printk(KERN_DEBUG, &pdev->dev, 339 - "Port %d Event = %X\n", 340 - phy_no, phy->irq_status); 341 - 342 - if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { 343 - mvs_release_task(mvi, phy_no); 344 - if (!mvs_is_phy_ready(mvi, phy_no)) { 345 - sas_phy_disconnected(sas_phy); 346 - sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); 347 - dev_printk(KERN_INFO, &pdev->dev, 348 - "Port %d Unplug Notice\n", phy_no); 349 - 350 - } else 351 - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); 352 - } 353 - if (!(phy->irq_status & PHYEV_DEC_ERR)) { 354 - if (phy->irq_status & PHYEV_COMWAKE) { 355 - u32 tmp = mvs_read_port_irq_mask(mvi, phy_no); 356 - mvs_write_port_irq_mask(mvi, phy_no, 357 - tmp | PHYEV_SIG_FIS); 358 - } 359 - if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { 360 - phy->phy_status = mvs_is_phy_ready(mvi, phy_no); 361 - if (phy->phy_status) { 362 - mvs_detect_porttype(mvi, phy_no); 363 - 364 - if (phy->phy_type & PORT_TYPE_SATA) { 365 - u32 tmp = mvs_read_port_irq_mask(mvi, 366 - phy_no); 367 - tmp &= 
~PHYEV_SIG_FIS; 368 - mvs_write_port_irq_mask(mvi, 369 - phy_no, tmp); 370 - } 371 - 372 - mvs_update_phyinfo(mvi, phy_no, 0); 373 - sas_ha->notify_phy_event(sas_phy, 374 - PHYE_OOB_DONE); 375 - mvs_bytes_dmaed(mvi, phy_no); 376 - } else { 377 - dev_printk(KERN_DEBUG, &pdev->dev, 378 - "plugin interrupt but phy is gone\n"); 379 - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, 380 - NULL); 381 - } 382 - } else if (phy->irq_status & PHYEV_BROAD_CH) { 383 - mvs_release_task(mvi, phy_no); 384 - sas_ha->notify_port_event(sas_phy, 385 - PORTE_BROADCAST_RCVD); 386 - } 387 - } 388 - mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status); 389 - } 390 - 391 - static void mvs_int_sata(struct mvs_info *mvi) 392 - { 393 - u32 tmp; 394 - void __iomem *regs = mvi->regs; 395 - tmp = mr32(INT_STAT_SRS); 396 - mw32(INT_STAT_SRS, tmp & 0xFFFF); 397 - } 398 - 399 - static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, 400 - u32 slot_idx) 401 - { 402 - void __iomem *regs = mvi->regs; 403 - struct domain_device *dev = task->dev; 404 - struct asd_sas_port *sas_port = dev->port; 405 - struct mvs_port *port = mvi->slot_info[slot_idx].port; 406 - u32 reg_set, phy_mask; 407 - 408 - if (!sas_protocol_ata(task->task_proto)) { 409 - reg_set = 0; 410 - phy_mask = (port->wide_port_phymap) ? 
port->wide_port_phymap : 411 - sas_port->phy_mask; 412 - } else { 413 - reg_set = port->taskfileset; 414 - phy_mask = sas_port->phy_mask; 415 - } 416 - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx | 417 - (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) | 418 - (phy_mask << TXQ_PHY_SHIFT) | 419 - (reg_set << TXQ_SRS_SHIFT)); 420 - 421 - mw32(TX_PROD_IDX, mvi->tx_prod); 422 - mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); 423 - } 424 - 425 - static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, 426 - u32 slot_idx, int err) 427 - { 428 - struct mvs_port *port = mvi->slot_info[slot_idx].port; 429 - struct task_status_struct *tstat = &task->task_status; 430 - struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; 431 - int stat = SAM_GOOD; 432 - 433 - resp->frame_len = sizeof(struct dev_to_host_fis); 434 - memcpy(&resp->ending_fis[0], 435 - SATA_RECEIVED_D2H_FIS(port->taskfileset), 436 - sizeof(struct dev_to_host_fis)); 437 - tstat->buf_valid_size = sizeof(*resp); 438 - if (unlikely(err)) 439 - stat = SAS_PROTO_RESPONSE; 440 - return stat; 441 - } 442 - 443 - static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) 444 - { 445 - u32 slot_idx = rx_desc & RXQ_SLOT_MASK; 446 - mvs_tag_clear(mvi, slot_idx); 447 - } 448 - 449 - static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, 450 - struct mvs_slot_info *slot, u32 slot_idx) 451 - { 452 - if (!sas_protocol_ata(task->task_proto)) 453 - if (slot->n_elem) 454 - pci_unmap_sg(mvi->pdev, task->scatter, 455 - slot->n_elem, task->data_dir); 456 - 457 - switch (task->task_proto) { 458 - case SAS_PROTOCOL_SMP: 459 - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, 460 - PCI_DMA_FROMDEVICE); 461 - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, 462 - PCI_DMA_TODEVICE); 463 - break; 464 - 465 - case SAS_PROTOCOL_SATA: 466 - case SAS_PROTOCOL_STP: 467 - case SAS_PROTOCOL_SSP: 468 - default: 469 - /* do nothing */ 470 - break; 471 - } 472 - 
list_del(&slot->list); 473 - task->lldd_task = NULL; 474 - slot->task = NULL; 475 - slot->port = NULL; 476 - } 477 - 478 - static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, 479 - u32 slot_idx) 480 - { 481 - struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; 482 - u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); 483 - u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4)); 484 - int stat = SAM_CHECK_COND; 485 - 486 - if (err_dw1 & SLOT_BSY_ERR) { 487 - stat = SAS_QUEUE_FULL; 488 - mvs_slot_reset(mvi, task, slot_idx); 489 - } 490 - switch (task->task_proto) { 491 - case SAS_PROTOCOL_SSP: 492 - break; 493 - case SAS_PROTOCOL_SMP: 494 - break; 495 - case SAS_PROTOCOL_SATA: 496 - case SAS_PROTOCOL_STP: 497 - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 498 - if (err_dw0 & TFILE_ERR) 499 - stat = mvs_sata_done(mvi, task, slot_idx, 1); 500 - break; 501 - default: 502 - break; 503 - } 504 - 505 - mvs_hexdump(16, (u8 *) slot->response, 0); 506 - return stat; 507 - } 508 - 509 - static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) 510 - { 511 - u32 slot_idx = rx_desc & RXQ_SLOT_MASK; 512 - struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; 513 - struct sas_task *task = slot->task; 514 - struct task_status_struct *tstat; 515 - struct mvs_port *port; 516 - bool aborted; 517 - void *to; 518 - 519 - if (unlikely(!task || !task->lldd_task)) 520 - return -1; 521 - 522 - mvs_hba_cq_dump(mvi); 523 - 524 - spin_lock(&task->task_state_lock); 525 - aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; 526 - if (!aborted) { 527 - task->task_state_flags &= 528 - ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 529 - task->task_state_flags |= SAS_TASK_STATE_DONE; 530 - } 531 - spin_unlock(&task->task_state_lock); 532 - 533 - if (aborted) { 534 - mvs_slot_task_free(mvi, task, slot, slot_idx); 535 - mvs_slot_free(mvi, rx_desc); 536 - return -1; 537 - } 538 - 539 - port = slot->port; 540 - tstat = &task->task_status; 541 - 
memset(tstat, 0, sizeof(*tstat)); 542 - tstat->resp = SAS_TASK_COMPLETE; 543 - 544 - if (unlikely(!port->port_attached || flags)) { 545 - mvs_slot_err(mvi, task, slot_idx); 546 - if (!sas_protocol_ata(task->task_proto)) 547 - tstat->stat = SAS_PHY_DOWN; 548 - goto out; 549 - } 550 - 551 - /* error info record present */ 552 - if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { 553 - tstat->stat = mvs_slot_err(mvi, task, slot_idx); 554 - goto out; 555 - } 556 - 557 - switch (task->task_proto) { 558 - case SAS_PROTOCOL_SSP: 559 - /* hw says status == 0, datapres == 0 */ 560 - if (rx_desc & RXQ_GOOD) { 561 - tstat->stat = SAM_GOOD; 562 - tstat->resp = SAS_TASK_COMPLETE; 563 - } 564 - /* response frame present */ 565 - else if (rx_desc & RXQ_RSP) { 566 - struct ssp_response_iu *iu = 567 - slot->response + sizeof(struct mvs_err_info); 568 - sas_ssp_task_response(&mvi->pdev->dev, task, iu); 569 - } 570 - 571 - /* should never happen? */ 572 - else 573 - tstat->stat = SAM_CHECK_COND; 574 - break; 575 - 576 - case SAS_PROTOCOL_SMP: { 577 - struct scatterlist *sg_resp = &task->smp_task.smp_resp; 578 - tstat->stat = SAM_GOOD; 579 - to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); 580 - memcpy(to + sg_resp->offset, 581 - slot->response + sizeof(struct mvs_err_info), 582 - sg_dma_len(sg_resp)); 583 - kunmap_atomic(to, KM_IRQ0); 584 - break; 585 - } 586 - 587 - case SAS_PROTOCOL_SATA: 588 - case SAS_PROTOCOL_STP: 589 - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { 590 - tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); 591 - break; 592 - } 593 - 594 - default: 595 - tstat->stat = SAM_CHECK_COND; 596 - break; 597 - } 598 - 599 - out: 600 - mvs_slot_task_free(mvi, task, slot, slot_idx); 601 - if (unlikely(tstat->stat != SAS_QUEUE_FULL)) 602 - mvs_slot_free(mvi, rx_desc); 603 - 604 - spin_unlock(&mvi->lock); 605 - task->task_done(task); 606 - spin_lock(&mvi->lock); 607 - return tstat->stat; 608 - } 609 - 610 - static void mvs_release_task(struct mvs_info *mvi, int 
phy_no) 611 - { 612 - struct list_head *pos, *n; 613 - struct mvs_slot_info *slot; 614 - struct mvs_phy *phy = &mvi->phy[phy_no]; 615 - struct mvs_port *port = phy->port; 616 - u32 rx_desc; 617 - 618 - if (!port) 619 - return; 620 - 621 - list_for_each_safe(pos, n, &port->list) { 622 - slot = container_of(pos, struct mvs_slot_info, list); 623 - rx_desc = (u32) (slot - mvi->slot_info); 624 - mvs_slot_complete(mvi, rx_desc, 1); 625 - } 626 - } 627 - 628 - static void mvs_int_full(struct mvs_info *mvi) 629 - { 630 - void __iomem *regs = mvi->regs; 631 - u32 tmp, stat; 632 1156 int i; 1157 + struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; 633 1158 634 - stat = mr32(INT_STAT); 635 - 636 - mvs_int_rx(mvi, false); 637 - 638 - for (i = 0; i < MVS_MAX_PORTS; i++) { 639 - tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); 640 - if (tmp) 641 - mvs_int_port(mvi, i, tmp); 1159 + for (i = 0; i < mvi->chip->n_phy; ++i) { 1160 + mvs_bytes_dmaed(mvi, i); 642 1161 } 643 - 644 - if (stat & CINT_SRS) 645 - mvs_int_sata(mvi); 646 - 647 - mw32(INT_STAT, stat); 648 1162 } 649 1163 650 - static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) 1164 + int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) 651 1165 { 652 - void __iomem *regs = mvi->regs; 653 - u32 rx_prod_idx, rx_desc; 654 - bool attn = false; 655 - struct pci_dev *pdev = mvi->pdev; 656 - 657 - /* the first dword in the RX ring is special: it contains 658 - * a mirror of the hardware's RX producer index, so that 659 - * we don't have to stall the CPU reading that register. 660 - * The actual RX ring is offset by one dword, due to this. 
661 - */ 662 - rx_prod_idx = mvi->rx_cons; 663 - mvi->rx_cons = le32_to_cpu(mvi->rx[0]); 664 - if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ 1166 + /* give the phy enabling interrupt event time to come in (1s 1167 + * is empirically about all it takes) */ 1168 + if (time < HZ) 665 1169 return 0; 666 - 667 - /* The CMPL_Q may come late, read from register and try again 668 - * note: if coalescing is enabled, 669 - * it will need to read from register every time for sure 670 - */ 671 - if (mvi->rx_cons == rx_prod_idx) 672 - mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; 673 - 674 - if (mvi->rx_cons == rx_prod_idx) 675 - return 0; 676 - 677 - while (mvi->rx_cons != rx_prod_idx) { 678 - 679 - /* increment our internal RX consumer pointer */ 680 - rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); 681 - 682 - rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); 683 - 684 - if (likely(rx_desc & RXQ_DONE)) 685 - mvs_slot_complete(mvi, rx_desc, 0); 686 - if (rx_desc & RXQ_ATTN) { 687 - attn = true; 688 - dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n", 689 - rx_desc); 690 - } else if (rx_desc & RXQ_ERR) { 691 - if (!(rx_desc & RXQ_DONE)) 692 - mvs_slot_complete(mvi, rx_desc, 0); 693 - dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", 694 - rx_desc); 695 - } else if (rx_desc & RXQ_SLOT_RESET) { 696 - dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n", 697 - rx_desc); 698 - mvs_slot_free(mvi, rx_desc); 699 - } 700 - } 701 - 702 - if (attn && self_clear) 703 - mvs_int_full(mvi); 704 - 705 - return 0; 1170 + /* Wait for discovery to finish */ 1171 + scsi_flush_work(shost); 1172 + return 1; 706 1173 } 707 - 708 - #ifdef MVS_USE_TASKLET 709 - static void mvs_tasklet(unsigned long data) 710 - { 711 - struct mvs_info *mvi = (struct mvs_info *) data; 712 - unsigned long flags; 713 - 714 - spin_lock_irqsave(&mvi->lock, flags); 715 - 716 - #ifdef MVS_DISABLE_MSI 717 - mvs_int_full(mvi); 718 - #else 719 - mvs_int_rx(mvi, true); 720 - #endif 721 - 
spin_unlock_irqrestore(&mvi->lock, flags); 722 - } 723 - #endif 724 - 725 - static irqreturn_t mvs_interrupt(int irq, void *opaque) 726 - { 727 - struct mvs_info *mvi = opaque; 728 - void __iomem *regs = mvi->regs; 729 - u32 stat; 730 - 731 - stat = mr32(GBL_INT_STAT); 732 - 733 - if (stat == 0 || stat == 0xffffffff) 734 - return IRQ_NONE; 735 - 736 - /* clear CMD_CMPLT ASAP */ 737 - mw32_f(INT_STAT, CINT_DONE); 738 - 739 - #ifndef MVS_USE_TASKLET 740 - spin_lock(&mvi->lock); 741 - 742 - mvs_int_full(mvi); 743 - 744 - spin_unlock(&mvi->lock); 745 - #else 746 - tasklet_schedule(&mvi->tasklet); 747 - #endif 748 - return IRQ_HANDLED; 749 - } 750 - 751 - #ifndef MVS_DISABLE_MSI 752 - static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) 753 - { 754 - struct mvs_info *mvi = opaque; 755 - 756 - #ifndef MVS_USE_TASKLET 757 - spin_lock(&mvi->lock); 758 - 759 - mvs_int_rx(mvi, true); 760 - 761 - spin_unlock(&mvi->lock); 762 - #else 763 - tasklet_schedule(&mvi->tasklet); 764 - #endif 765 - return IRQ_HANDLED; 766 - } 767 - #endif 768 - 769 - struct mvs_task_exec_info { 770 - struct sas_task *task; 771 - struct mvs_cmd_hdr *hdr; 772 - struct mvs_port *port; 773 - u32 tag; 774 - int n_elem; 775 - }; 776 1174 777 1175 static int mvs_task_prep_smp(struct mvs_info *mvi, 778 1176 struct mvs_task_exec_info *tei) ··· 468 1726 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, 469 1727 PCI_DMA_TODEVICE); 470 1728 return rc; 471 - } 472 - 473 - static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port) 474 - { 475 - void __iomem *regs = mvi->regs; 476 - u32 tmp, offs; 477 - u8 *tfs = &port->taskfileset; 478 - 479 - if (*tfs == MVS_ID_NOT_MAPPED) 480 - return; 481 - 482 - offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); 483 - if (*tfs < 16) { 484 - tmp = mr32(PCS); 485 - mw32(PCS, tmp & ~offs); 486 - } else { 487 - tmp = mr32(CTL); 488 - mw32(CTL, tmp & ~offs); 489 - } 490 - 491 - tmp = mr32(INT_STAT_SRS) & (1U << *tfs); 492 - if (tmp) 493 - 
mw32(INT_STAT_SRS, tmp); 494 - 495 - *tfs = MVS_ID_NOT_MAPPED; 496 - } 497 - 498 - static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port) 499 - { 500 - int i; 501 - u32 tmp, offs; 502 - void __iomem *regs = mvi->regs; 503 - 504 - if (port->taskfileset != MVS_ID_NOT_MAPPED) 505 - return 0; 506 - 507 - tmp = mr32(PCS); 508 - 509 - for (i = 0; i < mvi->chip->srs_sz; i++) { 510 - if (i == 16) 511 - tmp = mr32(CTL); 512 - offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); 513 - if (!(tmp & offs)) { 514 - port->taskfileset = i; 515 - 516 - if (i < 16) 517 - mw32(PCS, tmp | offs); 518 - else 519 - mw32(CTL, tmp | offs); 520 - tmp = mr32(INT_STAT_SRS) & (1U << i); 521 - if (tmp) 522 - mw32(INT_STAT_SRS, tmp); 523 - return 0; 524 - } 525 - } 526 - return MVS_ID_NOT_MAPPED; 527 1729 } 528 1730 529 1731 static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) ··· 723 2037 return 0; 724 2038 } 725 2039 726 - static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) 2040 + int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) 727 2041 { 728 2042 struct domain_device *dev = task->dev; 729 2043 struct mvs_info *mvi = dev->port->ha->lldd_ha; ··· 843 2157 return rc; 844 2158 } 845 2159 846 - static int mvs_task_abort(struct sas_task *task) 2160 + static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) 847 2161 { 848 - int rc; 849 - unsigned long flags; 850 - struct mvs_info *mvi = task->dev->port->ha->lldd_ha; 851 - struct pci_dev *pdev = mvi->pdev; 852 - int tag; 2162 + u32 slot_idx = rx_desc & RXQ_SLOT_MASK; 2163 + mvs_tag_clear(mvi, slot_idx); 2164 + } 853 2165 854 - spin_lock_irqsave(&task->task_state_lock, flags); 855 - if (task->task_state_flags & SAS_TASK_STATE_DONE) { 856 - rc = TMF_RESP_FUNC_COMPLETE; 857 - spin_unlock_irqrestore(&task->task_state_lock, flags); 858 - goto out_done; 859 - } 860 - spin_unlock_irqrestore(&task->task_state_lock, flags); 2166 + static void mvs_slot_task_free(struct mvs_info 
*mvi, struct sas_task *task, 2167 + struct mvs_slot_info *slot, u32 slot_idx) 2168 + { 2169 + if (!sas_protocol_ata(task->task_proto)) 2170 + if (slot->n_elem) 2171 + pci_unmap_sg(mvi->pdev, task->scatter, 2172 + slot->n_elem, task->data_dir); 861 2173 862 2174 switch (task->task_proto) { 863 2175 case SAS_PROTOCOL_SMP: 864 - dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n"); 2176 + pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, 2177 + PCI_DMA_FROMDEVICE); 2178 + pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, 2179 + PCI_DMA_TODEVICE); 865 2180 break; 866 - case SAS_PROTOCOL_SSP: 867 - dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n"); 868 - break; 2181 + 869 2182 case SAS_PROTOCOL_SATA: 870 2183 case SAS_PROTOCOL_STP: 871 - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{ 872 - dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n"); 873 - #if _MV_DUMP 874 - dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n"); 875 - mvs_hexdump(sizeof(struct host_to_dev_fis), 876 - (void *)&task->ata_task.fis, 0); 877 - dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n"); 878 - mvs_hexdump(16, task->ata_task.atapi_packet, 0); 879 - #endif 880 - spin_lock_irqsave(&task->task_state_lock, flags); 881 - if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { 882 - /* TODO */ 883 - ; 884 - } 885 - spin_unlock_irqrestore(&task->task_state_lock, flags); 886 - break; 887 - } 2184 + case SAS_PROTOCOL_SSP: 888 2185 default: 2186 + /* do nothing */ 889 2187 break; 890 2188 } 891 - 892 - if (mvs_find_tag(mvi, task, &tag)) { 893 - spin_lock_irqsave(&mvi->lock, flags); 894 - mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag); 895 - spin_unlock_irqrestore(&mvi->lock, flags); 896 - } 897 - if (!mvs_task_exec(task, 1, GFP_ATOMIC)) 898 - rc = TMF_RESP_FUNC_COMPLETE; 899 - else 900 - rc = TMF_RESP_FUNC_FAILED; 901 - out_done: 902 - return rc; 903 - } 904 - 905 - static void mvs_free(struct mvs_info *mvi) 906 - { 907 - int i; 908 - 909 - if (!mvi) 910 - return; 911 - 912 - for (i 
= 0; i < MVS_SLOTS; i++) { 913 - struct mvs_slot_info *slot = &mvi->slot_info[i]; 914 - 915 - if (slot->buf) 916 - dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ, 917 - slot->buf, slot->buf_dma); 918 - } 919 - 920 - if (mvi->tx) 921 - dma_free_coherent(&mvi->pdev->dev, 922 - sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, 923 - mvi->tx, mvi->tx_dma); 924 - if (mvi->rx_fis) 925 - dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, 926 - mvi->rx_fis, mvi->rx_fis_dma); 927 - if (mvi->rx) 928 - dma_free_coherent(&mvi->pdev->dev, 929 - sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), 930 - mvi->rx, mvi->rx_dma); 931 - if (mvi->slot) 932 - dma_free_coherent(&mvi->pdev->dev, 933 - sizeof(*mvi->slot) * MVS_SLOTS, 934 - mvi->slot, mvi->slot_dma); 935 - #ifdef MVS_ENABLE_PERI 936 - if (mvi->peri_regs) 937 - iounmap(mvi->peri_regs); 938 - #endif 939 - if (mvi->regs) 940 - iounmap(mvi->regs); 941 - if (mvi->shost) 942 - scsi_host_put(mvi->shost); 943 - kfree(mvi->sas.sas_port); 944 - kfree(mvi->sas.sas_phy); 945 - kfree(mvi); 946 - } 947 - 948 - /* FIXME: locking? 
*/ 949 - static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, 950 - void *funcdata) 951 - { 952 - struct mvs_info *mvi = sas_phy->ha->lldd_ha; 953 - int rc = 0, phy_id = sas_phy->id; 954 - u32 tmp; 955 - 956 - tmp = mvs_read_phy_ctl(mvi, phy_id); 957 - 958 - switch (func) { 959 - case PHY_FUNC_SET_LINK_RATE:{ 960 - struct sas_phy_linkrates *rates = funcdata; 961 - u32 lrmin = 0, lrmax = 0; 962 - 963 - lrmin = (rates->minimum_linkrate << 8); 964 - lrmax = (rates->maximum_linkrate << 12); 965 - 966 - if (lrmin) { 967 - tmp &= ~(0xf << 8); 968 - tmp |= lrmin; 969 - } 970 - if (lrmax) { 971 - tmp &= ~(0xf << 12); 972 - tmp |= lrmax; 973 - } 974 - mvs_write_phy_ctl(mvi, phy_id, tmp); 975 - break; 976 - } 977 - 978 - case PHY_FUNC_HARD_RESET: 979 - if (tmp & PHY_RST_HARD) 980 - break; 981 - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); 982 - break; 983 - 984 - case PHY_FUNC_LINK_RESET: 985 - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); 986 - break; 987 - 988 - case PHY_FUNC_DISABLE: 989 - case PHY_FUNC_RELEASE_SPINUP_HOLD: 990 - default: 991 - rc = -EOPNOTSUPP; 992 - } 993 - 994 - return rc; 995 - } 996 - 997 - static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) 998 - { 999 - struct mvs_phy *phy = &mvi->phy[phy_id]; 1000 - struct asd_sas_phy *sas_phy = &phy->sas_phy; 1001 - 1002 - sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 
1 : 0; 1003 - sas_phy->class = SAS; 1004 - sas_phy->iproto = SAS_PROTOCOL_ALL; 1005 - sas_phy->tproto = 0; 1006 - sas_phy->type = PHY_TYPE_PHYSICAL; 1007 - sas_phy->role = PHY_ROLE_INITIATOR; 1008 - sas_phy->oob_mode = OOB_NOT_CONNECTED; 1009 - sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 1010 - 1011 - sas_phy->id = phy_id; 1012 - sas_phy->sas_addr = &mvi->sas_addr[0]; 1013 - sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 1014 - sas_phy->ha = &mvi->sas; 1015 - sas_phy->lldd_phy = phy; 1016 - } 1017 - 1018 - static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev, 1019 - const struct pci_device_id *ent) 1020 - { 1021 - struct mvs_info *mvi; 1022 - unsigned long res_start, res_len, res_flag; 1023 - struct asd_sas_phy **arr_phy; 1024 - struct asd_sas_port **arr_port; 1025 - const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data]; 1026 - int i; 1027 - 1028 - /* 1029 - * alloc and init our per-HBA mvs_info struct 1030 - */ 1031 - 1032 - mvi = kzalloc(sizeof(*mvi), GFP_KERNEL); 1033 - if (!mvi) 1034 - return NULL; 1035 - 1036 - spin_lock_init(&mvi->lock); 1037 - #ifdef MVS_USE_TASKLET 1038 - tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi); 1039 - #endif 1040 - mvi->pdev = pdev; 1041 - mvi->chip = chip; 1042 - 1043 - if (pdev->device == 0x6440 && pdev->revision == 0) 1044 - mvi->flags |= MVF_PHY_PWR_FIX; 1045 - 1046 - /* 1047 - * alloc and init SCSI, SAS glue 1048 - */ 1049 - 1050 - mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); 1051 - if (!mvi->shost) 1052 - goto err_out; 1053 - 1054 - arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); 1055 - arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); 1056 - if (!arr_phy || !arr_port) 1057 - goto err_out; 1058 - 1059 - for (i = 0; i < MVS_MAX_PHYS; i++) { 1060 - mvs_phy_init(mvi, i); 1061 - arr_phy[i] = &mvi->phy[i].sas_phy; 1062 - arr_port[i] = &mvi->port[i].sas_port; 1063 - mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED; 1064 - mvi->port[i].wide_port_phymap = 0; 1065 - 
mvi->port[i].port_attached = 0; 1066 - INIT_LIST_HEAD(&mvi->port[i].list); 1067 - } 1068 - 1069 - SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; 1070 - mvi->shost->transportt = mvs_stt; 1071 - mvi->shost->max_id = 21; 1072 - mvi->shost->max_lun = ~0; 1073 - mvi->shost->max_channel = 0; 1074 - mvi->shost->max_cmd_len = 16; 1075 - 1076 - mvi->sas.sas_ha_name = DRV_NAME; 1077 - mvi->sas.dev = &pdev->dev; 1078 - mvi->sas.lldd_module = THIS_MODULE; 1079 - mvi->sas.sas_addr = &mvi->sas_addr[0]; 1080 - mvi->sas.sas_phy = arr_phy; 1081 - mvi->sas.sas_port = arr_port; 1082 - mvi->sas.num_phys = chip->n_phy; 1083 - mvi->sas.lldd_max_execute_num = 1; 1084 - mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE; 1085 - mvi->shost->can_queue = MVS_CAN_QUEUE; 1086 - mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys; 1087 - mvi->sas.lldd_ha = mvi; 1088 - mvi->sas.core.shost = mvi->shost; 1089 - 1090 - mvs_tag_init(mvi); 1091 - 1092 - /* 1093 - * ioremap main and peripheral registers 1094 - */ 1095 - 1096 - #ifdef MVS_ENABLE_PERI 1097 - res_start = pci_resource_start(pdev, 2); 1098 - res_len = pci_resource_len(pdev, 2); 1099 - if (!res_start || !res_len) 1100 - goto err_out; 1101 - 1102 - mvi->peri_regs = ioremap_nocache(res_start, res_len); 1103 - if (!mvi->peri_regs) 1104 - goto err_out; 1105 - #endif 1106 - 1107 - res_start = pci_resource_start(pdev, 4); 1108 - res_len = pci_resource_len(pdev, 4); 1109 - if (!res_start || !res_len) 1110 - goto err_out; 1111 - 1112 - res_flag = pci_resource_flags(pdev, 4); 1113 - if (res_flag & IORESOURCE_CACHEABLE) 1114 - mvi->regs = ioremap(res_start, res_len); 1115 - else 1116 - mvi->regs = ioremap_nocache(res_start, res_len); 1117 - 1118 - if (!mvi->regs) 1119 - goto err_out; 1120 - 1121 - /* 1122 - * alloc and init our DMA areas 1123 - */ 1124 - 1125 - mvi->tx = dma_alloc_coherent(&pdev->dev, 1126 - sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, 1127 - &mvi->tx_dma, GFP_KERNEL); 1128 - if (!mvi->tx) 1129 - goto err_out; 1130 - memset(mvi->tx, 0, sizeof(*mvi->tx) 
* MVS_CHIP_SLOT_SZ); 1131 - 1132 - mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ, 1133 - &mvi->rx_fis_dma, GFP_KERNEL); 1134 - if (!mvi->rx_fis) 1135 - goto err_out; 1136 - memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); 1137 - 1138 - mvi->rx = dma_alloc_coherent(&pdev->dev, 1139 - sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), 1140 - &mvi->rx_dma, GFP_KERNEL); 1141 - if (!mvi->rx) 1142 - goto err_out; 1143 - memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); 1144 - 1145 - mvi->rx[0] = cpu_to_le32(0xfff); 1146 - mvi->rx_cons = 0xfff; 1147 - 1148 - mvi->slot = dma_alloc_coherent(&pdev->dev, 1149 - sizeof(*mvi->slot) * MVS_SLOTS, 1150 - &mvi->slot_dma, GFP_KERNEL); 1151 - if (!mvi->slot) 1152 - goto err_out; 1153 - memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS); 1154 - 1155 - for (i = 0; i < MVS_SLOTS; i++) { 1156 - struct mvs_slot_info *slot = &mvi->slot_info[i]; 1157 - 1158 - slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, 1159 - &slot->buf_dma, GFP_KERNEL); 1160 - if (!slot->buf) 1161 - goto err_out; 1162 - memset(slot->buf, 0, MVS_SLOT_BUF_SZ); 1163 - } 1164 - 1165 - /* finally, read NVRAM to get our SAS address */ 1166 - if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) 1167 - goto err_out; 1168 - return mvi; 1169 - 1170 - err_out: 1171 - mvs_free(mvi); 1172 - return NULL; 1173 - } 1174 - 1175 - static u32 mvs_cr32(void __iomem *regs, u32 addr) 1176 - { 1177 - mw32(CMD_ADDR, addr); 1178 - return mr32(CMD_DATA); 1179 - } 1180 - 1181 - static void mvs_cw32(void __iomem *regs, u32 addr, u32 val) 1182 - { 1183 - mw32(CMD_ADDR, addr); 1184 - mw32(CMD_DATA, val); 1185 - } 1186 - 1187 - static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) 1188 - { 1189 - void __iomem *regs = mvi->regs; 1190 - return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4): 1191 - mr32(P4_SER_CTLSTAT + (port - 4) * 4); 1192 - } 1193 - 1194 - static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) 1195 - { 1196 - void __iomem *regs = mvi->regs; 
1197 - if (port < 4) 1198 - mw32(P0_SER_CTLSTAT + port * 4, val); 1199 - else 1200 - mw32(P4_SER_CTLSTAT + (port - 4) * 4, val); 1201 - } 1202 - 1203 - static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port) 1204 - { 1205 - void __iomem *regs = mvi->regs + off; 1206 - void __iomem *regs2 = mvi->regs + off2; 1207 - return (port < 4)?readl(regs + port * 8): 1208 - readl(regs2 + (port - 4) * 8); 1209 - } 1210 - 1211 - static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, 1212 - u32 port, u32 val) 1213 - { 1214 - void __iomem *regs = mvi->regs + off; 1215 - void __iomem *regs2 = mvi->regs + off2; 1216 - if (port < 4) 1217 - writel(val, regs + port * 8); 1218 - else 1219 - writel(val, regs2 + (port - 4) * 8); 1220 - } 1221 - 1222 - static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) 1223 - { 1224 - return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port); 1225 - } 1226 - 1227 - static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val) 1228 - { 1229 - mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val); 1230 - } 1231 - 1232 - static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr) 1233 - { 1234 - mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr); 1235 - } 1236 - 1237 - static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) 1238 - { 1239 - return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port); 1240 - } 1241 - 1242 - static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val) 1243 - { 1244 - mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val); 1245 - } 1246 - 1247 - static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr) 1248 - { 1249 - mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr); 1250 - } 1251 - 1252 - static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) 1253 - { 1254 - return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port); 
1255 - } 1256 - 1257 - static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val) 1258 - { 1259 - mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val); 1260 - } 1261 - 1262 - static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) 1263 - { 1264 - return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port); 1265 - } 1266 - 1267 - static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val) 1268 - { 1269 - mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val); 1270 - } 1271 - 1272 - static void __devinit mvs_phy_hacks(struct mvs_info *mvi) 1273 - { 1274 - void __iomem *regs = mvi->regs; 1275 - u32 tmp; 1276 - 1277 - /* workaround for SATA R-ERR, to ignore phy glitch */ 1278 - tmp = mvs_cr32(regs, CMD_PHY_TIMER); 1279 - tmp &= ~(1 << 9); 1280 - tmp |= (1 << 10); 1281 - mvs_cw32(regs, CMD_PHY_TIMER, tmp); 1282 - 1283 - /* enable retry 127 times */ 1284 - mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f); 1285 - 1286 - /* extend open frame timeout to max */ 1287 - tmp = mvs_cr32(regs, CMD_SAS_CTL0); 1288 - tmp &= ~0xffff; 1289 - tmp |= 0x3fff; 1290 - mvs_cw32(regs, CMD_SAS_CTL0, tmp); 1291 - 1292 - /* workaround for WDTIMEOUT , set to 550 ms */ 1293 - mvs_cw32(regs, CMD_WD_TIMER, 0x86470); 1294 - 1295 - /* not to halt for different port op during wideport link change */ 1296 - mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d); 1297 - 1298 - /* workaround for Seagate disk not-found OOB sequence, recv 1299 - * COMINIT before sending out COMWAKE */ 1300 - tmp = mvs_cr32(regs, CMD_PHY_MODE_21); 1301 - tmp &= 0x0000ffff; 1302 - tmp |= 0x00fa0000; 1303 - mvs_cw32(regs, CMD_PHY_MODE_21, tmp); 1304 - 1305 - tmp = mvs_cr32(regs, CMD_PHY_TIMER); 1306 - tmp &= 0x1fffffff; 1307 - tmp |= (2U << 29); /* 8 ms retry */ 1308 - mvs_cw32(regs, CMD_PHY_TIMER, tmp); 1309 - 1310 - /* TEST - for phy decoding error, adjust voltage levels */ 1311 - mw32(P0_VSR_ADDR + 0, 0x8); 1312 - mw32(P0_VSR_DATA + 0, 0x2F0); 1313 - 1314 - 
mw32(P0_VSR_ADDR + 8, 0x8); 1315 - mw32(P0_VSR_DATA + 8, 0x2F0); 1316 - 1317 - mw32(P0_VSR_ADDR + 16, 0x8); 1318 - mw32(P0_VSR_DATA + 16, 0x2F0); 1319 - 1320 - mw32(P0_VSR_ADDR + 24, 0x8); 1321 - mw32(P0_VSR_DATA + 24, 0x2F0); 1322 - 1323 - } 1324 - 1325 - static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId) 1326 - { 1327 - void __iomem *regs = mvi->regs; 1328 - u32 tmp; 1329 - 1330 - tmp = mr32(PCS); 1331 - if (mvi->chip->n_phy <= 4) 1332 - tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT); 1333 - else 1334 - tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2); 1335 - mw32(PCS, tmp); 1336 - } 1337 - 1338 - static void mvs_detect_porttype(struct mvs_info *mvi, int i) 1339 - { 1340 - void __iomem *regs = mvi->regs; 1341 - u32 reg; 1342 - struct mvs_phy *phy = &mvi->phy[i]; 1343 - 1344 - /* TODO check & save device type */ 1345 - reg = mr32(GBL_PORT_TYPE); 1346 - 1347 - if (reg & MODE_SAS_SATA & (1 << i)) 1348 - phy->phy_type |= PORT_TYPE_SAS; 1349 - else 1350 - phy->phy_type |= PORT_TYPE_SATA; 1351 - } 1352 - 1353 - static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) 1354 - { 1355 - u32 *s = (u32 *) buf; 1356 - 1357 - if (!s) 1358 - return NULL; 1359 - 1360 - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); 1361 - s[3] = mvs_read_port_cfg_data(mvi, i); 1362 - 1363 - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); 1364 - s[2] = mvs_read_port_cfg_data(mvi, i); 1365 - 1366 - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); 1367 - s[1] = mvs_read_port_cfg_data(mvi, i); 1368 - 1369 - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); 1370 - s[0] = mvs_read_port_cfg_data(mvi, i); 1371 - 1372 - return (void *)s; 1373 - } 1374 - 1375 - static u32 mvs_is_sig_fis_received(u32 irq_status) 1376 - { 1377 - return irq_status & PHYEV_SIG_FIS; 2189 + list_del(&slot->list); 2190 + task->lldd_task = NULL; 2191 + slot->task = NULL; 2192 + slot->port = NULL; 1378 2193 } 1379 2194 1380 2195 static void mvs_update_wideport(struct mvs_info *mvi, int i) ··· 923 2736 phy->phy_type 
&= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); 924 2737 } 925 2738 return 0; 2739 + } 2740 + 2741 + static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) 2742 + { 2743 + u32 *s = (u32 *) buf; 2744 + 2745 + if (!s) 2746 + return NULL; 2747 + 2748 + mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); 2749 + s[3] = mvs_read_port_cfg_data(mvi, i); 2750 + 2751 + mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); 2752 + s[2] = mvs_read_port_cfg_data(mvi, i); 2753 + 2754 + mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); 2755 + s[1] = mvs_read_port_cfg_data(mvi, i); 2756 + 2757 + mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); 2758 + s[0] = mvs_read_port_cfg_data(mvi, i); 2759 + 2760 + return (void *)s; 2761 + } 2762 + 2763 + static u32 mvs_is_sig_fis_received(u32 irq_status) 2764 + { 2765 + return irq_status & PHYEV_SIG_FIS; 926 2766 } 927 2767 928 2768 static void mvs_update_phyinfo(struct mvs_info *mvi, int i, ··· 1060 2846 mvs_write_port_irq_stat(mvi, i, phy->irq_status); 1061 2847 } 1062 2848 1063 - static void mvs_port_formed(struct asd_sas_phy *sas_phy) 2849 + void mvs_port_formed(struct asd_sas_phy *sas_phy) 1064 2850 { 1065 2851 struct sas_ha_struct *sas_ha = sas_phy->ha; 1066 2852 struct mvs_info *mvi = sas_ha->lldd_ha; ··· 1080 2866 spin_unlock_irqrestore(&mvi->lock, flags); 1081 2867 } 1082 2868 1083 - static int mvs_I_T_nexus_reset(struct domain_device *dev) 2869 + int mvs_I_T_nexus_reset(struct domain_device *dev) 1084 2870 { 1085 2871 return TMF_RESP_FUNC_FAILED; 1086 2872 } 1087 2873 1088 - static int __devinit mvs_hw_init(struct mvs_info *mvi) 2874 + static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, 2875 + u32 slot_idx, int err) 2876 + { 2877 + struct mvs_port *port = mvi->slot_info[slot_idx].port; 2878 + struct task_status_struct *tstat = &task->task_status; 2879 + struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; 2880 + int stat = SAM_GOOD; 2881 + 2882 + resp->frame_len = sizeof(struct dev_to_host_fis); 2883 + 
memcpy(&resp->ending_fis[0], 2884 + SATA_RECEIVED_D2H_FIS(port->taskfileset), 2885 + sizeof(struct dev_to_host_fis)); 2886 + tstat->buf_valid_size = sizeof(*resp); 2887 + if (unlikely(err)) 2888 + stat = SAS_PROTO_RESPONSE; 2889 + return stat; 2890 + } 2891 + 2892 + static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, 2893 + u32 slot_idx) 2894 + { 2895 + struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; 2896 + u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); 2897 + u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4)); 2898 + int stat = SAM_CHECK_COND; 2899 + 2900 + if (err_dw1 & SLOT_BSY_ERR) { 2901 + stat = SAS_QUEUE_FULL; 2902 + mvs_slot_reset(mvi, task, slot_idx); 2903 + } 2904 + switch (task->task_proto) { 2905 + case SAS_PROTOCOL_SSP: 2906 + break; 2907 + case SAS_PROTOCOL_SMP: 2908 + break; 2909 + case SAS_PROTOCOL_SATA: 2910 + case SAS_PROTOCOL_STP: 2911 + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 2912 + if (err_dw0 & TFILE_ERR) 2913 + stat = mvs_sata_done(mvi, task, slot_idx, 1); 2914 + break; 2915 + default: 2916 + break; 2917 + } 2918 + 2919 + mvs_hexdump(16, (u8 *) slot->response, 0); 2920 + return stat; 2921 + } 2922 + 2923 + static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) 2924 + { 2925 + u32 slot_idx = rx_desc & RXQ_SLOT_MASK; 2926 + struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; 2927 + struct sas_task *task = slot->task; 2928 + struct task_status_struct *tstat; 2929 + struct mvs_port *port; 2930 + bool aborted; 2931 + void *to; 2932 + 2933 + if (unlikely(!task || !task->lldd_task)) 2934 + return -1; 2935 + 2936 + mvs_hba_cq_dump(mvi); 2937 + 2938 + spin_lock(&task->task_state_lock); 2939 + aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; 2940 + if (!aborted) { 2941 + task->task_state_flags &= 2942 + ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 2943 + task->task_state_flags |= SAS_TASK_STATE_DONE; 2944 + } 2945 + spin_unlock(&task->task_state_lock); 2946 + 2947 + if 
(aborted) { 2948 + mvs_slot_task_free(mvi, task, slot, slot_idx); 2949 + mvs_slot_free(mvi, rx_desc); 2950 + return -1; 2951 + } 2952 + 2953 + port = slot->port; 2954 + tstat = &task->task_status; 2955 + memset(tstat, 0, sizeof(*tstat)); 2956 + tstat->resp = SAS_TASK_COMPLETE; 2957 + 2958 + if (unlikely(!port->port_attached || flags)) { 2959 + mvs_slot_err(mvi, task, slot_idx); 2960 + if (!sas_protocol_ata(task->task_proto)) 2961 + tstat->stat = SAS_PHY_DOWN; 2962 + goto out; 2963 + } 2964 + 2965 + /* error info record present */ 2966 + if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { 2967 + tstat->stat = mvs_slot_err(mvi, task, slot_idx); 2968 + goto out; 2969 + } 2970 + 2971 + switch (task->task_proto) { 2972 + case SAS_PROTOCOL_SSP: 2973 + /* hw says status == 0, datapres == 0 */ 2974 + if (rx_desc & RXQ_GOOD) { 2975 + tstat->stat = SAM_GOOD; 2976 + tstat->resp = SAS_TASK_COMPLETE; 2977 + } 2978 + /* response frame present */ 2979 + else if (rx_desc & RXQ_RSP) { 2980 + struct ssp_response_iu *iu = 2981 + slot->response + sizeof(struct mvs_err_info); 2982 + sas_ssp_task_response(&mvi->pdev->dev, task, iu); 2983 + } 2984 + 2985 + /* should never happen? 
*/ 2986 + else 2987 + tstat->stat = SAM_CHECK_COND; 2988 + break; 2989 + 2990 + case SAS_PROTOCOL_SMP: { 2991 + struct scatterlist *sg_resp = &task->smp_task.smp_resp; 2992 + tstat->stat = SAM_GOOD; 2993 + to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); 2994 + memcpy(to + sg_resp->offset, 2995 + slot->response + sizeof(struct mvs_err_info), 2996 + sg_dma_len(sg_resp)); 2997 + kunmap_atomic(to, KM_IRQ0); 2998 + break; 2999 + } 3000 + 3001 + case SAS_PROTOCOL_SATA: 3002 + case SAS_PROTOCOL_STP: 3003 + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { 3004 + tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); 3005 + break; 3006 + } 3007 + 3008 + default: 3009 + tstat->stat = SAM_CHECK_COND; 3010 + break; 3011 + } 3012 + 3013 + out: 3014 + mvs_slot_task_free(mvi, task, slot, slot_idx); 3015 + if (unlikely(tstat->stat != SAS_QUEUE_FULL)) 3016 + mvs_slot_free(mvi, rx_desc); 3017 + 3018 + spin_unlock(&mvi->lock); 3019 + task->task_done(task); 3020 + spin_lock(&mvi->lock); 3021 + return tstat->stat; 3022 + } 3023 + 3024 + static void mvs_release_task(struct mvs_info *mvi, int phy_no) 3025 + { 3026 + struct list_head *pos, *n; 3027 + struct mvs_slot_info *slot; 3028 + struct mvs_phy *phy = &mvi->phy[phy_no]; 3029 + struct mvs_port *port = phy->port; 3030 + u32 rx_desc; 3031 + 3032 + if (!port) 3033 + return; 3034 + 3035 + list_for_each_safe(pos, n, &port->list) { 3036 + slot = container_of(pos, struct mvs_slot_info, list); 3037 + rx_desc = (u32) (slot - mvi->slot_info); 3038 + mvs_slot_complete(mvi, rx_desc, 1); 3039 + } 3040 + } 3041 + 3042 + static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) 3043 + { 3044 + struct pci_dev *pdev = mvi->pdev; 3045 + struct sas_ha_struct *sas_ha = &mvi->sas; 3046 + struct mvs_phy *phy = &mvi->phy[phy_no]; 3047 + struct asd_sas_phy *sas_phy = &phy->sas_phy; 3048 + 3049 + phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no); 3050 + /* 3051 + * events is port event now , 3052 + * we need check the interrupt status which belongs to 
per port. 3053 + */ 3054 + dev_printk(KERN_DEBUG, &pdev->dev, 3055 + "Port %d Event = %X\n", 3056 + phy_no, phy->irq_status); 3057 + 3058 + if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { 3059 + mvs_release_task(mvi, phy_no); 3060 + if (!mvs_is_phy_ready(mvi, phy_no)) { 3061 + sas_phy_disconnected(sas_phy); 3062 + sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); 3063 + dev_printk(KERN_INFO, &pdev->dev, 3064 + "Port %d Unplug Notice\n", phy_no); 3065 + 3066 + } else 3067 + mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); 3068 + } 3069 + if (!(phy->irq_status & PHYEV_DEC_ERR)) { 3070 + if (phy->irq_status & PHYEV_COMWAKE) { 3071 + u32 tmp = mvs_read_port_irq_mask(mvi, phy_no); 3072 + mvs_write_port_irq_mask(mvi, phy_no, 3073 + tmp | PHYEV_SIG_FIS); 3074 + } 3075 + if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { 3076 + phy->phy_status = mvs_is_phy_ready(mvi, phy_no); 3077 + if (phy->phy_status) { 3078 + mvs_detect_porttype(mvi, phy_no); 3079 + 3080 + if (phy->phy_type & PORT_TYPE_SATA) { 3081 + u32 tmp = mvs_read_port_irq_mask(mvi, 3082 + phy_no); 3083 + tmp &= ~PHYEV_SIG_FIS; 3084 + mvs_write_port_irq_mask(mvi, 3085 + phy_no, tmp); 3086 + } 3087 + 3088 + mvs_update_phyinfo(mvi, phy_no, 0); 3089 + sas_ha->notify_phy_event(sas_phy, 3090 + PHYE_OOB_DONE); 3091 + mvs_bytes_dmaed(mvi, phy_no); 3092 + } else { 3093 + dev_printk(KERN_DEBUG, &pdev->dev, 3094 + "plugin interrupt but phy is gone\n"); 3095 + mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, 3096 + NULL); 3097 + } 3098 + } else if (phy->irq_status & PHYEV_BROAD_CH) { 3099 + mvs_release_task(mvi, phy_no); 3100 + sas_ha->notify_port_event(sas_phy, 3101 + PORTE_BROADCAST_RCVD); 3102 + } 3103 + } 3104 + mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status); 3105 + } 3106 + 3107 + static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) 3108 + { 3109 + void __iomem *regs = mvi->regs; 3110 + u32 rx_prod_idx, rx_desc; 3111 + bool attn = false; 3112 + struct pci_dev *pdev = mvi->pdev; 3113 + 
3114 + /* the first dword in the RX ring is special: it contains 3115 + * a mirror of the hardware's RX producer index, so that 3116 + * we don't have to stall the CPU reading that register. 3117 + * The actual RX ring is offset by one dword, due to this. 3118 + */ 3119 + rx_prod_idx = mvi->rx_cons; 3120 + mvi->rx_cons = le32_to_cpu(mvi->rx[0]); 3121 + if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ 3122 + return 0; 3123 + 3124 + /* The CMPL_Q may come late, read from register and try again 3125 + * note: if coalescing is enabled, 3126 + * it will need to read from register every time for sure 3127 + */ 3128 + if (mvi->rx_cons == rx_prod_idx) 3129 + mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; 3130 + 3131 + if (mvi->rx_cons == rx_prod_idx) 3132 + return 0; 3133 + 3134 + while (mvi->rx_cons != rx_prod_idx) { 3135 + 3136 + /* increment our internal RX consumer pointer */ 3137 + rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); 3138 + 3139 + rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); 3140 + 3141 + if (likely(rx_desc & RXQ_DONE)) 3142 + mvs_slot_complete(mvi, rx_desc, 0); 3143 + if (rx_desc & RXQ_ATTN) { 3144 + attn = true; 3145 + dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n", 3146 + rx_desc); 3147 + } else if (rx_desc & RXQ_ERR) { 3148 + if (!(rx_desc & RXQ_DONE)) 3149 + mvs_slot_complete(mvi, rx_desc, 0); 3150 + dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", 3151 + rx_desc); 3152 + } else if (rx_desc & RXQ_SLOT_RESET) { 3153 + dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n", 3154 + rx_desc); 3155 + mvs_slot_free(mvi, rx_desc); 3156 + } 3157 + } 3158 + 3159 + if (attn && self_clear) 3160 + mvs_int_full(mvi); 3161 + 3162 + return 0; 3163 + } 3164 + 3165 + #ifndef MVS_DISABLE_NVRAM 3166 + static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data) 3167 + { 3168 + int timeout = 1000; 3169 + 3170 + if (addr & ~SPI_ADDR_MASK) 3171 + return -EINVAL; 3172 + 3173 + writel(addr, regs + SPI_CMD); 3174 + writel(TWSI_RD, regs + 
SPI_CTL); 3175 + 3176 + while (timeout-- > 0) { 3177 + if (readl(regs + SPI_CTL) & TWSI_RDY) { 3178 + *data = readl(regs + SPI_DATA); 3179 + return 0; 3180 + } 3181 + 3182 + udelay(10); 3183 + } 3184 + 3185 + return -EBUSY; 3186 + } 3187 + 3188 + static int mvs_eep_read_buf(void __iomem *regs, u32 addr, 3189 + void *buf, u32 buflen) 3190 + { 3191 + u32 addr_end, tmp_addr, i, j; 3192 + u32 tmp = 0; 3193 + int rc; 3194 + u8 *tmp8, *buf8 = buf; 3195 + 3196 + addr_end = addr + buflen; 3197 + tmp_addr = ALIGN(addr, 4); 3198 + if (addr > 0xff) 3199 + return -EINVAL; 3200 + 3201 + j = addr & 0x3; 3202 + if (j) { 3203 + rc = mvs_eep_read(regs, tmp_addr, &tmp); 3204 + if (rc) 3205 + return rc; 3206 + 3207 + tmp8 = (u8 *)&tmp; 3208 + for (i = j; i < 4; i++) 3209 + *buf8++ = tmp8[i]; 3210 + 3211 + tmp_addr += 4; 3212 + } 3213 + 3214 + for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { 3215 + rc = mvs_eep_read(regs, tmp_addr, &tmp); 3216 + if (rc) 3217 + return rc; 3218 + 3219 + memcpy(buf8, &tmp, 4); 3220 + buf8 += 4; 3221 + } 3222 + 3223 + if (tmp_addr < addr_end) { 3224 + rc = mvs_eep_read(regs, tmp_addr, &tmp); 3225 + if (rc) 3226 + return rc; 3227 + 3228 + tmp8 = (u8 *)&tmp; 3229 + j = addr_end - tmp_addr; 3230 + for (i = 0; i < j; i++) 3231 + *buf8++ = tmp8[i]; 3232 + 3233 + tmp_addr += 4; 3234 + } 3235 + 3236 + return 0; 3237 + } 3238 + #endif 3239 + 3240 + int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen) 3241 + { 3242 + #ifndef MVS_DISABLE_NVRAM 3243 + void __iomem *regs = mvi->regs; 3244 + int rc, i; 3245 + u32 sum; 3246 + u8 hdr[2], *tmp; 3247 + const char *msg; 3248 + 3249 + rc = mvs_eep_read_buf(regs, addr, &hdr, 2); 3250 + if (rc) { 3251 + msg = "nvram hdr read failed"; 3252 + goto err_out; 3253 + } 3254 + rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); 3255 + if (rc) { 3256 + msg = "nvram read failed"; 3257 + goto err_out; 3258 + } 3259 + 3260 + if (hdr[0] != 0x5A) { 3261 + /* entry id */ 3262 + msg = "invalid nvram entry 
id"; 3263 + rc = -ENOENT; 3264 + goto err_out; 3265 + } 3266 + 3267 + tmp = buf; 3268 + sum = ((u32)hdr[0]) + ((u32)hdr[1]); 3269 + for (i = 0; i < buflen; i++) 3270 + sum += ((u32)tmp[i]); 3271 + 3272 + if (sum) { 3273 + msg = "nvram checksum failure"; 3274 + rc = -EILSEQ; 3275 + goto err_out; 3276 + } 3277 + 3278 + return 0; 3279 + 3280 + err_out: 3281 + dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg); 3282 + return rc; 3283 + #else 3284 + /* FIXME , For SAS target mode */ 3285 + memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); 3286 + return 0; 3287 + #endif 3288 + } 3289 + 3290 + static void mvs_int_sata(struct mvs_info *mvi) 3291 + { 3292 + u32 tmp; 3293 + void __iomem *regs = mvi->regs; 3294 + tmp = mr32(INT_STAT_SRS); 3295 + mw32(INT_STAT_SRS, tmp & 0xFFFF); 3296 + } 3297 + 3298 + static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, 3299 + u32 slot_idx) 3300 + { 3301 + void __iomem *regs = mvi->regs; 3302 + struct domain_device *dev = task->dev; 3303 + struct asd_sas_port *sas_port = dev->port; 3304 + struct mvs_port *port = mvi->slot_info[slot_idx].port; 3305 + u32 reg_set, phy_mask; 3306 + 3307 + if (!sas_protocol_ata(task->task_proto)) { 3308 + reg_set = 0; 3309 + phy_mask = (port->wide_port_phymap) ? 
port->wide_port_phymap : 3310 + sas_port->phy_mask; 3311 + } else { 3312 + reg_set = port->taskfileset; 3313 + phy_mask = sas_port->phy_mask; 3314 + } 3315 + mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx | 3316 + (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) | 3317 + (phy_mask << TXQ_PHY_SHIFT) | 3318 + (reg_set << TXQ_SRS_SHIFT)); 3319 + 3320 + mw32(TX_PROD_IDX, mvi->tx_prod); 3321 + mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); 3322 + } 3323 + 3324 + void mvs_int_full(struct mvs_info *mvi) 3325 + { 3326 + void __iomem *regs = mvi->regs; 3327 + u32 tmp, stat; 3328 + int i; 3329 + 3330 + stat = mr32(INT_STAT); 3331 + 3332 + mvs_int_rx(mvi, false); 3333 + 3334 + for (i = 0; i < MVS_MAX_PORTS; i++) { 3335 + tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); 3336 + if (tmp) 3337 + mvs_int_port(mvi, i, tmp); 3338 + } 3339 + 3340 + if (stat & CINT_SRS) 3341 + mvs_int_sata(mvi); 3342 + 3343 + mw32(INT_STAT, stat); 3344 + } 3345 + 3346 + #ifndef MVS_DISABLE_MSI 3347 + static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) 3348 + { 3349 + struct mvs_info *mvi = opaque; 3350 + 3351 + #ifndef MVS_USE_TASKLET 3352 + spin_lock(&mvi->lock); 3353 + 3354 + mvs_int_rx(mvi, true); 3355 + 3356 + spin_unlock(&mvi->lock); 3357 + #else 3358 + tasklet_schedule(&mvi->tasklet); 3359 + #endif 3360 + return IRQ_HANDLED; 3361 + } 3362 + #endif 3363 + 3364 + int mvs_task_abort(struct sas_task *task) 3365 + { 3366 + int rc; 3367 + unsigned long flags; 3368 + struct mvs_info *mvi = task->dev->port->ha->lldd_ha; 3369 + struct pci_dev *pdev = mvi->pdev; 3370 + int tag; 3371 + 3372 + spin_lock_irqsave(&task->task_state_lock, flags); 3373 + if (task->task_state_flags & SAS_TASK_STATE_DONE) { 3374 + rc = TMF_RESP_FUNC_COMPLETE; 3375 + spin_unlock_irqrestore(&task->task_state_lock, flags); 3376 + goto out_done; 3377 + } 3378 + spin_unlock_irqrestore(&task->task_state_lock, flags); 3379 + 3380 + switch (task->task_proto) { 3381 + case SAS_PROTOCOL_SMP: 3382 + 
dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n"); 3383 + break; 3384 + case SAS_PROTOCOL_SSP: 3385 + dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n"); 3386 + break; 3387 + case SAS_PROTOCOL_SATA: 3388 + case SAS_PROTOCOL_STP: 3389 + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{ 3390 + dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n"); 3391 + #if _MV_DUMP 3392 + dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n"); 3393 + mvs_hexdump(sizeof(struct host_to_dev_fis), 3394 + (void *)&task->ata_task.fis, 0); 3395 + dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n"); 3396 + mvs_hexdump(16, task->ata_task.atapi_packet, 0); 3397 + #endif 3398 + spin_lock_irqsave(&task->task_state_lock, flags); 3399 + if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { 3400 + /* TODO */ 3401 + ; 3402 + } 3403 + spin_unlock_irqrestore(&task->task_state_lock, flags); 3404 + break; 3405 + } 3406 + default: 3407 + break; 3408 + } 3409 + 3410 + if (mvs_find_tag(mvi, task, &tag)) { 3411 + spin_lock_irqsave(&mvi->lock, flags); 3412 + mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag); 3413 + spin_unlock_irqrestore(&mvi->lock, flags); 3414 + } 3415 + if (!mvs_task_exec(task, 1, GFP_ATOMIC)) 3416 + rc = TMF_RESP_FUNC_COMPLETE; 3417 + else 3418 + rc = TMF_RESP_FUNC_FAILED; 3419 + out_done: 3420 + return rc; 3421 + } 3422 + 3423 + int __devinit mvs_hw_init(struct mvs_info *mvi) 1089 3424 { 1090 3425 void __iomem *regs = mvi->regs; 1091 3426 int i; ··· 1804 3041 return 0; 1805 3042 } 1806 3043 1807 - static void __devinit mvs_print_info(struct mvs_info *mvi) 3044 + void __devinit mvs_print_info(struct mvs_info *mvi) 1808 3045 { 1809 3046 struct pci_dev *pdev = mvi->pdev; 1810 3047 static int printed_version; ··· 1816 3053 mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr)); 1817 3054 } 1818 3055 1819 - static int __devinit mvs_pci_init(struct pci_dev *pdev, 1820 - const struct pci_device_id *ent) 1821 - { 1822 - int rc; 1823 - struct mvs_info *mvi; 1824 - irq_handler_t irq_handler = 
mvs_interrupt; 1825 - 1826 - rc = pci_enable_device(pdev); 1827 - if (rc) 1828 - return rc; 1829 - 1830 - pci_set_master(pdev); 1831 - 1832 - rc = pci_request_regions(pdev, DRV_NAME); 1833 - if (rc) 1834 - goto err_out_disable; 1835 - 1836 - rc = pci_go_64(pdev); 1837 - if (rc) 1838 - goto err_out_regions; 1839 - 1840 - mvi = mvs_alloc(pdev, ent); 1841 - if (!mvi) { 1842 - rc = -ENOMEM; 1843 - goto err_out_regions; 1844 - } 1845 - 1846 - rc = mvs_hw_init(mvi); 1847 - if (rc) 1848 - goto err_out_mvi; 1849 - 1850 - #ifndef MVS_DISABLE_MSI 1851 - if (!pci_enable_msi(pdev)) { 1852 - u32 tmp; 1853 - void __iomem *regs = mvi->regs; 1854 - mvi->flags |= MVF_MSI; 1855 - irq_handler = mvs_msi_interrupt; 1856 - tmp = mr32(PCS); 1857 - mw32(PCS, tmp | PCS_SELF_CLEAR); 1858 - } 1859 - #endif 1860 - 1861 - rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi); 1862 - if (rc) 1863 - goto err_out_msi; 1864 - 1865 - rc = scsi_add_host(mvi->shost, &pdev->dev); 1866 - if (rc) 1867 - goto err_out_irq; 1868 - 1869 - rc = sas_register_ha(&mvi->sas); 1870 - if (rc) 1871 - goto err_out_shost; 1872 - 1873 - pci_set_drvdata(pdev, mvi); 1874 - 1875 - mvs_print_info(mvi); 1876 - 1877 - mvs_hba_interrupt_enable(mvi); 1878 - 1879 - scsi_scan_host(mvi->shost); 1880 - 1881 - return 0; 1882 - 1883 - err_out_shost: 1884 - scsi_remove_host(mvi->shost); 1885 - err_out_irq: 1886 - free_irq(pdev->irq, mvi); 1887 - err_out_msi: 1888 - if (mvi->flags |= MVF_MSI) 1889 - pci_disable_msi(pdev); 1890 - err_out_mvi: 1891 - mvs_free(mvi); 1892 - err_out_regions: 1893 - pci_release_regions(pdev); 1894 - err_out_disable: 1895 - pci_disable_device(pdev); 1896 - return rc; 1897 - } 1898 - 1899 - static void __devexit mvs_pci_remove(struct pci_dev *pdev) 1900 - { 1901 - struct mvs_info *mvi = pci_get_drvdata(pdev); 1902 - 1903 - pci_set_drvdata(pdev, NULL); 1904 - 1905 - if (mvi) { 1906 - sas_unregister_ha(&mvi->sas); 1907 - mvs_hba_interrupt_disable(mvi); 1908 - sas_remove_host(mvi->shost); 1909 - 
scsi_remove_host(mvi->shost); 1910 - 1911 - free_irq(pdev->irq, mvi); 1912 - if (mvi->flags & MVF_MSI) 1913 - pci_disable_msi(pdev); 1914 - mvs_free(mvi); 1915 - pci_release_regions(pdev); 1916 - } 1917 - pci_disable_device(pdev); 1918 - } 1919 - 1920 - static struct sas_domain_function_template mvs_transport_ops = { 1921 - .lldd_execute_task = mvs_task_exec, 1922 - .lldd_control_phy = mvs_phy_control, 1923 - .lldd_abort_task = mvs_task_abort, 1924 - .lldd_port_formed = mvs_port_formed, 1925 - .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, 1926 - }; 1927 - 1928 - static struct pci_device_id __devinitdata mvs_pci_table[] = { 1929 - { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, 1930 - { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, 1931 - { 1932 - .vendor = PCI_VENDOR_ID_MARVELL, 1933 - .device = 0x6440, 1934 - .subvendor = PCI_ANY_ID, 1935 - .subdevice = 0x6480, 1936 - .class = 0, 1937 - .class_mask = 0, 1938 - .driver_data = chip_6480, 1939 - }, 1940 - { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, 1941 - { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 }, 1942 - 1943 - { } /* terminate list */ 1944 - }; 1945 - 1946 - static struct pci_driver mvs_pci_driver = { 1947 - .name = DRV_NAME, 1948 - .id_table = mvs_pci_table, 1949 - .probe = mvs_pci_init, 1950 - .remove = __devexit_p(mvs_pci_remove), 1951 - }; 1952 - 1953 - static int __init mvs_init(void) 1954 - { 1955 - int rc; 1956 - 1957 - mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); 1958 - if (!mvs_stt) 1959 - return -ENOMEM; 1960 - 1961 - rc = pci_register_driver(&mvs_pci_driver); 1962 - if (rc) 1963 - goto err_out; 1964 - 1965 - return 0; 1966 - 1967 - err_out: 1968 - sas_release_transport(mvs_stt); 1969 - return rc; 1970 - } 1971 - 1972 - static void __exit mvs_exit(void) 1973 - { 1974 - pci_unregister_driver(&mvs_pci_driver); 1975 - sas_release_transport(mvs_stt); 1976 - } 1977 - 1978 - module_init(mvs_init); 1979 - module_exit(mvs_exit); 1980 - 1981 - MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); 1982 - 
MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); 1983 - MODULE_VERSION(DRV_VERSION); 1984 - MODULE_LICENSE("GPL"); 1985 - MODULE_DEVICE_TABLE(pci, mvs_pci_table);
+205
drivers/scsi/mvsas/mv_sas.h
··· 1 + /* 2 + mv_sas.h - Marvell 88SE6440 SAS/SATA support 3 + 4 + Copyright 2007 Red Hat, Inc. 5 + Copyright 2008 Marvell. <kewei@marvell.com> 6 + 7 + This program is free software; you can redistribute it and/or 8 + modify it under the terms of the GNU General Public License as 9 + published by the Free Software Foundation; either version 2, 10 + or (at your option) any later version. 11 + 12 + This program is distributed in the hope that it will be useful, 13 + but WITHOUT ANY WARRANTY; without even the implied warranty 14 + of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 15 + See the GNU General Public License for more details. 16 + 17 + You should have received a copy of the GNU General Public 18 + License along with this program; see the file COPYING. If not, 19 + write to the Free Software Foundation, 675 Mass Ave, Cambridge, 20 + MA 02139, USA. 21 + 22 + */ 23 + 24 + #ifndef _MV_SAS_H_ 25 + #define _MV_SAS_H_ 26 + 27 + #include <linux/kernel.h> 28 + #include <linux/module.h> 29 + #include <linux/spinlock.h> 30 + #include <linux/delay.h> 31 + #include <linux/types.h> 32 + #include <linux/ctype.h> 33 + #include <linux/dma-mapping.h> 34 + #include <linux/pci.h> 35 + #include <linux/platform_device.h> 36 + #include <linux/interrupt.h> 37 + #include <linux/irq.h> 38 + #include <linux/vmalloc.h> 39 + #include <scsi/libsas.h> 40 + #include <scsi/scsi_tcq.h> 41 + #include <scsi/sas_ata.h> 42 + #include <linux/version.h> 43 + #include "mv_defs.h" 44 + 45 + #define DRV_NAME "mvsas" 46 + #define DRV_VERSION "0.5.2" 47 + #define _MV_DUMP 0 48 + #define MVS_DISABLE_NVRAM 49 + #define MVS_DISABLE_MSI 50 + 51 + #define MVS_ID_NOT_MAPPED 0x7f 52 + #define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) 53 + 54 + #define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \ 55 + for ((__mc) = (__lseq_mask), (__lseq) = 0; \ 56 + (__mc) != 0 && __rest; \ 57 + (++__lseq), (__mc) >>= 1) 58 + 59 + struct mvs_chip_info { 60 + u32 n_phy; 61 + u32 srs_sz; 62 + u32 slot_width; 
63 + }; 64 + 65 + struct mvs_err_info { 66 + __le32 flags; 67 + __le32 flags2; 68 + }; 69 + 70 + struct mvs_cmd_hdr { 71 + __le32 flags; /* PRD tbl len; SAS, SATA ctl */ 72 + __le32 lens; /* cmd, max resp frame len */ 73 + __le32 tags; /* targ port xfer tag; tag */ 74 + __le32 data_len; /* data xfer len */ 75 + __le64 cmd_tbl; /* command table address */ 76 + __le64 open_frame; /* open addr frame address */ 77 + __le64 status_buf; /* status buffer address */ 78 + __le64 prd_tbl; /* PRD tbl address */ 79 + __le32 reserved[4]; 80 + }; 81 + 82 + struct mvs_port { 83 + struct asd_sas_port sas_port; 84 + u8 port_attached; 85 + u8 taskfileset; 86 + u8 wide_port_phymap; 87 + struct list_head list; 88 + }; 89 + 90 + struct mvs_phy { 91 + struct mvs_port *port; 92 + struct asd_sas_phy sas_phy; 93 + struct sas_identify identify; 94 + struct scsi_device *sdev; 95 + u64 dev_sas_addr; 96 + u64 att_dev_sas_addr; 97 + u32 att_dev_info; 98 + u32 dev_info; 99 + u32 phy_type; 100 + u32 phy_status; 101 + u32 irq_status; 102 + u32 frame_rcvd_size; 103 + u8 frame_rcvd[32]; 104 + u8 phy_attached; 105 + enum sas_linkrate minimum_linkrate; 106 + enum sas_linkrate maximum_linkrate; 107 + }; 108 + 109 + struct mvs_slot_info { 110 + struct list_head list; 111 + struct sas_task *task; 112 + u32 n_elem; 113 + u32 tx; 114 + 115 + /* DMA buffer for storing cmd tbl, open addr frame, status buffer, 116 + * and PRD table 117 + */ 118 + void *buf; 119 + dma_addr_t buf_dma; 120 + #if _MV_DUMP 121 + u32 cmd_size; 122 + #endif 123 + 124 + void *response; 125 + struct mvs_port *port; 126 + }; 127 + 128 + struct mvs_info { 129 + unsigned long flags; 130 + 131 + /* host-wide lock */ 132 + spinlock_t lock; 133 + 134 + /* our device */ 135 + struct pci_dev *pdev; 136 + 137 + /* enhanced mode registers */ 138 + void __iomem *regs; 139 + 140 + /* peripheral registers */ 141 + void __iomem *peri_regs; 142 + 143 + u8 sas_addr[SAS_ADDR_SIZE]; 144 + 145 + /* SCSI/SAS glue */ 146 + struct sas_ha_struct sas; 147 + 
struct Scsi_Host *shost; 148 + 149 + /* TX (delivery) DMA ring */ 150 + __le32 *tx; 151 + dma_addr_t tx_dma; 152 + 153 + /* cached next-producer idx */ 154 + u32 tx_prod; 155 + 156 + /* RX (completion) DMA ring */ 157 + __le32 *rx; 158 + dma_addr_t rx_dma; 159 + 160 + /* RX consumer idx */ 161 + u32 rx_cons; 162 + 163 + /* RX'd FIS area */ 164 + __le32 *rx_fis; 165 + dma_addr_t rx_fis_dma; 166 + 167 + /* DMA command header slots */ 168 + struct mvs_cmd_hdr *slot; 169 + dma_addr_t slot_dma; 170 + 171 + const struct mvs_chip_info *chip; 172 + 173 + u8 tags[MVS_SLOTS]; 174 + struct mvs_slot_info slot_info[MVS_SLOTS]; 175 + /* further per-slot information */ 176 + struct mvs_phy phy[MVS_MAX_PHYS]; 177 + struct mvs_port port[MVS_MAX_PHYS]; 178 + #ifdef MVS_USE_TASKLET 179 + struct tasklet_struct tasklet; 180 + #endif 181 + }; 182 + 183 + int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, 184 + void *funcdata); 185 + int mvs_slave_configure(struct scsi_device *sdev); 186 + void mvs_scan_start(struct Scsi_Host *shost); 187 + int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); 188 + int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags); 189 + int mvs_task_abort(struct sas_task *task); 190 + void mvs_port_formed(struct asd_sas_phy *sas_phy); 191 + int mvs_I_T_nexus_reset(struct domain_device *dev); 192 + void mvs_int_full(struct mvs_info *mvi); 193 + void mvs_tag_init(struct mvs_info *mvi); 194 + int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen); 195 + int __devinit mvs_hw_init(struct mvs_info *mvi); 196 + void __devinit mvs_print_info(struct mvs_info *mvi); 197 + void mvs_hba_interrupt_enable(struct mvs_info *mvi); 198 + void mvs_hba_interrupt_disable(struct mvs_info *mvi); 199 + void mvs_detect_porttype(struct mvs_info *mvi, int i); 200 + u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port); 201 + void mvs_enable_xmt(struct mvs_info *mvi, int PhyId); 202 + void __devinit 
mvs_phy_hacks(struct mvs_info *mvi); 203 + void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port); 204 + 205 + #endif