Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] mvsas: add support for 94xx; layout change; bug fixes

This version contains the following main changes:
- Switch to new layout to support more types of ASIC.
- SSP TMF supported and related Error Handling enhanced.
- Support flash feature with delay 2*HZ when PHY changed.
- Support Marvell 94xx series ASIC for 6G SAS/SATA, which contains two
88SE64xx-like cores but uses a different register description.
- Support SPI flash for HBA-related configuration info.
- Other enhancements on the kernel side, such as support for additional PHY types.

[jejb: fold back in DMA_BIT_MASK changes]
Signed-off-by: Ying Chu <jasonchu@marvell.com>
Signed-off-by: Andy Yan <ayan@marvell.com>
Signed-off-by: Ke Wei <kewei@marvell.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>

authored by

Andy Yan and committed by
James Bottomley
20b09c29 dd4969a8

+3915 -1399
+15 -8
drivers/scsi/mvsas/Kconfig
··· 1 1 # 2 - # Kernel configuration file for 88SE64XX SAS/SATA driver. 2 + # Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver. 3 3 # 4 4 # Copyright 2007 Red Hat, Inc. 5 5 # Copyright 2008 Marvell. <kewei@marvell.com> 6 6 # 7 7 # This file is licensed under GPLv2. 8 8 # 9 - # This file is part of the 88SE64XX driver. 9 + # This file is part of the 88SE64XX/88SE94XX driver. 10 10 # 11 - # The 88SE64XX driver is free software; you can redistribute 11 + # The 88SE64XX/88SE94XX driver is free software; you can redistribute 12 12 # it and/or modify it under the terms of the GNU General Public License 13 13 # as published by the Free Software Foundation; version 2 of the 14 14 # License. 15 15 # 16 - # The 88SE64XX driver is distributed in the hope that it will be 16 + # The 88SE64XX/88SE94XX driver is distributed in the hope that it will be 17 17 # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of 18 18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 19 # General Public License for more details. 20 20 # 21 21 # You should have received a copy of the GNU General Public License 22 - # along with 88SE64XX Driver; if not, write to the Free Software 22 + # along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software 23 23 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 24 # 25 25 # 26 26 27 27 config SCSI_MVSAS 28 - tristate "Marvell 88SE64XX SAS/SATA support" 28 + tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support" 29 29 depends on PCI 30 30 select SCSI_SAS_LIBSAS 31 31 select FW_LOADER 32 32 help 33 - This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX 34 - chip based host adapters. 33 + This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s 34 + PCI-E 88SE94XX chip based host adapters. 
35 35 36 + config SCSI_MVSAS_DEBUG 37 + bool "Compile in debug mode" 38 + default y 39 + depends on SCSI_MVSAS 40 + help 41 + Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode, 42 + the driver prints some messages to the console.
+7 -2
drivers/scsi/mvsas/Makefile
··· 1 1 # 2 - # Makefile for Marvell 88SE64xx SAS/SATA driver. 2 + # Makefile for Marvell 88SE64xx/88SE84xx SAS/SATA driver. 3 3 # 4 4 # Copyright 2007 Red Hat, Inc. 5 5 # Copyright 2008 Marvell. <kewei@marvell.com> ··· 21 21 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 22 22 # USA 23 23 24 + ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y) 25 + EXTRA_CFLAGS += -DMV_DEBUG 26 + endif 27 + 24 28 obj-$(CONFIG_SCSI_MVSAS) += mvsas.o 25 29 mvsas-y += mv_init.o \ 26 30 mv_sas.o \ 27 - mv_64xx.o 31 + mv_64xx.o \ 32 + mv_94xx.o
+710 -101
drivers/scsi/mvsas/mv_64xx.c
··· 1 1 /* 2 - mv_64xx.c - Marvell 88SE6440 SAS/SATA support 3 - 4 - Copyright 2007 Red Hat, Inc. 5 - Copyright 2008 Marvell. <kewei@marvell.com> 6 - 7 - This program is free software; you can redistribute it and/or 8 - modify it under the terms of the GNU General Public License as 9 - published by the Free Software Foundation; either version 2, 10 - or (at your option) any later version. 11 - 12 - This program is distributed in the hope that it will be useful, 13 - but WITHOUT ANY WARRANTY; without even the implied warranty 14 - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 15 - See the GNU General Public License for more details. 16 - 17 - You should have received a copy of the GNU General Public 18 - License along with this program; see the file COPYING. If not, 19 - write to the Free Software Foundation, 675 Mass Ave, Cambridge, 20 - MA 02139, USA. 21 - 22 - */ 2 + * Marvell 88SE64xx hardware specific 3 + * 4 + * Copyright 2007 Red Hat, Inc. 5 + * Copyright 2008 Marvell. <kewei@marvell.com> 6 + * 7 + * This file is licensed under GPLv2. 8 + * 9 + * This program is free software; you can redistribute it and/or 10 + * modify it under the terms of the GNU General Public License as 11 + * published by the Free Software Foundation; version 2 of the 12 + * License. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 
18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; if not, write to the Free Software 21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 22 + * USA 23 + */ 23 24 24 25 #include "mv_sas.h" 25 26 #include "mv_64xx.h" 26 27 #include "mv_chips.h" 27 28 28 - void mvs_detect_porttype(struct mvs_info *mvi, int i) 29 + static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i) 29 30 { 30 31 void __iomem *regs = mvi->regs; 31 32 u32 reg; 32 33 struct mvs_phy *phy = &mvi->phy[i]; 33 34 34 35 /* TODO check & save device type */ 35 - reg = mr32(GBL_PORT_TYPE); 36 - 36 + reg = mr32(MVS_GBL_PORT_TYPE); 37 + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); 37 38 if (reg & MODE_SAS_SATA & (1 << i)) 38 39 phy->phy_type |= PORT_TYPE_SAS; 39 40 else 40 41 phy->phy_type |= PORT_TYPE_SATA; 41 42 } 42 43 43 - void mvs_enable_xmt(struct mvs_info *mvi, int PhyId) 44 + static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id) 44 45 { 45 46 void __iomem *regs = mvi->regs; 46 47 u32 tmp; 47 48 48 - tmp = mr32(PCS); 49 + tmp = mr32(MVS_PCS); 49 50 if (mvi->chip->n_phy <= 4) 50 - tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT); 51 + tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT); 51 52 else 52 - tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2); 53 - mw32(PCS, tmp); 53 + tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); 54 + mw32(MVS_PCS, tmp); 54 55 } 55 56 56 - void __devinit mvs_phy_hacks(struct mvs_info *mvi) 57 + static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi) 58 + { 59 + void __iomem *regs = mvi->regs; 60 + 61 + mvs_phy_hacks(mvi); 62 + 63 + if (!(mvi->flags & MVF_FLAG_SOC)) { 64 + /* TEST - for phy decoding error, adjust voltage levels */ 65 + mw32(MVS_P0_VSR_ADDR + 0, 0x8); 66 + mw32(MVS_P0_VSR_DATA + 0, 0x2F0); 67 + 68 + mw32(MVS_P0_VSR_ADDR + 8, 0x8); 69 + mw32(MVS_P0_VSR_DATA + 8, 0x2F0); 70 + 71 + mw32(MVS_P0_VSR_ADDR + 16, 0x8); 72 + mw32(MVS_P0_VSR_DATA + 16, 0x2F0); 
73 + 74 + mw32(MVS_P0_VSR_ADDR + 24, 0x8); 75 + mw32(MVS_P0_VSR_DATA + 24, 0x2F0); 76 + } else { 77 + int i; 78 + /* disable auto port detection */ 79 + mw32(MVS_GBL_PORT_TYPE, 0); 80 + for (i = 0; i < mvi->chip->n_phy; i++) { 81 + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7); 82 + mvs_write_port_vsr_data(mvi, i, 0x90000000); 83 + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9); 84 + mvs_write_port_vsr_data(mvi, i, 0x50f2); 85 + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11); 86 + mvs_write_port_vsr_data(mvi, i, 0x0e); 87 + } 88 + } 89 + } 90 + 91 + static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id) 92 + { 93 + void __iomem *regs = mvi->regs; 94 + u32 reg, tmp; 95 + 96 + if (!(mvi->flags & MVF_FLAG_SOC)) { 97 + if (phy_id < 4) 98 + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg); 99 + else 100 + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg); 101 + 102 + } else 103 + reg = mr32(MVS_PHY_CTL); 104 + 105 + tmp = reg; 106 + if (phy_id < 4) 107 + tmp |= (1U << phy_id) << PCTL_LINK_OFFS; 108 + else 109 + tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS; 110 + 111 + if (!(mvi->flags & MVF_FLAG_SOC)) { 112 + if (phy_id < 4) { 113 + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); 114 + mdelay(10); 115 + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg); 116 + } else { 117 + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); 118 + mdelay(10); 119 + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg); 120 + } 121 + } else { 122 + mw32(MVS_PHY_CTL, tmp); 123 + mdelay(10); 124 + mw32(MVS_PHY_CTL, reg); 125 + } 126 + } 127 + 128 + static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) 129 + { 130 + u32 tmp; 131 + tmp = mvs_read_port_irq_stat(mvi, phy_id); 132 + tmp &= ~PHYEV_RDY_CH; 133 + mvs_write_port_irq_stat(mvi, phy_id, tmp); 134 + tmp = mvs_read_phy_ctl(mvi, phy_id); 135 + if (hard) 136 + tmp |= PHY_RST_HARD; 137 + else 138 + tmp |= PHY_RST; 139 + mvs_write_phy_ctl(mvi, phy_id, tmp); 140 + if (hard) { 141 + do { 142 + 
tmp = mvs_read_phy_ctl(mvi, phy_id); 143 + } while (tmp & PHY_RST_HARD); 144 + } 145 + } 146 + 147 + static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi) 148 + { 149 + void __iomem *regs = mvi->regs; 150 + u32 tmp; 151 + int i; 152 + 153 + /* make sure interrupts are masked immediately (paranoia) */ 154 + mw32(MVS_GBL_CTL, 0); 155 + tmp = mr32(MVS_GBL_CTL); 156 + 157 + /* Reset Controller */ 158 + if (!(tmp & HBA_RST)) { 159 + if (mvi->flags & MVF_PHY_PWR_FIX) { 160 + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); 161 + tmp &= ~PCTL_PWR_OFF; 162 + tmp |= PCTL_PHY_DSBL; 163 + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); 164 + 165 + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); 166 + tmp &= ~PCTL_PWR_OFF; 167 + tmp |= PCTL_PHY_DSBL; 168 + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); 169 + } 170 + } 171 + 172 + /* make sure interrupts are masked immediately (paranoia) */ 173 + mw32(MVS_GBL_CTL, 0); 174 + tmp = mr32(MVS_GBL_CTL); 175 + 176 + /* Reset Controller */ 177 + if (!(tmp & HBA_RST)) { 178 + /* global reset, incl. 
COMRESET/H_RESET_N (self-clearing) */ 179 + mw32_f(MVS_GBL_CTL, HBA_RST); 180 + } 181 + 182 + /* wait for reset to finish; timeout is just a guess */ 183 + i = 1000; 184 + while (i-- > 0) { 185 + msleep(10); 186 + 187 + if (!(mr32(MVS_GBL_CTL) & HBA_RST)) 188 + break; 189 + } 190 + if (mr32(MVS_GBL_CTL) & HBA_RST) { 191 + dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n"); 192 + return -EBUSY; 193 + } 194 + return 0; 195 + } 196 + 197 + static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id) 198 + { 199 + void __iomem *regs = mvi->regs; 200 + u32 tmp; 201 + if (!(mvi->flags & MVF_FLAG_SOC)) { 202 + u32 offs; 203 + if (phy_id < 4) 204 + offs = PCR_PHY_CTL; 205 + else { 206 + offs = PCR_PHY_CTL2; 207 + phy_id -= 4; 208 + } 209 + pci_read_config_dword(mvi->pdev, offs, &tmp); 210 + tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id); 211 + pci_write_config_dword(mvi->pdev, offs, tmp); 212 + } else { 213 + tmp = mr32(MVS_PHY_CTL); 214 + tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id); 215 + mw32(MVS_PHY_CTL, tmp); 216 + } 217 + } 218 + 219 + static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id) 220 + { 221 + void __iomem *regs = mvi->regs; 222 + u32 tmp; 223 + if (!(mvi->flags & MVF_FLAG_SOC)) { 224 + u32 offs; 225 + if (phy_id < 4) 226 + offs = PCR_PHY_CTL; 227 + else { 228 + offs = PCR_PHY_CTL2; 229 + phy_id -= 4; 230 + } 231 + pci_read_config_dword(mvi->pdev, offs, &tmp); 232 + tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id)); 233 + pci_write_config_dword(mvi->pdev, offs, tmp); 234 + } else { 235 + tmp = mr32(MVS_PHY_CTL); 236 + tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id)); 237 + mw32(MVS_PHY_CTL, tmp); 238 + } 239 + } 240 + 241 + static int __devinit mvs_64xx_init(struct mvs_info *mvi) 242 + { 243 + void __iomem *regs = mvi->regs; 244 + int i; 245 + u32 tmp, cctl; 246 + 247 + if (mvi->pdev && mvi->pdev->revision == 0) 248 + mvi->flags |= MVF_PHY_PWR_FIX; 249 + if (!(mvi->flags & MVF_FLAG_SOC)) { 250 + mvs_show_pcie_usage(mvi); 251 + tmp = 
mvs_64xx_chip_reset(mvi); 252 + if (tmp) 253 + return tmp; 254 + } else { 255 + tmp = mr32(MVS_PHY_CTL); 256 + tmp &= ~PCTL_PWR_OFF; 257 + tmp |= PCTL_PHY_DSBL; 258 + mw32(MVS_PHY_CTL, tmp); 259 + } 260 + 261 + /* Init Chip */ 262 + /* make sure RST is set; HBA_RST /should/ have done that for us */ 263 + cctl = mr32(MVS_CTL) & 0xFFFF; 264 + if (cctl & CCTL_RST) 265 + cctl &= ~CCTL_RST; 266 + else 267 + mw32_f(MVS_CTL, cctl | CCTL_RST); 268 + 269 + if (!(mvi->flags & MVF_FLAG_SOC)) { 270 + /* write to device control _AND_ device status register */ 271 + pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); 272 + tmp &= ~PRD_REQ_MASK; 273 + tmp |= PRD_REQ_SIZE; 274 + pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); 275 + 276 + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); 277 + tmp &= ~PCTL_PWR_OFF; 278 + tmp &= ~PCTL_PHY_DSBL; 279 + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); 280 + 281 + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); 282 + tmp &= PCTL_PWR_OFF; 283 + tmp &= ~PCTL_PHY_DSBL; 284 + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); 285 + } else { 286 + tmp = mr32(MVS_PHY_CTL); 287 + tmp &= ~PCTL_PWR_OFF; 288 + tmp |= PCTL_COM_ON; 289 + tmp &= ~PCTL_PHY_DSBL; 290 + tmp |= PCTL_LINK_RST; 291 + mw32(MVS_PHY_CTL, tmp); 292 + msleep(100); 293 + tmp &= ~PCTL_LINK_RST; 294 + mw32(MVS_PHY_CTL, tmp); 295 + msleep(100); 296 + } 297 + 298 + /* reset control */ 299 + mw32(MVS_PCS, 0); /* MVS_PCS */ 300 + /* init phys */ 301 + mvs_64xx_phy_hacks(mvi); 302 + 303 + /* enable auto port detection */ 304 + mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN); 305 + 306 + mw32(MVS_CMD_LIST_LO, mvi->slot_dma); 307 + mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); 308 + 309 + mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma); 310 + mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); 311 + 312 + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ); 313 + mw32(MVS_TX_LO, mvi->tx_dma); 314 + mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16); 315 + 316 + mw32(MVS_RX_CFG, MVS_RX_RING_SZ); 317 
+ mw32(MVS_RX_LO, mvi->rx_dma); 318 + mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16); 319 + 320 + for (i = 0; i < mvi->chip->n_phy; i++) { 321 + /* set phy local SAS address */ 322 + /* should set little endian SAS address to 64xx chip */ 323 + mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI, 324 + cpu_to_be64(mvi->phy[i].dev_sas_addr)); 325 + 326 + mvs_64xx_enable_xmt(mvi, i); 327 + 328 + mvs_64xx_phy_reset(mvi, i, 1); 329 + msleep(500); 330 + mvs_64xx_detect_porttype(mvi, i); 331 + } 332 + if (mvi->flags & MVF_FLAG_SOC) { 333 + /* set select registers */ 334 + writel(0x0E008000, regs + 0x000); 335 + writel(0x59000008, regs + 0x004); 336 + writel(0x20, regs + 0x008); 337 + writel(0x20, regs + 0x00c); 338 + writel(0x20, regs + 0x010); 339 + writel(0x20, regs + 0x014); 340 + writel(0x20, regs + 0x018); 341 + writel(0x20, regs + 0x01c); 342 + } 343 + for (i = 0; i < mvi->chip->n_phy; i++) { 344 + /* clear phy int status */ 345 + tmp = mvs_read_port_irq_stat(mvi, i); 346 + tmp &= ~PHYEV_SIG_FIS; 347 + mvs_write_port_irq_stat(mvi, i, tmp); 348 + 349 + /* set phy int mask */ 350 + tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | 351 + PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR | 352 + PHYEV_DEC_ERR; 353 + mvs_write_port_irq_mask(mvi, i, tmp); 354 + 355 + msleep(100); 356 + mvs_update_phyinfo(mvi, i, 1); 357 + } 358 + 359 + /* FIXME: update wide port bitmaps */ 360 + 361 + /* little endian for open address and command table, etc. 
*/ 362 + /* 363 + * it seems that ( from the spec ) turning on big-endian won't 364 + * do us any good on big-endian machines, need further confirmation 365 + */ 366 + cctl = mr32(MVS_CTL); 367 + cctl |= CCTL_ENDIAN_CMD; 368 + cctl |= CCTL_ENDIAN_DATA; 369 + cctl &= ~CCTL_ENDIAN_OPEN; 370 + cctl |= CCTL_ENDIAN_RSP; 371 + mw32_f(MVS_CTL, cctl); 372 + 373 + /* reset CMD queue */ 374 + tmp = mr32(MVS_PCS); 375 + tmp |= PCS_CMD_RST; 376 + mw32(MVS_PCS, tmp); 377 + /* interrupt coalescing may cause missing HW interrput in some case, 378 + * and the max count is 0x1ff, while our max slot is 0x200, 379 + * it will make count 0. 380 + */ 381 + tmp = 0; 382 + mw32(MVS_INT_COAL, tmp); 383 + 384 + tmp = 0x100; 385 + mw32(MVS_INT_COAL_TMOUT, tmp); 386 + 387 + /* ladies and gentlemen, start your engines */ 388 + mw32(MVS_TX_CFG, 0); 389 + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); 390 + mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN); 391 + /* enable CMD/CMPL_Q/RESP mode */ 392 + mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | 393 + PCS_CMD_EN | PCS_CMD_STOP_ERR); 394 + 395 + /* enable completion queue interrupt */ 396 + tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | 397 + CINT_DMA_PCIE); 398 + 399 + mw32(MVS_INT_MASK, tmp); 400 + 401 + /* Enable SRS interrupt */ 402 + mw32(MVS_INT_MASK_SRS_0, 0xFFFF); 403 + 404 + return 0; 405 + } 406 + 407 + static int mvs_64xx_ioremap(struct mvs_info *mvi) 408 + { 409 + if (!mvs_ioremap(mvi, 4, 2)) 410 + return 0; 411 + return -1; 412 + } 413 + 414 + static void mvs_64xx_iounmap(struct mvs_info *mvi) 415 + { 416 + mvs_iounmap(mvi->regs); 417 + mvs_iounmap(mvi->regs_ex); 418 + } 419 + 420 + static void mvs_64xx_interrupt_enable(struct mvs_info *mvi) 57 421 { 58 422 void __iomem *regs = mvi->regs; 59 423 u32 tmp; 60 424 61 - /* workaround for SATA R-ERR, to ignore phy glitch */ 62 - tmp = mvs_cr32(regs, CMD_PHY_TIMER); 63 - tmp &= ~(1 << 9); 64 - tmp |= (1 << 10); 65 - mvs_cw32(regs, CMD_PHY_TIMER, tmp); 66 - 67 - /* enable 
retry 127 times */ 68 - mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f); 69 - 70 - /* extend open frame timeout to max */ 71 - tmp = mvs_cr32(regs, CMD_SAS_CTL0); 72 - tmp &= ~0xffff; 73 - tmp |= 0x3fff; 74 - mvs_cw32(regs, CMD_SAS_CTL0, tmp); 75 - 76 - /* workaround for WDTIMEOUT , set to 550 ms */ 77 - mvs_cw32(regs, CMD_WD_TIMER, 0x86470); 78 - 79 - /* not to halt for different port op during wideport link change */ 80 - mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d); 81 - 82 - /* workaround for Seagate disk not-found OOB sequence, recv 83 - * COMINIT before sending out COMWAKE */ 84 - tmp = mvs_cr32(regs, CMD_PHY_MODE_21); 85 - tmp &= 0x0000ffff; 86 - tmp |= 0x00fa0000; 87 - mvs_cw32(regs, CMD_PHY_MODE_21, tmp); 88 - 89 - tmp = mvs_cr32(regs, CMD_PHY_TIMER); 90 - tmp &= 0x1fffffff; 91 - tmp |= (2U << 29); /* 8 ms retry */ 92 - mvs_cw32(regs, CMD_PHY_TIMER, tmp); 93 - 94 - /* TEST - for phy decoding error, adjust voltage levels */ 95 - mw32(P0_VSR_ADDR + 0, 0x8); 96 - mw32(P0_VSR_DATA + 0, 0x2F0); 97 - 98 - mw32(P0_VSR_ADDR + 8, 0x8); 99 - mw32(P0_VSR_DATA + 8, 0x2F0); 100 - 101 - mw32(P0_VSR_ADDR + 16, 0x8); 102 - mw32(P0_VSR_DATA + 16, 0x2F0); 103 - 104 - mw32(P0_VSR_ADDR + 24, 0x8); 105 - mw32(P0_VSR_DATA + 24, 0x2F0); 106 - 425 + tmp = mr32(MVS_GBL_CTL); 426 + mw32(MVS_GBL_CTL, tmp | INT_EN); 107 427 } 108 428 109 - void mvs_hba_interrupt_enable(struct mvs_info *mvi) 429 + static void mvs_64xx_interrupt_disable(struct mvs_info *mvi) 110 430 { 111 431 void __iomem *regs = mvi->regs; 112 432 u32 tmp; 113 433 114 - tmp = mr32(GBL_CTL); 115 - 116 - mw32(GBL_CTL, tmp | INT_EN); 434 + tmp = mr32(MVS_GBL_CTL); 435 + mw32(MVS_GBL_CTL, tmp & ~INT_EN); 117 436 } 118 437 119 - void mvs_hba_interrupt_disable(struct mvs_info *mvi) 438 + static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq) 439 + { 440 + void __iomem *regs = mvi->regs; 441 + u32 stat; 442 + 443 + if (!(mvi->flags & MVF_FLAG_SOC)) { 444 + stat = mr32(MVS_GBL_INT_STAT); 445 + 446 + if (stat == 0 || stat == 
0xffffffff) 447 + return 0; 448 + } else 449 + stat = 1; 450 + return stat; 451 + } 452 + 453 + static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat) 454 + { 455 + void __iomem *regs = mvi->regs; 456 + 457 + /* clear CMD_CMPLT ASAP */ 458 + mw32_f(MVS_INT_STAT, CINT_DONE); 459 + #ifndef MVS_USE_TASKLET 460 + spin_lock(&mvi->lock); 461 + #endif 462 + mvs_int_full(mvi); 463 + #ifndef MVS_USE_TASKLET 464 + spin_unlock(&mvi->lock); 465 + #endif 466 + return IRQ_HANDLED; 467 + } 468 + 469 + static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx) 470 + { 471 + u32 tmp; 472 + mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32)); 473 + mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32)); 474 + do { 475 + tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3)); 476 + } while (tmp & 1 << (slot_idx % 32)); 477 + do { 478 + tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3)); 479 + } while (tmp & 1 << (slot_idx % 32)); 480 + } 481 + 482 + static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, 483 + u32 tfs) 120 484 { 121 485 void __iomem *regs = mvi->regs; 122 486 u32 tmp; 123 487 124 - tmp = mr32(GBL_CTL); 125 - 126 - mw32(GBL_CTL, tmp & ~INT_EN); 488 + if (type == PORT_TYPE_SATA) { 489 + tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs); 490 + mw32(MVS_INT_STAT_SRS_0, tmp); 491 + } 492 + mw32(MVS_INT_STAT, CINT_CI_STOP); 493 + tmp = mr32(MVS_PCS) | 0xFF00; 494 + mw32(MVS_PCS, tmp); 127 495 } 128 496 129 - void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port) 497 + static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) 130 498 { 131 499 void __iomem *regs = mvi->regs; 132 500 u32 tmp, offs; 133 - u8 *tfs = &port->taskfileset; 134 501 135 502 if (*tfs == MVS_ID_NOT_MAPPED) 136 503 return; 137 504 138 505 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); 139 506 if (*tfs < 16) { 140 - tmp = mr32(PCS); 141 - mw32(PCS, tmp & ~offs); 507 + tmp = mr32(MVS_PCS); 508 + mw32(MVS_PCS, tmp & ~offs); 142 509 } 
else { 143 - tmp = mr32(CTL); 144 - mw32(CTL, tmp & ~offs); 510 + tmp = mr32(MVS_CTL); 511 + mw32(MVS_CTL, tmp & ~offs); 145 512 } 146 513 147 - tmp = mr32(INT_STAT_SRS) & (1U << *tfs); 514 + tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs); 148 515 if (tmp) 149 - mw32(INT_STAT_SRS, tmp); 516 + mw32(MVS_INT_STAT_SRS_0, tmp); 150 517 151 518 *tfs = MVS_ID_NOT_MAPPED; 519 + return; 152 520 } 153 521 154 - u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port) 522 + static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs) 155 523 { 156 524 int i; 157 525 u32 tmp, offs; 158 526 void __iomem *regs = mvi->regs; 159 527 160 - if (port->taskfileset != MVS_ID_NOT_MAPPED) 528 + if (*tfs != MVS_ID_NOT_MAPPED) 161 529 return 0; 162 530 163 - tmp = mr32(PCS); 531 + tmp = mr32(MVS_PCS); 164 532 165 533 for (i = 0; i < mvi->chip->srs_sz; i++) { 166 534 if (i == 16) 167 - tmp = mr32(CTL); 535 + tmp = mr32(MVS_CTL); 168 536 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); 169 537 if (!(tmp & offs)) { 170 - port->taskfileset = i; 538 + *tfs = i; 171 539 172 540 if (i < 16) 173 - mw32(PCS, tmp | offs); 541 + mw32(MVS_PCS, tmp | offs); 174 542 else 175 - mw32(CTL, tmp | offs); 176 - tmp = mr32(INT_STAT_SRS) & (1U << i); 543 + mw32(MVS_CTL, tmp | offs); 544 + tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i); 177 545 if (tmp) 178 - mw32(INT_STAT_SRS, tmp); 546 + mw32(MVS_INT_STAT_SRS_0, tmp); 179 547 return 0; 180 548 } 181 549 } 182 550 return MVS_ID_NOT_MAPPED; 183 551 } 552 + 553 + void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd) 554 + { 555 + int i; 556 + struct scatterlist *sg; 557 + struct mvs_prd *buf_prd = prd; 558 + for_each_sg(scatter, sg, nr, i) { 559 + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); 560 + buf_prd->len = cpu_to_le32(sg_dma_len(sg)); 561 + buf_prd++; 562 + } 563 + } 564 + 565 + static int mvs_64xx_oob_done(struct mvs_info *mvi, int i) 566 + { 567 + u32 phy_st; 568 + mvs_write_port_cfg_addr(mvi, i, 569 + PHYR_PHY_STAT); 570 + 
phy_st = mvs_read_port_cfg_data(mvi, i); 571 + if (phy_st & PHY_OOB_DTCTD) 572 + return 1; 573 + return 0; 574 + } 575 + 576 + static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i, 577 + struct sas_identify_frame *id) 578 + 579 + { 580 + struct mvs_phy *phy = &mvi->phy[i]; 581 + struct asd_sas_phy *sas_phy = &phy->sas_phy; 582 + 583 + sas_phy->linkrate = 584 + (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> 585 + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; 586 + 587 + phy->minimum_linkrate = 588 + (phy->phy_status & 589 + PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; 590 + phy->maximum_linkrate = 591 + (phy->phy_status & 592 + PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; 593 + 594 + mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); 595 + phy->dev_info = mvs_read_port_cfg_data(mvi, i); 596 + 597 + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); 598 + phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); 599 + 600 + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); 601 + phy->att_dev_sas_addr = 602 + (u64) mvs_read_port_cfg_data(mvi, i) << 32; 603 + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); 604 + phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); 605 + phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr); 606 + } 607 + 608 + static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i) 609 + { 610 + u32 tmp; 611 + struct mvs_phy *phy = &mvi->phy[i]; 612 + /* workaround for HW phy decoding error on 1.5g disk drive */ 613 + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); 614 + tmp = mvs_read_port_vsr_data(mvi, i); 615 + if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> 616 + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == 617 + SAS_LINK_RATE_1_5_GBPS) 618 + tmp &= ~PHY_MODE6_LATECLK; 619 + else 620 + tmp |= PHY_MODE6_LATECLK; 621 + mvs_write_port_vsr_data(mvi, i, tmp); 622 + } 623 + 624 + void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, 625 + struct sas_phy_linkrates *rates) 626 + { 627 + u32 lrmin = 0, lrmax = 0; 628 + u32 
tmp; 629 + 630 + tmp = mvs_read_phy_ctl(mvi, phy_id); 631 + lrmin = (rates->minimum_linkrate << 8); 632 + lrmax = (rates->maximum_linkrate << 12); 633 + 634 + if (lrmin) { 635 + tmp &= ~(0xf << 8); 636 + tmp |= lrmin; 637 + } 638 + if (lrmax) { 639 + tmp &= ~(0xf << 12); 640 + tmp |= lrmax; 641 + } 642 + mvs_write_phy_ctl(mvi, phy_id, tmp); 643 + mvs_64xx_phy_reset(mvi, phy_id, 1); 644 + } 645 + 646 + static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi) 647 + { 648 + u32 tmp; 649 + void __iomem *regs = mvi->regs; 650 + tmp = mr32(MVS_PCS); 651 + mw32(MVS_PCS, tmp & 0xFFFF); 652 + mw32(MVS_PCS, tmp); 653 + tmp = mr32(MVS_CTL); 654 + mw32(MVS_CTL, tmp & 0xFFFF); 655 + mw32(MVS_CTL, tmp); 656 + } 657 + 658 + 659 + u32 mvs_64xx_spi_read_data(struct mvs_info *mvi) 660 + { 661 + void __iomem *regs = mvi->regs_ex; 662 + return ior32(SPI_DATA_REG_64XX); 663 + } 664 + 665 + void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data) 666 + { 667 + void __iomem *regs = mvi->regs_ex; 668 + iow32(SPI_DATA_REG_64XX, data); 669 + } 670 + 671 + 672 + int mvs_64xx_spi_buildcmd(struct mvs_info *mvi, 673 + u32 *dwCmd, 674 + u8 cmd, 675 + u8 read, 676 + u8 length, 677 + u32 addr 678 + ) 679 + { 680 + u32 dwTmp; 681 + 682 + dwTmp = ((u32)cmd << 24) | ((u32)length << 19); 683 + if (read) 684 + dwTmp |= 1U<<23; 685 + 686 + if (addr != MV_MAX_U32) { 687 + dwTmp |= 1U<<22; 688 + dwTmp |= (addr & 0x0003FFFF); 689 + } 690 + 691 + *dwCmd = dwTmp; 692 + return 0; 693 + } 694 + 695 + 696 + int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) 697 + { 698 + void __iomem *regs = mvi->regs_ex; 699 + int retry; 700 + 701 + for (retry = 0; retry < 1; retry++) { 702 + iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE); 703 + iow32(SPI_CMD_REG_64XX, cmd); 704 + iow32(SPI_CTRL_REG_64XX, 705 + SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART); 706 + } 707 + 708 + return 0; 709 + } 710 + 711 + int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) 712 + { 713 + void __iomem *regs = 
mvi->regs_ex; 714 + u32 i, dwTmp; 715 + 716 + for (i = 0; i < timeout; i++) { 717 + dwTmp = ior32(SPI_CTRL_REG_64XX); 718 + if (!(dwTmp & SPI_CTRL_SPISTART)) 719 + return 0; 720 + msleep(10); 721 + } 722 + 723 + return -1; 724 + } 725 + 726 + #ifndef DISABLE_HOTPLUG_DMA_FIX 727 + void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) 728 + { 729 + int i; 730 + struct mvs_prd *buf_prd = prd; 731 + buf_prd += from; 732 + for (i = 0; i < MAX_SG_ENTRY - from; i++) { 733 + buf_prd->addr = cpu_to_le64(buf_dma); 734 + buf_prd->len = cpu_to_le32(buf_len); 735 + ++buf_prd; 736 + } 737 + } 738 + #endif 739 + 740 + const struct mvs_dispatch mvs_64xx_dispatch = { 741 + "mv64xx", 742 + mvs_64xx_init, 743 + NULL, 744 + mvs_64xx_ioremap, 745 + mvs_64xx_iounmap, 746 + mvs_64xx_isr, 747 + mvs_64xx_isr_status, 748 + mvs_64xx_interrupt_enable, 749 + mvs_64xx_interrupt_disable, 750 + mvs_read_phy_ctl, 751 + mvs_write_phy_ctl, 752 + mvs_read_port_cfg_data, 753 + mvs_write_port_cfg_data, 754 + mvs_write_port_cfg_addr, 755 + mvs_read_port_vsr_data, 756 + mvs_write_port_vsr_data, 757 + mvs_write_port_vsr_addr, 758 + mvs_read_port_irq_stat, 759 + mvs_write_port_irq_stat, 760 + mvs_read_port_irq_mask, 761 + mvs_write_port_irq_mask, 762 + mvs_get_sas_addr, 763 + mvs_64xx_command_active, 764 + mvs_64xx_issue_stop, 765 + mvs_start_delivery, 766 + mvs_rx_update, 767 + mvs_int_full, 768 + mvs_64xx_assign_reg_set, 769 + mvs_64xx_free_reg_set, 770 + mvs_get_prd_size, 771 + mvs_get_prd_count, 772 + mvs_64xx_make_prd, 773 + mvs_64xx_detect_porttype, 774 + mvs_64xx_oob_done, 775 + mvs_64xx_fix_phy_info, 776 + mvs_64xx_phy_work_around, 777 + mvs_64xx_phy_set_link_rate, 778 + mvs_hw_max_link_rate, 779 + mvs_64xx_phy_disable, 780 + mvs_64xx_phy_enable, 781 + mvs_64xx_phy_reset, 782 + mvs_64xx_stp_reset, 783 + mvs_64xx_clear_active_cmds, 784 + mvs_64xx_spi_read_data, 785 + mvs_64xx_spi_write_data, 786 + mvs_64xx_spi_buildcmd, 787 + mvs_64xx_spi_issuecmd, 788 + 
mvs_64xx_spi_waitdataready, 789 + #ifndef DISABLE_HOTPLUG_DMA_FIX 790 + mvs_64xx_fix_dma, 791 + #endif 792 + }; 184 793
+67 -8
drivers/scsi/mvsas/mv_64xx.h
··· 1 + /* 2 + * Marvell 88SE64xx hardware specific head file 3 + * 4 + * Copyright 2007 Red Hat, Inc. 5 + * Copyright 2008 Marvell. <kewei@marvell.com> 6 + * 7 + * This file is licensed under GPLv2. 8 + * 9 + * This program is free software; you can redistribute it and/or 10 + * modify it under the terms of the GNU General Public License as 11 + * published by the Free Software Foundation; version 2 of the 12 + * License. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; if not, write to the Free Software 21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 22 + * USA 23 + */ 24 + 1 25 #ifndef _MVS64XX_REG_H_ 2 26 #define _MVS64XX_REG_H_ 27 + 28 + #include <linux/types.h> 29 + 30 + #define MAX_LINK_RATE SAS_LINK_RATE_3_0_GBPS 3 31 4 32 /* enhanced mode registers (BAR4) */ 5 33 enum hw_registers { 6 34 MVS_GBL_CTL = 0x04, /* global control */ 7 35 MVS_GBL_INT_STAT = 0x08, /* global irq status */ 8 36 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ 37 + 38 + MVS_PHY_CTL = 0x40, /* SOC PHY Control */ 39 + MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */ 40 + 9 41 MVS_GBL_PORT_TYPE = 0xa0, /* port type */ 10 42 11 43 MVS_CTL = 0x100, /* SAS/SATA port configuration */ ··· 62 30 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ 63 31 MVS_INT_STAT = 0x150, /* Central int status */ 64 32 MVS_INT_MASK = 0x154, /* Central int enable */ 65 - MVS_INT_STAT_SRS = 0x158, /* SATA register set status */ 66 - MVS_INT_MASK_SRS = 0x15C, 33 + MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */ 34 + MVS_INT_MASK_SRS_0 = 0x15C, 67 35 68 36 /* ports 1-3 follow after this */ 69 37 MVS_P0_INT_STAT = 0x160, /* port0 interrupt 
status */ 70 38 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ 71 - MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */ 72 - MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */ 39 + /* ports 5-7 follow after this */ 40 + MVS_P4_INT_STAT = 0x200, /* Port4 interrupt status */ 41 + MVS_P4_INT_MASK = 0x204, /* Port4 interrupt enable mask */ 73 42 74 43 /* ports 1-3 follow after this */ 75 44 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ 45 + /* ports 5-7 follow after this */ 76 46 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ 77 47 78 48 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ ··· 83 49 /* ports 1-3 follow after this */ 84 50 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ 85 51 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ 86 - MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */ 87 - MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */ 52 + /* ports 5-7 follow after this */ 53 + MVS_P4_CFG_ADDR = 0x230, /* Port4 config address */ 54 + MVS_P4_CFG_DATA = 0x234, /* Port4 config data */ 88 55 89 56 /* ports 1-3 follow after this */ 90 57 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ 91 58 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ 92 - MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */ 93 - MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */ 59 + /* ports 5-7 follow after this */ 60 + MVS_P4_VSR_ADDR = 0x250, /* port4 VSR addr */ 61 + MVS_P4_VSR_DATA = 0x254, /* port4 VSR data */ 94 62 }; 95 63 96 64 enum pci_cfg_registers { 97 65 PCR_PHY_CTL = 0x40, 98 66 PCR_PHY_CTL2 = 0x90, 99 67 PCR_DEV_CTRL = 0xE8, 68 + PCR_LINK_STAT = 0xF2, 100 69 }; 101 70 102 71 /* SAS/SATA Vendor Specific Port Registers */ ··· 120 83 VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */ 121 84 }; 122 85 86 + enum chip_register_bits { 87 + PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), 88 + PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), 89 + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), 90 + PHY_NEG_SPP_PHYS_LINK_RATE_MASK = 91 + 
(0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), 92 + }; 93 + 94 + #define MAX_SG_ENTRY 64 95 + 123 96 struct mvs_prd { 124 97 __le64 addr; /* 64-bit buffer address */ 125 98 __le32 reserved; 126 99 __le32 len; /* 16-bit length */ 127 100 }; 101 + 102 + #define SPI_CTRL_REG 0xc0 103 + #define SPI_CTRL_VENDOR_ENABLE (1U<<29) 104 + #define SPI_CTRL_SPIRDY (1U<<22) 105 + #define SPI_CTRL_SPISTART (1U<<20) 106 + 107 + #define SPI_CMD_REG 0xc4 108 + #define SPI_DATA_REG 0xc8 109 + 110 + #define SPI_CTRL_REG_64XX 0x10 111 + #define SPI_CMD_REG_64XX 0x14 112 + #define SPI_DATA_REG_64XX 0x18 128 113 129 114 #endif
+672
drivers/scsi/mvsas/mv_94xx.c
··· 1 + /* 2 + * Marvell 88SE94xx hardware specific 3 + * 4 + * Copyright 2007 Red Hat, Inc. 5 + * Copyright 2008 Marvell. <kewei@marvell.com> 6 + * 7 + * This file is licensed under GPLv2. 8 + * 9 + * This program is free software; you can redistribute it and/or 10 + * modify it under the terms of the GNU General Public License as 11 + * published by the Free Software Foundation; version 2 of the 12 + * License. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; if not, write to the Free Software 21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 22 + * USA 23 + */ 24 + 25 + #include "mv_sas.h" 26 + #include "mv_94xx.h" 27 + #include "mv_chips.h" 28 + 29 + static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i) 30 + { 31 + u32 reg; 32 + struct mvs_phy *phy = &mvi->phy[i]; 33 + u32 phy_status; 34 + 35 + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3); 36 + reg = mvs_read_port_vsr_data(mvi, i); 37 + phy_status = ((reg & 0x3f0000) >> 16) & 0xff; 38 + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); 39 + switch (phy_status) { 40 + case 0x10: 41 + phy->phy_type |= PORT_TYPE_SAS; 42 + break; 43 + case 0x1d: 44 + default: 45 + phy->phy_type |= PORT_TYPE_SATA; 46 + break; 47 + } 48 + } 49 + 50 + static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id) 51 + { 52 + void __iomem *regs = mvi->regs; 53 + u32 tmp; 54 + 55 + tmp = mr32(MVS_PCS); 56 + tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); 57 + mw32(MVS_PCS, tmp); 58 + } 59 + 60 + static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) 61 + { 62 + u32 tmp; 63 + 64 + tmp = mvs_read_port_irq_stat(mvi, phy_id); 65 + tmp &= 
~PHYEV_RDY_CH; 66 + mvs_write_port_irq_stat(mvi, phy_id, tmp); 67 + if (hard) { 68 + tmp = mvs_read_phy_ctl(mvi, phy_id); 69 + tmp |= PHY_RST_HARD; 70 + mvs_write_phy_ctl(mvi, phy_id, tmp); 71 + do { 72 + tmp = mvs_read_phy_ctl(mvi, phy_id); 73 + } while (tmp & PHY_RST_HARD); 74 + } else { 75 + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT); 76 + tmp = mvs_read_port_vsr_data(mvi, phy_id); 77 + tmp |= PHY_RST; 78 + mvs_write_port_vsr_data(mvi, phy_id, tmp); 79 + } 80 + } 81 + 82 + static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id) 83 + { 84 + u32 tmp; 85 + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); 86 + tmp = mvs_read_port_vsr_data(mvi, phy_id); 87 + mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000); 88 + } 89 + 90 + static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id) 91 + { 92 + mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4); 93 + mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1); 94 + mvs_write_port_vsr_addr(mvi, phy_id, 0x104); 95 + mvs_write_port_vsr_data(mvi, phy_id, 0x00018080); 96 + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); 97 + mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff); 98 + } 99 + 100 + static int __devinit mvs_94xx_init(struct mvs_info *mvi) 101 + { 102 + void __iomem *regs = mvi->regs; 103 + int i; 104 + u32 tmp, cctl; 105 + 106 + mvs_show_pcie_usage(mvi); 107 + if (mvi->flags & MVF_FLAG_SOC) { 108 + tmp = mr32(MVS_PHY_CTL); 109 + tmp &= ~PCTL_PWR_OFF; 110 + tmp |= PCTL_PHY_DSBL; 111 + mw32(MVS_PHY_CTL, tmp); 112 + } 113 + 114 + /* Init Chip */ 115 + /* make sure RST is set; HBA_RST /should/ have done that for us */ 116 + cctl = mr32(MVS_CTL) & 0xFFFF; 117 + if (cctl & CCTL_RST) 118 + cctl &= ~CCTL_RST; 119 + else 120 + mw32_f(MVS_CTL, cctl | CCTL_RST); 121 + 122 + if (mvi->flags & MVF_FLAG_SOC) { 123 + tmp = mr32(MVS_PHY_CTL); 124 + tmp &= ~PCTL_PWR_OFF; 125 + tmp |= PCTL_COM_ON; 126 + tmp &= ~PCTL_PHY_DSBL; 127 + tmp |= PCTL_LINK_RST; 128 + mw32(MVS_PHY_CTL, tmp); 129 + msleep(100); 130 + 
tmp &= ~PCTL_LINK_RST; 131 + mw32(MVS_PHY_CTL, tmp); 132 + msleep(100); 133 + } 134 + 135 + /* reset control */ 136 + mw32(MVS_PCS, 0); /* MVS_PCS */ 137 + mw32(MVS_STP_REG_SET_0, 0); 138 + mw32(MVS_STP_REG_SET_1, 0); 139 + 140 + /* init phys */ 141 + mvs_phy_hacks(mvi); 142 + 143 + /* disable Multiplexing, enable phy implemented */ 144 + mw32(MVS_PORTS_IMP, 0xFF); 145 + 146 + 147 + mw32(MVS_PA_VSR_ADDR, 0x00000104); 148 + mw32(MVS_PA_VSR_PORT, 0x00018080); 149 + mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8); 150 + mw32(MVS_PA_VSR_PORT, 0x0084ffff); 151 + 152 + /* set LED blink when IO*/ 153 + mw32(MVS_PA_VSR_ADDR, 0x00000030); 154 + tmp = mr32(MVS_PA_VSR_PORT); 155 + tmp &= 0xFFFF00FF; 156 + tmp |= 0x00003300; 157 + mw32(MVS_PA_VSR_PORT, tmp); 158 + 159 + mw32(MVS_CMD_LIST_LO, mvi->slot_dma); 160 + mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); 161 + 162 + mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma); 163 + mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); 164 + 165 + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ); 166 + mw32(MVS_TX_LO, mvi->tx_dma); 167 + mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16); 168 + 169 + mw32(MVS_RX_CFG, MVS_RX_RING_SZ); 170 + mw32(MVS_RX_LO, mvi->rx_dma); 171 + mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16); 172 + 173 + for (i = 0; i < mvi->chip->n_phy; i++) { 174 + mvs_94xx_phy_disable(mvi, i); 175 + /* set phy local SAS address */ 176 + mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4, 177 + (mvi->phy[i].dev_sas_addr)); 178 + 179 + mvs_94xx_enable_xmt(mvi, i); 180 + mvs_94xx_phy_enable(mvi, i); 181 + 182 + mvs_94xx_phy_reset(mvi, i, 1); 183 + msleep(500); 184 + mvs_94xx_detect_porttype(mvi, i); 185 + } 186 + 187 + if (mvi->flags & MVF_FLAG_SOC) { 188 + /* set select registers */ 189 + writel(0x0E008000, regs + 0x000); 190 + writel(0x59000008, regs + 0x004); 191 + writel(0x20, regs + 0x008); 192 + writel(0x20, regs + 0x00c); 193 + writel(0x20, regs + 0x010); 194 + writel(0x20, regs + 0x014); 195 + writel(0x20, regs + 0x018); 196 + writel(0x20, regs + 
0x01c); 197 + } 198 + for (i = 0; i < mvi->chip->n_phy; i++) { 199 + /* clear phy int status */ 200 + tmp = mvs_read_port_irq_stat(mvi, i); 201 + tmp &= ~PHYEV_SIG_FIS; 202 + mvs_write_port_irq_stat(mvi, i, tmp); 203 + 204 + /* set phy int mask */ 205 + tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | 206 + PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ; 207 + mvs_write_port_irq_mask(mvi, i, tmp); 208 + 209 + msleep(100); 210 + mvs_update_phyinfo(mvi, i, 1); 211 + } 212 + 213 + /* FIXME: update wide port bitmaps */ 214 + 215 + /* little endian for open address and command table, etc. */ 216 + /* 217 + * it seems that ( from the spec ) turning on big-endian won't 218 + * do us any good on big-endian machines, need further confirmation 219 + */ 220 + cctl = mr32(MVS_CTL); 221 + cctl |= CCTL_ENDIAN_CMD; 222 + cctl |= CCTL_ENDIAN_DATA; 223 + cctl &= ~CCTL_ENDIAN_OPEN; 224 + cctl |= CCTL_ENDIAN_RSP; 225 + mw32_f(MVS_CTL, cctl); 226 + 227 + /* reset CMD queue */ 228 + tmp = mr32(MVS_PCS); 229 + tmp |= PCS_CMD_RST; 230 + mw32(MVS_PCS, tmp); 231 + /* interrupt coalescing may cause missing HW interrput in some case, 232 + * and the max count is 0x1ff, while our max slot is 0x200, 233 + * it will make count 0. 
234 + */ 235 + tmp = 0; 236 + mw32(MVS_INT_COAL, tmp); 237 + 238 + tmp = 0x100; 239 + mw32(MVS_INT_COAL_TMOUT, tmp); 240 + 241 + /* ladies and gentlemen, start your engines */ 242 + mw32(MVS_TX_CFG, 0); 243 + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); 244 + mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN); 245 + /* enable CMD/CMPL_Q/RESP mode */ 246 + mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN | 247 + PCS_CMD_EN | PCS_CMD_STOP_ERR); 248 + 249 + /* enable completion queue interrupt */ 250 + tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | 251 + CINT_DMA_PCIE); 252 + tmp |= CINT_PHY_MASK; 253 + mw32(MVS_INT_MASK, tmp); 254 + 255 + /* Enable SRS interrupt */ 256 + mw32(MVS_INT_MASK_SRS_0, 0xFFFF); 257 + 258 + return 0; 259 + } 260 + 261 + static int mvs_94xx_ioremap(struct mvs_info *mvi) 262 + { 263 + if (!mvs_ioremap(mvi, 2, -1)) { 264 + mvi->regs_ex = mvi->regs + 0x10200; 265 + mvi->regs += 0x20000; 266 + if (mvi->id == 1) 267 + mvi->regs += 0x4000; 268 + return 0; 269 + } 270 + return -1; 271 + } 272 + 273 + static void mvs_94xx_iounmap(struct mvs_info *mvi) 274 + { 275 + if (mvi->regs) { 276 + mvi->regs -= 0x20000; 277 + if (mvi->id == 1) 278 + mvi->regs -= 0x4000; 279 + mvs_iounmap(mvi->regs); 280 + } 281 + } 282 + 283 + static void mvs_94xx_interrupt_enable(struct mvs_info *mvi) 284 + { 285 + void __iomem *regs = mvi->regs_ex; 286 + u32 tmp; 287 + 288 + tmp = mr32(MVS_GBL_CTL); 289 + tmp |= (IRQ_SAS_A | IRQ_SAS_B); 290 + mw32(MVS_GBL_INT_STAT, tmp); 291 + writel(tmp, regs + 0x0C); 292 + writel(tmp, regs + 0x10); 293 + writel(tmp, regs + 0x14); 294 + writel(tmp, regs + 0x18); 295 + mw32(MVS_GBL_CTL, tmp); 296 + } 297 + 298 + static void mvs_94xx_interrupt_disable(struct mvs_info *mvi) 299 + { 300 + void __iomem *regs = mvi->regs_ex; 301 + u32 tmp; 302 + 303 + tmp = mr32(MVS_GBL_CTL); 304 + 305 + tmp &= ~(IRQ_SAS_A | IRQ_SAS_B); 306 + mw32(MVS_GBL_INT_STAT, tmp); 307 + writel(tmp, regs + 0x0C); 308 + writel(tmp, regs + 0x10); 309 + writel(tmp, 
regs + 0x14); 310 + writel(tmp, regs + 0x18); 311 + mw32(MVS_GBL_CTL, tmp); 312 + } 313 + 314 + static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq) 315 + { 316 + void __iomem *regs = mvi->regs_ex; 317 + u32 stat = 0; 318 + if (!(mvi->flags & MVF_FLAG_SOC)) { 319 + stat = mr32(MVS_GBL_INT_STAT); 320 + 321 + if (!(stat & (IRQ_SAS_A | IRQ_SAS_B))) 322 + return 0; 323 + } 324 + return stat; 325 + } 326 + 327 + static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat) 328 + { 329 + void __iomem *regs = mvi->regs; 330 + 331 + if (((stat & IRQ_SAS_A) && mvi->id == 0) || 332 + ((stat & IRQ_SAS_B) && mvi->id == 1)) { 333 + mw32_f(MVS_INT_STAT, CINT_DONE); 334 + #ifndef MVS_USE_TASKLET 335 + spin_lock(&mvi->lock); 336 + #endif 337 + mvs_int_full(mvi); 338 + #ifndef MVS_USE_TASKLET 339 + spin_unlock(&mvi->lock); 340 + #endif 341 + } 342 + return IRQ_HANDLED; 343 + } 344 + 345 + static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) 346 + { 347 + u32 tmp; 348 + mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32)); 349 + do { 350 + tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3)); 351 + } while (tmp & 1 << (slot_idx % 32)); 352 + } 353 + 354 + static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, 355 + u32 tfs) 356 + { 357 + void __iomem *regs = mvi->regs; 358 + u32 tmp; 359 + 360 + if (type == PORT_TYPE_SATA) { 361 + tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs); 362 + mw32(MVS_INT_STAT_SRS_0, tmp); 363 + } 364 + mw32(MVS_INT_STAT, CINT_CI_STOP); 365 + tmp = mr32(MVS_PCS) | 0xFF00; 366 + mw32(MVS_PCS, tmp); 367 + } 368 + 369 + static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) 370 + { 371 + void __iomem *regs = mvi->regs; 372 + u32 tmp; 373 + u8 reg_set = *tfs; 374 + 375 + if (*tfs == MVS_ID_NOT_MAPPED) 376 + return; 377 + 378 + mvi->sata_reg_set &= ~bit(reg_set); 379 + if (reg_set < 32) { 380 + w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set); 381 + tmp = mr32(MVS_INT_STAT_SRS_0) & 
(u32)mvi->sata_reg_set; 382 + if (tmp) 383 + mw32(MVS_INT_STAT_SRS_0, tmp); 384 + } else { 385 + w_reg_set_enable(reg_set, mvi->sata_reg_set); 386 + tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set; 387 + if (tmp) 388 + mw32(MVS_INT_STAT_SRS_1, tmp); 389 + } 390 + 391 + *tfs = MVS_ID_NOT_MAPPED; 392 + 393 + return; 394 + } 395 + 396 + static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs) 397 + { 398 + int i; 399 + void __iomem *regs = mvi->regs; 400 + 401 + if (*tfs != MVS_ID_NOT_MAPPED) 402 + return 0; 403 + 404 + i = mv_ffc64(mvi->sata_reg_set); 405 + if (i > 32) { 406 + mvi->sata_reg_set |= bit(i); 407 + w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32)); 408 + *tfs = i; 409 + return 0; 410 + } else if (i >= 0) { 411 + mvi->sata_reg_set |= bit(i); 412 + w_reg_set_enable(i, (u32)mvi->sata_reg_set); 413 + *tfs = i; 414 + return 0; 415 + } 416 + return MVS_ID_NOT_MAPPED; 417 + } 418 + 419 + static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd) 420 + { 421 + int i; 422 + struct scatterlist *sg; 423 + struct mvs_prd *buf_prd = prd; 424 + for_each_sg(scatter, sg, nr, i) { 425 + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); 426 + buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg)); 427 + buf_prd++; 428 + } 429 + } 430 + 431 + static int mvs_94xx_oob_done(struct mvs_info *mvi, int i) 432 + { 433 + u32 phy_st; 434 + phy_st = mvs_read_phy_ctl(mvi, i); 435 + if (phy_st & PHY_READY_MASK) /* phy ready */ 436 + return 1; 437 + return 0; 438 + } 439 + 440 + static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id, 441 + struct sas_identify_frame *id) 442 + { 443 + int i; 444 + u32 id_frame[7]; 445 + 446 + for (i = 0; i < 7; i++) { 447 + mvs_write_port_cfg_addr(mvi, port_id, 448 + CONFIG_ID_FRAME0 + i * 4); 449 + id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); 450 + } 451 + memcpy(id, id_frame, 28); 452 + } 453 + 454 + static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id, 455 + struct 
sas_identify_frame *id) 456 + { 457 + int i; 458 + u32 id_frame[7]; 459 + 460 + /* mvs_hexdump(28, (u8 *)id_frame, 0); */ 461 + for (i = 0; i < 7; i++) { 462 + mvs_write_port_cfg_addr(mvi, port_id, 463 + CONFIG_ATT_ID_FRAME0 + i * 4); 464 + id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); 465 + mv_dprintk("94xx phy %d atta frame %d %x.\n", 466 + port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]); 467 + } 468 + /* mvs_hexdump(28, (u8 *)id_frame, 0); */ 469 + memcpy(id, id_frame, 28); 470 + } 471 + 472 + static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id) 473 + { 474 + u32 att_dev_info = 0; 475 + 476 + att_dev_info |= id->dev_type; 477 + if (id->stp_iport) 478 + att_dev_info |= PORT_DEV_STP_INIT; 479 + if (id->smp_iport) 480 + att_dev_info |= PORT_DEV_SMP_INIT; 481 + if (id->ssp_iport) 482 + att_dev_info |= PORT_DEV_SSP_INIT; 483 + if (id->stp_tport) 484 + att_dev_info |= PORT_DEV_STP_TRGT; 485 + if (id->smp_tport) 486 + att_dev_info |= PORT_DEV_SMP_TRGT; 487 + if (id->ssp_tport) 488 + att_dev_info |= PORT_DEV_SSP_TRGT; 489 + 490 + att_dev_info |= (u32)id->phy_id<<24; 491 + return att_dev_info; 492 + } 493 + 494 + static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id) 495 + { 496 + return mvs_94xx_make_dev_info(id); 497 + } 498 + 499 + static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i, 500 + struct sas_identify_frame *id) 501 + { 502 + struct mvs_phy *phy = &mvi->phy[i]; 503 + struct asd_sas_phy *sas_phy = &phy->sas_phy; 504 + mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status); 505 + sas_phy->linkrate = 506 + (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> 507 + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; 508 + sas_phy->linkrate += 0x8; 509 + mv_dprintk("get link rate is %d\n", sas_phy->linkrate); 510 + phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; 511 + phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS; 512 + mvs_94xx_get_dev_identify_frame(mvi, i, id); 513 + phy->dev_info = mvs_94xx_make_dev_info(id); 514 + 515 
+ if (phy->phy_type & PORT_TYPE_SAS) { 516 + mvs_94xx_get_att_identify_frame(mvi, i, id); 517 + phy->att_dev_info = mvs_94xx_make_att_info(id); 518 + phy->att_dev_sas_addr = *(u64 *)id->sas_addr; 519 + } else { 520 + phy->att_dev_info = PORT_DEV_STP_TRGT | 1; 521 + } 522 + 523 + } 524 + 525 + void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, 526 + struct sas_phy_linkrates *rates) 527 + { 528 + /* TODO */ 529 + } 530 + 531 + static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi) 532 + { 533 + u32 tmp; 534 + void __iomem *regs = mvi->regs; 535 + tmp = mr32(MVS_STP_REG_SET_0); 536 + mw32(MVS_STP_REG_SET_0, 0); 537 + mw32(MVS_STP_REG_SET_0, tmp); 538 + tmp = mr32(MVS_STP_REG_SET_1); 539 + mw32(MVS_STP_REG_SET_1, 0); 540 + mw32(MVS_STP_REG_SET_1, tmp); 541 + } 542 + 543 + 544 + u32 mvs_94xx_spi_read_data(struct mvs_info *mvi) 545 + { 546 + void __iomem *regs = mvi->regs_ex - 0x10200; 547 + return mr32(SPI_RD_DATA_REG_94XX); 548 + } 549 + 550 + void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data) 551 + { 552 + void __iomem *regs = mvi->regs_ex - 0x10200; 553 + mw32(SPI_RD_DATA_REG_94XX, data); 554 + } 555 + 556 + 557 + int mvs_94xx_spi_buildcmd(struct mvs_info *mvi, 558 + u32 *dwCmd, 559 + u8 cmd, 560 + u8 read, 561 + u8 length, 562 + u32 addr 563 + ) 564 + { 565 + void __iomem *regs = mvi->regs_ex - 0x10200; 566 + u32 dwTmp; 567 + 568 + dwTmp = ((u32)cmd << 8) | ((u32)length << 4); 569 + if (read) 570 + dwTmp |= SPI_CTRL_READ_94XX; 571 + 572 + if (addr != MV_MAX_U32) { 573 + mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL)); 574 + dwTmp |= SPI_ADDR_VLD_94XX; 575 + } 576 + 577 + *dwCmd = dwTmp; 578 + return 0; 579 + } 580 + 581 + 582 + int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) 583 + { 584 + void __iomem *regs = mvi->regs_ex - 0x10200; 585 + mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX); 586 + 587 + return 0; 588 + } 589 + 590 + int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) 591 + { 592 + void __iomem 
*regs = mvi->regs_ex - 0x10200; 593 + u32 i, dwTmp; 594 + 595 + for (i = 0; i < timeout; i++) { 596 + dwTmp = mr32(SPI_CTRL_REG_94XX); 597 + if (!(dwTmp & SPI_CTRL_SpiStart_94XX)) 598 + return 0; 599 + msleep(10); 600 + } 601 + 602 + return -1; 603 + } 604 + 605 + #ifndef DISABLE_HOTPLUG_DMA_FIX 606 + void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) 607 + { 608 + int i; 609 + struct mvs_prd *buf_prd = prd; 610 + buf_prd += from; 611 + for (i = 0; i < MAX_SG_ENTRY - from; i++) { 612 + buf_prd->addr = cpu_to_le64(buf_dma); 613 + buf_prd->im_len.len = cpu_to_le32(buf_len); 614 + ++buf_prd; 615 + } 616 + } 617 + #endif 618 + 619 + const struct mvs_dispatch mvs_94xx_dispatch = { 620 + "mv94xx", 621 + mvs_94xx_init, 622 + NULL, 623 + mvs_94xx_ioremap, 624 + mvs_94xx_iounmap, 625 + mvs_94xx_isr, 626 + mvs_94xx_isr_status, 627 + mvs_94xx_interrupt_enable, 628 + mvs_94xx_interrupt_disable, 629 + mvs_read_phy_ctl, 630 + mvs_write_phy_ctl, 631 + mvs_read_port_cfg_data, 632 + mvs_write_port_cfg_data, 633 + mvs_write_port_cfg_addr, 634 + mvs_read_port_vsr_data, 635 + mvs_write_port_vsr_data, 636 + mvs_write_port_vsr_addr, 637 + mvs_read_port_irq_stat, 638 + mvs_write_port_irq_stat, 639 + mvs_read_port_irq_mask, 640 + mvs_write_port_irq_mask, 641 + mvs_get_sas_addr, 642 + mvs_94xx_command_active, 643 + mvs_94xx_issue_stop, 644 + mvs_start_delivery, 645 + mvs_rx_update, 646 + mvs_int_full, 647 + mvs_94xx_assign_reg_set, 648 + mvs_94xx_free_reg_set, 649 + mvs_get_prd_size, 650 + mvs_get_prd_count, 651 + mvs_94xx_make_prd, 652 + mvs_94xx_detect_porttype, 653 + mvs_94xx_oob_done, 654 + mvs_94xx_fix_phy_info, 655 + NULL, 656 + mvs_94xx_phy_set_link_rate, 657 + mvs_hw_max_link_rate, 658 + mvs_94xx_phy_disable, 659 + mvs_94xx_phy_enable, 660 + mvs_94xx_phy_reset, 661 + NULL, 662 + mvs_94xx_clear_active_cmds, 663 + mvs_94xx_spi_read_data, 664 + mvs_94xx_spi_write_data, 665 + mvs_94xx_spi_buildcmd, 666 + mvs_94xx_spi_issuecmd, 667 + mvs_94xx_spi_waitdataready, 
668 + #ifndef DISABLE_HOTPLUG_DMA_FIX 669 + mvs_94xx_fix_dma, 670 + #endif 671 + }; 672 +
+222
drivers/scsi/mvsas/mv_94xx.h
··· 1 + /* 2 + * Marvell 88SE94xx hardware specific head file 3 + * 4 + * Copyright 2007 Red Hat, Inc. 5 + * Copyright 2008 Marvell. <kewei@marvell.com> 6 + * 7 + * This file is licensed under GPLv2. 8 + * 9 + * This program is free software; you can redistribute it and/or 10 + * modify it under the terms of the GNU General Public License as 11 + * published by the Free Software Foundation; version 2 of the 12 + * License. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; if not, write to the Free Software 21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 22 + * USA 23 + */ 24 + 25 + #ifndef _MVS94XX_REG_H_ 26 + #define _MVS94XX_REG_H_ 27 + 28 + #include <linux/types.h> 29 + 30 + #define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS 31 + 32 + enum hw_registers { 33 + MVS_GBL_CTL = 0x04, /* global control */ 34 + MVS_GBL_INT_STAT = 0x00, /* global irq status */ 35 + MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ 36 + 37 + MVS_PHY_CTL = 0x40, /* SOC PHY Control */ 38 + MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */ 39 + 40 + MVS_GBL_PORT_TYPE = 0xa0, /* port type */ 41 + 42 + MVS_CTL = 0x100, /* SAS/SATA port configuration */ 43 + MVS_PCS = 0x104, /* SAS/SATA port control/status */ 44 + MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ 45 + MVS_CMD_LIST_HI = 0x10C, 46 + MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ 47 + MVS_RX_FIS_HI = 0x114, 48 + MVS_STP_REG_SET_0 = 0x118, /* STP/SATA Register Set Enable */ 49 + MVS_STP_REG_SET_1 = 0x11C, 50 + MVS_TX_CFG = 0x120, /* TX configuration */ 51 + MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ 52 + MVS_TX_HI = 0x128, 53 + 54 + MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ 55 
+ MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ 56 + MVS_RX_CFG = 0x134, /* RX configuration */ 57 + MVS_RX_LO = 0x138, /* RX (completion) ring addr */ 58 + MVS_RX_HI = 0x13C, 59 + MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ 60 + 61 + MVS_INT_COAL = 0x148, /* Int coalescing config */ 62 + MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ 63 + MVS_INT_STAT = 0x150, /* Central int status */ 64 + MVS_INT_MASK = 0x154, /* Central int enable */ 65 + MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */ 66 + MVS_INT_MASK_SRS_0 = 0x15C, 67 + MVS_INT_STAT_SRS_1 = 0x160, 68 + MVS_INT_MASK_SRS_1 = 0x164, 69 + MVS_NON_NCQ_ERR_0 = 0x168, /* SRS Non-specific NCQ Error */ 70 + MVS_NON_NCQ_ERR_1 = 0x16C, 71 + MVS_CMD_ADDR = 0x170, /* Command register port (addr) */ 72 + MVS_CMD_DATA = 0x174, /* Command register port (data) */ 73 + MVS_MEM_PARITY_ERR = 0x178, /* Memory parity error */ 74 + 75 + /* ports 1-3 follow after this */ 76 + MVS_P0_INT_STAT = 0x180, /* port0 interrupt status */ 77 + MVS_P0_INT_MASK = 0x184, /* port0 interrupt mask */ 78 + /* ports 5-7 follow after this */ 79 + MVS_P4_INT_STAT = 0x1A0, /* Port4 interrupt status */ 80 + MVS_P4_INT_MASK = 0x1A4, /* Port4 interrupt enable mask */ 81 + 82 + /* ports 1-3 follow after this */ 83 + MVS_P0_SER_CTLSTAT = 0x1D0, /* port0 serial control/status */ 84 + /* ports 5-7 follow after this */ 85 + MVS_P4_SER_CTLSTAT = 0x1E0, /* port4 serial control/status */ 86 + 87 + /* ports 1-3 follow after this */ 88 + MVS_P0_CFG_ADDR = 0x200, /* port0 phy register address */ 89 + MVS_P0_CFG_DATA = 0x204, /* port0 phy register data */ 90 + /* ports 5-7 follow after this */ 91 + MVS_P4_CFG_ADDR = 0x220, /* Port4 config address */ 92 + MVS_P4_CFG_DATA = 0x224, /* Port4 config data */ 93 + 94 + /* phys 1-3 follow after this */ 95 + MVS_P0_VSR_ADDR = 0x250, /* phy0 VSR address */ 96 + MVS_P0_VSR_DATA = 0x254, /* phy0 VSR data */ 97 + /* phys 1-3 follow after this */ 98 + /* multiplexing */ 99 + 
MVS_P4_VSR_ADDR = 0x250, /* phy4 VSR address */ 100 + MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */ 101 + MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */ 102 + MVS_PA_VSR_PORT = 0x294, /* All port VSR data */ 103 + }; 104 + 105 + enum pci_cfg_registers { 106 + PCR_PHY_CTL = 0x40, 107 + PCR_PHY_CTL2 = 0x90, 108 + PCR_DEV_CTRL = 0x78, 109 + PCR_LINK_STAT = 0x82, 110 + }; 111 + 112 + /* SAS/SATA Vendor Specific Port Registers */ 113 + enum sas_sata_vsp_regs { 114 + VSR_PHY_STAT = 0x00 * 4, /* Phy Status */ 115 + VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */ 116 + VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */ 117 + VSR_PHY_MODE3 = 0x03 * 4, /* pll */ 118 + VSR_PHY_MODE4 = 0x04 * 4, /* VCO */ 119 + VSR_PHY_MODE5 = 0x05 * 4, /* Rx */ 120 + VSR_PHY_MODE6 = 0x06 * 4, /* CDR */ 121 + VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */ 122 + VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */ 123 + VSR_PHY_MODE9 = 0x09 * 4, /* Test */ 124 + VSR_PHY_MODE10 = 0x0A * 4, /* Power */ 125 + VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */ 126 + VSR_PHY_VS0 = 0x0C * 4, /* Vednor Specific 0 */ 127 + VSR_PHY_VS1 = 0x0D * 4, /* Vednor Specific 1 */ 128 + }; 129 + 130 + enum chip_register_bits { 131 + PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), 132 + PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), 133 + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12), 134 + PHY_NEG_SPP_PHYS_LINK_RATE_MASK = 135 + (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), 136 + }; 137 + 138 + enum pci_interrupt_cause { 139 + /* MAIN_IRQ_CAUSE (R10200) Bits*/ 140 + IRQ_COM_IN_I2O_IOP0 = (1 << 0), 141 + IRQ_COM_IN_I2O_IOP1 = (1 << 1), 142 + IRQ_COM_IN_I2O_IOP2 = (1 << 2), 143 + IRQ_COM_IN_I2O_IOP3 = (1 << 3), 144 + IRQ_COM_OUT_I2O_HOS0 = (1 << 4), 145 + IRQ_COM_OUT_I2O_HOS1 = (1 << 5), 146 + IRQ_COM_OUT_I2O_HOS2 = (1 << 6), 147 + IRQ_COM_OUT_I2O_HOS3 = (1 << 7), 148 + IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8), 149 + IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9), 150 + IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10), 151 + IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11), 152 + IRQ_PCIF_DRBL0 = (1 << 12), 153 
+ IRQ_PCIF_DRBL1 = (1 << 13), 154 + IRQ_PCIF_DRBL2 = (1 << 14), 155 + IRQ_PCIF_DRBL3 = (1 << 15), 156 + IRQ_XOR_A = (1 << 16), 157 + IRQ_XOR_B = (1 << 17), 158 + IRQ_SAS_A = (1 << 18), 159 + IRQ_SAS_B = (1 << 19), 160 + IRQ_CPU_CNTRL = (1 << 20), 161 + IRQ_GPIO = (1 << 21), 162 + IRQ_UART = (1 << 22), 163 + IRQ_SPI = (1 << 23), 164 + IRQ_I2C = (1 << 24), 165 + IRQ_SGPIO = (1 << 25), 166 + IRQ_COM_ERR = (1 << 29), 167 + IRQ_I2O_ERR = (1 << 30), 168 + IRQ_PCIE_ERR = (1 << 31), 169 + }; 170 + 171 + #define MAX_SG_ENTRY 255 172 + 173 + struct mvs_prd_imt { 174 + __le32 len:22; 175 + u8 _r_a:2; 176 + u8 misc_ctl:4; 177 + u8 inter_sel:4; 178 + }; 179 + 180 + struct mvs_prd { 181 + /* 64-bit buffer address */ 182 + __le64 addr; 183 + /* 22-bit length */ 184 + struct mvs_prd_imt im_len; 185 + } __attribute__ ((packed)); 186 + 187 + #define SPI_CTRL_REG_94XX 0xc800 188 + #define SPI_ADDR_REG_94XX 0xc804 189 + #define SPI_WR_DATA_REG_94XX 0xc808 190 + #define SPI_RD_DATA_REG_94XX 0xc80c 191 + #define SPI_CTRL_READ_94XX (1U << 2) 192 + #define SPI_ADDR_VLD_94XX (1U << 1) 193 + #define SPI_CTRL_SpiStart_94XX (1U << 0) 194 + 195 + #define mv_ffc(x) ffz(x) 196 + 197 + static inline int 198 + mv_ffc64(u64 v) 199 + { 200 + int i; 201 + i = mv_ffc((u32)v); 202 + if (i >= 0) 203 + return i; 204 + i = mv_ffc((u32)(v>>32)); 205 + 206 + if (i != 0) 207 + return 32 + i; 208 + 209 + return -1; 210 + } 211 + 212 + #define r_reg_set_enable(i) \ 213 + (((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \ 214 + mr32(MVS_STP_REG_SET_0)) 215 + 216 + #define w_reg_set_enable(i, tmp) \ 217 + (((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \ 218 + mw32(MVS_STP_REG_SET_0, tmp)) 219 + 220 + extern const struct mvs_dispatch mvs_94xx_dispatch; 221 + #endif 222 +
+187 -25
drivers/scsi/mvsas/mv_chips.h
··· 1 + /* 2 + * Marvell 88SE64xx/88SE94xx register IO interface 3 + * 4 + * Copyright 2007 Red Hat, Inc. 5 + * Copyright 2008 Marvell. <kewei@marvell.com> 6 + * 7 + * This file is licensed under GPLv2. 8 + * 9 + * This program is free software; you can redistribute it and/or 10 + * modify it under the terms of the GNU General Public License as 11 + * published by the Free Software Foundation; version 2 of the 12 + * License. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; if not, write to the Free Software 21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 22 + * USA 23 + */ 24 + 25 + 1 26 #ifndef _MV_CHIPS_H_ 2 27 #define _MV_CHIPS_H_ 3 28 4 - #define mr32(reg) readl(regs + MVS_##reg) 5 - #define mw32(reg,val) writel((val), regs + MVS_##reg) 6 - #define mw32_f(reg,val) do { \ 7 - writel((val), regs + MVS_##reg); \ 8 - readl(regs + MVS_##reg); \ 9 - } while (0) 29 + #define mr32(reg) readl(regs + reg) 30 + #define mw32(reg, val) writel((val), regs + reg) 31 + #define mw32_f(reg, val) do { \ 32 + mw32(reg, val); \ 33 + mr32(reg); \ 34 + } while (0) 10 35 11 - static inline u32 mvs_cr32(void __iomem *regs, u32 addr) 36 + #define iow32(reg, val) outl(val, (unsigned long)(regs + reg)) 37 + #define ior32(reg) inl((unsigned long)(regs + reg)) 38 + #define iow16(reg, val) outw((unsigned long)(val, regs + reg)) 39 + #define ior16(reg) inw((unsigned long)(regs + reg)) 40 + #define iow8(reg, val) outb((unsigned long)(val, regs + reg)) 41 + #define ior8(reg) inb((unsigned long)(regs + reg)) 42 + 43 + static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr) 12 44 { 13 - mw32(CMD_ADDR, addr); 14 - return mr32(CMD_DATA); 45 + 
void __iomem *regs = mvi->regs; 46 + mw32(MVS_CMD_ADDR, addr); 47 + return mr32(MVS_CMD_DATA); 15 48 } 16 49 17 - static inline void mvs_cw32(void __iomem *regs, u32 addr, u32 val) 50 + static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val) 18 51 { 19 - mw32(CMD_ADDR, addr); 20 - mw32(CMD_DATA, val); 52 + void __iomem *regs = mvi->regs; 53 + mw32(MVS_CMD_ADDR, addr); 54 + mw32(MVS_CMD_DATA, val); 21 55 } 22 56 23 57 static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) 24 58 { 25 59 void __iomem *regs = mvi->regs; 26 - return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4): 27 - mr32(P4_SER_CTLSTAT + (port - 4) * 4); 60 + return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) : 61 + mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4); 28 62 } 29 63 30 64 static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) 31 65 { 32 66 void __iomem *regs = mvi->regs; 33 67 if (port < 4) 34 - mw32(P0_SER_CTLSTAT + port * 4, val); 68 + mw32(MVS_P0_SER_CTLSTAT + port * 4, val); 35 69 else 36 - mw32(P4_SER_CTLSTAT + (port - 4) * 4, val); 70 + mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val); 37 71 } 38 72 39 - static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port) 73 + static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off, 74 + u32 off2, u32 port) 40 75 { 41 76 void __iomem *regs = mvi->regs + off; 42 77 void __iomem *regs2 = mvi->regs + off2; 43 - return (port < 4)?readl(regs + port * 8): 78 + return (port < 4) ? 
readl(regs + port * 8) : 44 79 readl(regs2 + (port - 4) * 8); 45 80 } 46 81 ··· 96 61 MVS_P4_CFG_DATA, port); 97 62 } 98 63 99 - static inline void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val) 64 + static inline void mvs_write_port_cfg_data(struct mvs_info *mvi, 65 + u32 port, u32 val) 100 66 { 101 67 mvs_write_port(mvi, MVS_P0_CFG_DATA, 102 68 MVS_P4_CFG_DATA, port, val); 103 69 } 104 70 105 - static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr) 71 + static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi, 72 + u32 port, u32 addr) 106 73 { 107 74 mvs_write_port(mvi, MVS_P0_CFG_ADDR, 108 75 MVS_P4_CFG_ADDR, port, addr); 76 + mdelay(10); 109 77 } 110 78 111 79 static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) ··· 117 79 MVS_P4_VSR_DATA, port); 118 80 } 119 81 120 - static inline void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val) 82 + static inline void mvs_write_port_vsr_data(struct mvs_info *mvi, 83 + u32 port, u32 val) 121 84 { 122 85 mvs_write_port(mvi, MVS_P0_VSR_DATA, 123 86 MVS_P4_VSR_DATA, port, val); 124 87 } 125 88 126 - static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr) 89 + static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi, 90 + u32 port, u32 addr) 127 91 { 128 92 mvs_write_port(mvi, MVS_P0_VSR_ADDR, 129 93 MVS_P4_VSR_ADDR, port, addr); 94 + mdelay(10); 130 95 } 131 96 132 97 static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) ··· 138 97 MVS_P4_INT_STAT, port); 139 98 } 140 99 141 - static inline void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val) 100 + static inline void mvs_write_port_irq_stat(struct mvs_info *mvi, 101 + u32 port, u32 val) 142 102 { 143 103 mvs_write_port(mvi, MVS_P0_INT_STAT, 144 104 MVS_P4_INT_STAT, port, val); ··· 149 107 { 150 108 return mvs_read_port(mvi, MVS_P0_INT_MASK, 151 109 MVS_P4_INT_MASK, port); 110 + 152 111 } 153 112 154 - static inline 
void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val) 113 + static inline void mvs_write_port_irq_mask(struct mvs_info *mvi, 114 + u32 port, u32 val) 155 115 { 156 116 mvs_write_port(mvi, MVS_P0_INT_MASK, 157 117 MVS_P4_INT_MASK, port, val); 158 118 } 159 119 160 - #endif 120 + static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi) 121 + { 122 + u32 tmp; 123 + 124 + /* workaround for SATA R-ERR, to ignore phy glitch */ 125 + tmp = mvs_cr32(mvi, CMD_PHY_TIMER); 126 + tmp &= ~(1 << 9); 127 + tmp |= (1 << 10); 128 + mvs_cw32(mvi, CMD_PHY_TIMER, tmp); 129 + 130 + /* enable retry 127 times */ 131 + mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f); 132 + 133 + /* extend open frame timeout to max */ 134 + tmp = mvs_cr32(mvi, CMD_SAS_CTL0); 135 + tmp &= ~0xffff; 136 + tmp |= 0x3fff; 137 + mvs_cw32(mvi, CMD_SAS_CTL0, tmp); 138 + 139 + /* workaround for WDTIMEOUT , set to 550 ms */ 140 + mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000); 141 + 142 + /* not to halt for different port op during wideport link change */ 143 + mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d); 144 + 145 + /* workaround for Seagate disk not-found OOB sequence, recv 146 + * COMINIT before sending out COMWAKE */ 147 + tmp = mvs_cr32(mvi, CMD_PHY_MODE_21); 148 + tmp &= 0x0000ffff; 149 + tmp |= 0x00fa0000; 150 + mvs_cw32(mvi, CMD_PHY_MODE_21, tmp); 151 + 152 + tmp = mvs_cr32(mvi, CMD_PHY_TIMER); 153 + tmp &= 0x1fffffff; 154 + tmp |= (2U << 29); /* 8 ms retry */ 155 + mvs_cw32(mvi, CMD_PHY_TIMER, tmp); 156 + } 157 + 158 + static inline void mvs_int_sata(struct mvs_info *mvi) 159 + { 160 + u32 tmp; 161 + void __iomem *regs = mvi->regs; 162 + tmp = mr32(MVS_INT_STAT_SRS_0); 163 + if (tmp) 164 + mw32(MVS_INT_STAT_SRS_0, tmp); 165 + MVS_CHIP_DISP->clear_active_cmds(mvi); 166 + } 167 + 168 + static inline void mvs_int_full(struct mvs_info *mvi) 169 + { 170 + void __iomem *regs = mvi->regs; 171 + u32 tmp, stat; 172 + int i; 173 + 174 + stat = mr32(MVS_INT_STAT); 175 + mvs_int_rx(mvi, false); 176 + 177 + for (i 
= 0; i < mvi->chip->n_phy; i++) { 178 + tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); 179 + if (tmp) 180 + mvs_int_port(mvi, i, tmp); 181 + } 182 + 183 + if (stat & CINT_SRS) 184 + mvs_int_sata(mvi); 185 + 186 + mw32(MVS_INT_STAT, stat); 187 + } 188 + 189 + static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx) 190 + { 191 + void __iomem *regs = mvi->regs; 192 + mw32(MVS_TX_PROD_IDX, tx); 193 + } 194 + 195 + static inline u32 mvs_rx_update(struct mvs_info *mvi) 196 + { 197 + void __iomem *regs = mvi->regs; 198 + return mr32(MVS_RX_CONS_IDX); 199 + } 200 + 201 + static inline u32 mvs_get_prd_size(void) 202 + { 203 + return sizeof(struct mvs_prd); 204 + } 205 + 206 + static inline u32 mvs_get_prd_count(void) 207 + { 208 + return MAX_SG_ENTRY; 209 + } 210 + 211 + static inline void mvs_show_pcie_usage(struct mvs_info *mvi) 212 + { 213 + u16 link_stat, link_spd; 214 + const char *spd[] = { 215 + "UnKnown", 216 + "2.5", 217 + "5.0", 218 + }; 219 + if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0) 220 + return; 221 + 222 + pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat); 223 + link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS; 224 + if (link_spd >= 3) 225 + link_spd = 0; 226 + dev_printk(KERN_INFO, mvi->dev, 227 + "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n", 228 + (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS, 229 + spd[link_spd]); 230 + } 231 + 232 + static inline u32 mvs_hw_max_link_rate(void) 233 + { 234 + return MAX_LINK_RATE; 235 + } 236 + 237 + #endif /* _MV_CHIPS_H_ */ 238 +
+129 -68
drivers/scsi/mvsas/mv_defs.h
··· 1 1 /* 2 - mv_defs.h - Marvell 88SE6440 SAS/SATA support 3 - 4 - Copyright 2007 Red Hat, Inc. 5 - Copyright 2008 Marvell. <kewei@marvell.com> 6 - 7 - This program is free software; you can redistribute it and/or 8 - modify it under the terms of the GNU General Public License as 9 - published by the Free Software Foundation; either version 2, 10 - or (at your option) any later version. 11 - 12 - This program is distributed in the hope that it will be useful, 13 - but WITHOUT ANY WARRANTY; without even the implied warranty 14 - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 15 - See the GNU General Public License for more details. 16 - 17 - You should have received a copy of the GNU General Public 18 - License along with this program; see the file COPYING. If not, 19 - write to the Free Software Foundation, 675 Mass Ave, Cambridge, 20 - MA 02139, USA. 21 - 22 - */ 2 + * Marvell 88SE64xx/88SE94xx const head file 3 + * 4 + * Copyright 2007 Red Hat, Inc. 5 + * Copyright 2008 Marvell. <kewei@marvell.com> 6 + * 7 + * This file is licensed under GPLv2. 8 + * 9 + * This program is free software; you can redistribute it and/or 10 + * modify it under the terms of the GNU General Public License as 11 + * published by the Free Software Foundation; version 2 of the 12 + * License. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 
18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; if not, write to the Free Software 21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 22 + * USA 23 + */ 23 24 24 25 #ifndef _MV_DEFS_H_ 25 26 #define _MV_DEFS_H_ 26 27 28 + 29 + enum chip_flavors { 30 + chip_6320, 31 + chip_6440, 32 + chip_6485, 33 + chip_9480, 34 + chip_9180, 35 + }; 36 + 27 37 /* driver compile-time configuration */ 28 38 enum driver_configuration { 39 + MVS_SLOTS = 512, /* command slots */ 29 40 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ 30 41 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ 31 42 /* software requires power-of-2 32 43 ring size */ 44 + MVS_SOC_SLOTS = 64, 45 + MVS_SOC_TX_RING_SZ = MVS_SOC_SLOTS * 2, 46 + MVS_SOC_RX_RING_SZ = MVS_SOC_SLOTS * 2, 33 47 34 - MVS_SLOTS = 512, /* command slots */ 35 48 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ 36 49 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ 37 50 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ 38 51 MVS_OAF_SZ = 64, /* Open address frame buffer size */ 39 - 40 - MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */ 41 - 42 - MVS_QUEUE_SIZE = 30, /* Support Queue depth */ 43 - MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */ 52 + MVS_QUEUE_SIZE = 32, /* Support Queue depth */ 53 + MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */ 54 + MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2, 44 55 }; 45 56 46 57 /* unchangeable hardware details */ 47 58 enum hardware_details { 48 59 MVS_MAX_PHYS = 8, /* max. possible phys */ 49 60 MVS_MAX_PORTS = 8, /* max. 
possible ports */ 50 - MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100), 61 + MVS_SOC_PHYS = 4, /* soc phys */ 62 + MVS_SOC_PORTS = 4, /* soc phys */ 63 + MVS_MAX_DEVICES = 1024, /* max supported device */ 51 64 }; 52 65 53 66 /* peripheral registers (BAR2) */ ··· 146 133 CINT_PORT = (1U << 8), /* port0 event */ 147 134 CINT_PORT_MASK_OFFSET = 8, 148 135 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), 136 + CINT_PHY_MASK_OFFSET = 4, 137 + CINT_PHY_MASK = (0x0F << CINT_PHY_MASK_OFFSET), 149 138 150 139 /* TX (delivery) ring bits */ 151 140 TXQ_CMD_SHIFT = 29, ··· 157 142 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ 158 143 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ 159 144 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ 145 + TXQ_MODE_TARGET = 0, 146 + TXQ_MODE_INITIATOR = 1, 160 147 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */ 148 + TXQ_PRI_NORMAL = 0, 149 + TXQ_PRI_HIGH = 1, 161 150 TXQ_SRS_SHIFT = 20, /* SATA register set */ 162 151 TXQ_SRS_MASK = 0x7f, 163 152 TXQ_PHY_SHIFT = 12, /* PHY bitmap */ ··· 194 175 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */ 195 176 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ 196 177 178 + MCH_SSP_MODE_PASSTHRU = 1, 179 + MCH_SSP_MODE_NORMAL = 0, 197 180 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ 198 181 MCH_FBURST = (1U << 11), /* first burst (SSP) */ 199 182 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ ··· 220 199 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ 221 200 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ 222 201 PHY_RST = (1U << 0), /* phy reset */ 223 - PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), 224 - PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), 225 - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), 226 - PHY_NEG_SPP_PHYS_LINK_RATE_MASK = 227 - (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), 228 202 PHY_READY_MASK = (1U << 20), 229 203 230 204 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ 231 205 PHYEV_DEC_ERR = 
(1U << 24), /* Phy Decoding Error */ 206 + PHYEV_DCDR_ERR = (1U << 23), /* STP Deocder Error */ 207 + PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */ 232 208 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ 233 209 PHYEV_AN = (1U << 18), /* SATA async notification */ 234 210 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ ··· 247 229 /* MVS_PCS */ 248 230 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */ 249 231 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ 250 - PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */ 232 + PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6485 */ 251 233 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ 252 234 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ 235 + PCS_SATA_RETRY_2 = (1U << 6), /* For 9180 */ 253 236 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ 254 237 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ 255 238 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ ··· 265 246 PORT_DEV_SMP_INIT = (1U << 10), 266 247 PORT_DEV_STP_INIT = (1U << 9), 267 248 PORT_PHY_ID_MASK = (0xFFU << 24), 249 + PORT_SSP_TRGT_MASK = (0x1U << 19), 250 + PORT_SSP_INIT_MASK = (0x1U << 11), 268 251 PORT_DEV_TRGT_MASK = (0x7U << 17), 269 252 PORT_DEV_INIT_MASK = (0x7U << 9), 270 253 PORT_DEV_TYPE_MASK = (0x7U << 0), ··· 304 283 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ 305 284 PHYR_SATA_CTL = 0x18, /* SATA control */ 306 285 PHYR_PHY_STAT = 0x1C, /* PHY status */ 307 - PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ 308 - PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ 309 - PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ 310 - PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ 286 + PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ 287 + PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ 288 + PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ 289 + PHYR_SATA_SIG3 = 0x2c, /*port SATA signature 
FIS(Byte 12-15) */ 311 290 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ 312 291 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ 313 - PHYR_WIDE_PORT = 0x38, /* wide port participating */ 292 + PHYR_WIDE_PORT = 0x38, /* wide port participating */ 314 293 PHYR_CURRENT0 = 0x80, /* current connection info 0 */ 315 294 PHYR_CURRENT1 = 0x84, /* current connection info 1 */ 316 295 PHYR_CURRENT2 = 0x88, /* current connection info 2 */ 317 - }; 318 - 319 - enum mvs_info_flags { 320 - MVF_MSI = (1U << 0), /* MSI is enabled */ 321 - MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ 296 + CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */ 297 + CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */ 298 + CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */ 299 + CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */ 300 + CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */ 301 + CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 */ 302 + CONFIG_ID_FRAME6 = 0x118, /* Port device ID frame register 6 */ 303 + CONFIG_ATT_ID_FRAME0 = 0x11c, /* attached ID frame register 0 */ 304 + CONFIG_ATT_ID_FRAME1 = 0x120, /* attached ID frame register 1 */ 305 + CONFIG_ATT_ID_FRAME2 = 0x124, /* attached ID frame register 2 */ 306 + CONFIG_ATT_ID_FRAME3 = 0x128, /* attached ID frame register 3 */ 307 + CONFIG_ATT_ID_FRAME4 = 0x12c, /* attached ID frame register 4 */ 308 + CONFIG_ATT_ID_FRAME5 = 0x130, /* attached ID frame register 5 */ 309 + CONFIG_ATT_ID_FRAME6 = 0x134, /* attached ID frame register 6 */ 322 310 }; 323 311 324 312 enum sas_cmd_port_registers { ··· 335 305 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */ 336 306 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */ 337 307 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */ 338 - CMD_OOB_SPACE = 0x110, /* OOB space control register */ 339 - CMD_OOB_BURST = 0x114, /* OOB burst control register */ 308 + CMD_OOB_SPACE 
= 0x110, /* OOB space control register */ 309 + CMD_OOB_BURST = 0x114, /* OOB burst control register */ 340 310 CMD_PHY_TIMER = 0x118, /* PHY timer control register */ 341 - CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ 342 - CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ 311 + CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ 312 + CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ 343 313 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */ 344 314 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */ 345 315 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */ ··· 348 318 CMD_PL_TIMER = 0x138, /* PL timer register */ 349 319 CMD_WD_TIMER = 0x13c, /* WD timer register */ 350 320 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */ 351 - CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ 352 - CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ 353 - CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ 321 + CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ 322 + CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ 323 + CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ 354 324 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */ 355 325 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */ 356 326 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */ ··· 383 353 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ 384 354 }; 385 355 386 - enum pci_cfg_register_bits { 387 - PCTL_PWR_ON = (0xFU << 24), 388 - PCTL_OFF = (0xFU << 12), 389 - PRD_REQ_SIZE = (0x4000), 390 - PRD_REQ_MASK = (0x00007000), 356 + enum mvs_info_flags { 357 + MVF_MSI = (1U << 0), /* MSI is enabled */ 358 + MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ 359 + MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */ 391 360 }; 392 361 393 - enum nvram_layout_offsets { 394 - NVR_SIG = 0x00, /* 0xAA, 0x55 */ 395 - NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */ 362 + enum mvs_event_flags { 363 + PHY_PLUG_EVENT = (3U), 364 + 
PHY_PLUG_IN = (1U << 0), /* phy plug in */ 365 + PHY_PLUG_OUT = (1U << 1), /* phy plug out */ 396 366 }; 397 367 398 - enum chip_flavors { 399 - chip_6320, 400 - chip_6440, 401 - chip_6480, 402 - }; 403 - 404 - enum port_type { 405 - PORT_TYPE_SAS = (1L << 1), 406 - PORT_TYPE_SATA = (1L << 0), 368 + enum mvs_port_type { 369 + PORT_TGT_MASK = (1U << 5), 370 + PORT_INIT_PORT = (1U << 4), 371 + PORT_TGT_PORT = (1U << 3), 372 + PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT), 373 + PORT_TYPE_SAS = (1U << 1), 374 + PORT_TYPE_SATA = (1U << 0), 407 375 }; 408 376 409 377 /* Command Table Format */ ··· 466 438 USR_BLK_NM = (1U << 0), /* User Block Number */ 467 439 }; 468 440 441 + enum pci_cfg_register_bits { 442 + PCTL_PWR_OFF = (0xFU << 24), 443 + PCTL_COM_ON = (0xFU << 20), 444 + PCTL_LINK_RST = (0xFU << 16), 445 + PCTL_LINK_OFFS = (16), 446 + PCTL_PHY_DSBL = (0xFU << 12), 447 + PCTL_PHY_DSBL_OFFS = (12), 448 + PRD_REQ_SIZE = (0x4000), 449 + PRD_REQ_MASK = (0x00007000), 450 + PLS_NEG_LINK_WD = (0x3FU << 4), 451 + PLS_NEG_LINK_WD_OFFS = 4, 452 + PLS_LINK_SPD = (0x0FU << 0), 453 + PLS_LINK_SPD_OFFS = 0, 454 + }; 455 + 456 + enum open_frame_protocol { 457 + PROTOCOL_SMP = 0x0, 458 + PROTOCOL_SSP = 0x1, 459 + PROTOCOL_STP = 0x2, 460 + }; 461 + 462 + /* define for response frame datapres field */ 463 + enum datapres_field { 464 + NO_DATA = 0, 465 + RESPONSE_DATA = 1, 466 + SENSE_DATA = 2, 467 + }; 468 + 469 + /* define task management IU */ 470 + struct mvs_tmf_task{ 471 + u8 tmf; 472 + u16 tag_of_task_to_be_managed; 473 + }; 469 474 #endif
+404 -225
drivers/scsi/mvsas/mv_init.c
··· 1 1 /* 2 - mv_init.c - Marvell 88SE6440 SAS/SATA init support 2 + * Marvell 88SE64xx/88SE94xx pci init 3 + * 4 + * Copyright 2007 Red Hat, Inc. 5 + * Copyright 2008 Marvell. <kewei@marvell.com> 6 + * 7 + * This file is licensed under GPLv2. 8 + * 9 + * This program is free software; you can redistribute it and/or 10 + * modify it under the terms of the GNU General Public License as 11 + * published by the Free Software Foundation; version 2 of the 12 + * License. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; if not, write to the Free Software 21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 22 + * USA 23 + */ 3 24 4 - Copyright 2007 Red Hat, Inc. 5 - Copyright 2008 Marvell. <kewei@marvell.com> 6 - 7 - This program is free software; you can redistribute it and/or 8 - modify it under the terms of the GNU General Public License as 9 - published by the Free Software Foundation; either version 2, 10 - or (at your option) any later version. 11 - 12 - This program is distributed in the hope that it will be useful, 13 - but WITHOUT ANY WARRANTY; without even the implied warranty 14 - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 15 - See the GNU General Public License for more details. 16 - 17 - You should have received a copy of the GNU General Public 18 - License along with this program; see the file COPYING. If not, 19 - write to the Free Software Foundation, 675 Mass Ave, Cambridge, 20 - MA 02139, USA. 
21 - 22 - */ 23 25 24 26 #include "mv_sas.h" 25 - #include "mv_64xx.h" 26 - #include "mv_chips.h" 27 27 28 28 static struct scsi_transport_template *mvs_stt; 29 - 30 29 static const struct mvs_chip_info mvs_chips[] = { 31 - [chip_6320] = { 2, 16, 9 }, 32 - [chip_6440] = { 4, 16, 9 }, 33 - [chip_6480] = { 8, 32, 10 }, 30 + [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 31 + [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 32 + [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, }, 33 + [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 34 + [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 34 35 }; 36 + 37 + #define SOC_SAS_NUM 2 35 38 36 39 static struct scsi_host_template mvs_sht = { 37 40 .module = THIS_MODULE, ··· 56 53 .use_clustering = ENABLE_CLUSTERING, 57 54 .eh_device_reset_handler = sas_eh_device_reset_handler, 58 55 .eh_bus_reset_handler = sas_eh_bus_reset_handler, 59 - .slave_alloc = sas_slave_alloc, 56 + .slave_alloc = mvs_slave_alloc, 60 57 .target_destroy = sas_target_destroy, 61 58 .ioctl = sas_ioctl, 62 59 }; 63 60 64 61 static struct sas_domain_function_template mvs_transport_ops = { 65 - .lldd_execute_task = mvs_task_exec, 62 + .lldd_dev_found = mvs_dev_found, 63 + .lldd_dev_gone = mvs_dev_gone, 64 + 65 + .lldd_execute_task = mvs_queue_command, 66 66 .lldd_control_phy = mvs_phy_control, 67 - .lldd_abort_task = mvs_task_abort, 68 - .lldd_port_formed = mvs_port_formed, 67 + 68 + .lldd_abort_task = mvs_abort_task, 69 + .lldd_abort_task_set = mvs_abort_task_set, 70 + .lldd_clear_aca = mvs_clear_aca, 71 + .lldd_clear_task_set = mvs_clear_task_set, 69 72 .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, 73 + .lldd_lu_reset = mvs_lu_reset, 74 + .lldd_query_task = mvs_query_task, 75 + 76 + .lldd_port_formed = mvs_port_formed, 77 + .lldd_port_deformed = mvs_port_deformed, 78 + 70 79 }; 71 80 72 81 static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) ··· 86 71 struct mvs_phy *phy = 
&mvi->phy[phy_id]; 87 72 struct asd_sas_phy *sas_phy = &phy->sas_phy; 88 73 74 + phy->mvi = mvi; 75 + init_timer(&phy->timer); 89 76 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0; 90 77 sas_phy->class = SAS; 91 78 sas_phy->iproto = SAS_PROTOCOL_ALL; ··· 100 83 sas_phy->id = phy_id; 101 84 sas_phy->sas_addr = &mvi->sas_addr[0]; 102 85 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 103 - sas_phy->ha = &mvi->sas; 86 + sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata; 104 87 sas_phy->lldd_phy = phy; 105 88 } 106 89 107 90 static void mvs_free(struct mvs_info *mvi) 108 91 { 109 92 int i; 93 + struct mvs_wq *mwq; 94 + int slot_nr; 110 95 111 96 if (!mvi) 112 97 return; 113 98 114 - for (i = 0; i < MVS_SLOTS; i++) { 115 - struct mvs_slot_info *slot = &mvi->slot_info[i]; 99 + if (mvi->flags & MVF_FLAG_SOC) 100 + slot_nr = MVS_SOC_SLOTS; 101 + else 102 + slot_nr = MVS_SLOTS; 116 103 104 + for (i = 0; i < mvi->tags_num; i++) { 105 + struct mvs_slot_info *slot = &mvi->slot_info[i]; 117 106 if (slot->buf) 118 - dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ, 107 + dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ, 119 108 slot->buf, slot->buf_dma); 120 109 } 121 110 122 111 if (mvi->tx) 123 - dma_free_coherent(&mvi->pdev->dev, 112 + dma_free_coherent(mvi->dev, 124 113 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, 125 114 mvi->tx, mvi->tx_dma); 126 115 if (mvi->rx_fis) 127 - dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, 116 + dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ, 128 117 mvi->rx_fis, mvi->rx_fis_dma); 129 118 if (mvi->rx) 130 - dma_free_coherent(&mvi->pdev->dev, 119 + dma_free_coherent(mvi->dev, 131 120 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), 132 121 mvi->rx, mvi->rx_dma); 133 122 if (mvi->slot) 134 - dma_free_coherent(&mvi->pdev->dev, 135 - sizeof(*mvi->slot) * MVS_SLOTS, 123 + dma_free_coherent(mvi->dev, 124 + sizeof(*mvi->slot) * slot_nr, 136 125 mvi->slot, mvi->slot_dma); 137 - #ifdef MVS_ENABLE_PERI 138 - if (mvi->peri_regs) 139 - iounmap(mvi->peri_regs); 
126 + #ifndef DISABLE_HOTPLUG_DMA_FIX 127 + if (mvi->bulk_buffer) 128 + dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, 129 + mvi->bulk_buffer, mvi->bulk_buffer_dma); 140 130 #endif 141 - if (mvi->regs) 142 - iounmap(mvi->regs); 131 + 132 + MVS_CHIP_DISP->chip_iounmap(mvi); 143 133 if (mvi->shost) 144 134 scsi_host_put(mvi->shost); 145 - kfree(mvi->sas.sas_port); 146 - kfree(mvi->sas.sas_phy); 135 + list_for_each_entry(mwq, &mvi->wq_list, entry) 136 + cancel_delayed_work(&mwq->work_q); 147 137 kfree(mvi); 148 138 } 149 139 150 140 #ifdef MVS_USE_TASKLET 151 - static void mvs_tasklet(unsigned long data) 141 + struct tasklet_struct mv_tasklet; 142 + static void mvs_tasklet(unsigned long opaque) 152 143 { 153 - struct mvs_info *mvi = (struct mvs_info *) data; 154 144 unsigned long flags; 145 + u32 stat; 146 + u16 core_nr, i = 0; 155 147 156 - spin_lock_irqsave(&mvi->lock, flags); 148 + struct mvs_info *mvi; 149 + struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque; 157 150 158 - #ifdef MVS_DISABLE_MSI 159 - mvs_int_full(mvi); 160 - #else 161 - mvs_int_rx(mvi, true); 162 - #endif 163 - spin_unlock_irqrestore(&mvi->lock, flags); 151 + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 152 + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; 153 + 154 + if (unlikely(!mvi)) 155 + BUG_ON(1); 156 + 157 + for (i = 0; i < core_nr; i++) { 158 + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; 159 + stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq); 160 + if (stat) 161 + MVS_CHIP_DISP->isr(mvi, mvi->irq, stat); 162 + } 163 + 164 164 } 165 165 #endif 166 166 167 167 static irqreturn_t mvs_interrupt(int irq, void *opaque) 168 168 { 169 - struct mvs_info *mvi = opaque; 170 - void __iomem *regs = mvi->regs; 169 + u32 core_nr, i = 0; 171 170 u32 stat; 171 + struct mvs_info *mvi; 172 + struct sas_ha_struct *sha = opaque; 172 173 173 - stat = mr32(GBL_INT_STAT); 174 + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 175 + mvi = ((struct mvs_prv_info 
*)sha->lldd_ha)->mvi[0]; 174 176 175 - if (stat == 0 || stat == 0xffffffff) 177 + if (unlikely(!mvi)) 176 178 return IRQ_NONE; 177 179 178 - /* clear CMD_CMPLT ASAP */ 179 - mw32_f(INT_STAT, CINT_DONE); 180 + stat = MVS_CHIP_DISP->isr_status(mvi, irq); 181 + if (!stat) 182 + return IRQ_NONE; 180 183 181 - #ifndef MVS_USE_TASKLET 182 - spin_lock(&mvi->lock); 183 - 184 - mvs_int_full(mvi); 185 - 186 - spin_unlock(&mvi->lock); 184 + #ifdef MVS_USE_TASKLET 185 + tasklet_schedule(&mv_tasklet); 187 186 #else 188 - tasklet_schedule(&mvi->tasklet); 187 + for (i = 0; i < core_nr; i++) { 188 + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; 189 + MVS_CHIP_DISP->isr(mvi, irq, stat); 190 + } 189 191 #endif 190 192 return IRQ_HANDLED; 191 193 } 192 194 193 - static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev, 194 - const struct pci_device_id *ent) 195 + static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) 195 196 { 196 - struct mvs_info *mvi; 197 - unsigned long res_start, res_len, res_flag; 198 - struct asd_sas_phy **arr_phy; 199 - struct asd_sas_port **arr_port; 200 - const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data]; 201 - int i; 197 + int i, slot_nr; 202 198 203 - /* 204 - * alloc and init our per-HBA mvs_info struct 205 - */ 206 - 207 - mvi = kzalloc(sizeof(*mvi), GFP_KERNEL); 208 - if (!mvi) 209 - return NULL; 199 + if (mvi->flags & MVF_FLAG_SOC) 200 + slot_nr = MVS_SOC_SLOTS; 201 + else 202 + slot_nr = MVS_SLOTS; 210 203 211 204 spin_lock_init(&mvi->lock); 212 - #ifdef MVS_USE_TASKLET 213 - tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi); 214 - #endif 215 - mvi->pdev = pdev; 216 - mvi->chip = chip; 217 - 218 - if (pdev->device == 0x6440 && pdev->revision == 0) 219 - mvi->flags |= MVF_PHY_PWR_FIX; 220 - 221 - /* 222 - * alloc and init SCSI, SAS glue 223 - */ 224 - 225 - mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); 226 - if (!mvi->shost) 227 - goto err_out; 228 - 229 - arr_phy = 
kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); 230 - arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); 231 - if (!arr_phy || !arr_port) 232 - goto err_out; 233 - 234 - for (i = 0; i < MVS_MAX_PHYS; i++) { 205 + for (i = 0; i < mvi->chip->n_phy; i++) { 235 206 mvs_phy_init(mvi, i); 236 - arr_phy[i] = &mvi->phy[i].sas_phy; 237 - arr_port[i] = &mvi->port[i].sas_port; 238 - mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED; 239 207 mvi->port[i].wide_port_phymap = 0; 240 208 mvi->port[i].port_attached = 0; 241 209 INIT_LIST_HEAD(&mvi->port[i].list); 242 210 } 243 - 244 - SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; 245 - mvi->shost->transportt = mvs_stt; 246 - mvi->shost->max_id = 21; 247 - mvi->shost->max_lun = ~0; 248 - mvi->shost->max_channel = 0; 249 - mvi->shost->max_cmd_len = 16; 250 - 251 - mvi->sas.sas_ha_name = DRV_NAME; 252 - mvi->sas.dev = &pdev->dev; 253 - mvi->sas.lldd_module = THIS_MODULE; 254 - mvi->sas.sas_addr = &mvi->sas_addr[0]; 255 - mvi->sas.sas_phy = arr_phy; 256 - mvi->sas.sas_port = arr_port; 257 - mvi->sas.num_phys = chip->n_phy; 258 - mvi->sas.lldd_max_execute_num = 1; 259 - mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE; 260 - mvi->shost->can_queue = MVS_CAN_QUEUE; 261 - mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys; 262 - mvi->sas.lldd_ha = mvi; 263 - mvi->sas.core.shost = mvi->shost; 264 - 265 - mvs_tag_init(mvi); 266 - 267 - /* 268 - * ioremap main and peripheral registers 269 - */ 270 - 271 - #ifdef MVS_ENABLE_PERI 272 - res_start = pci_resource_start(pdev, 2); 273 - res_len = pci_resource_len(pdev, 2); 274 - if (!res_start || !res_len) 275 - goto err_out; 276 - 277 - mvi->peri_regs = ioremap_nocache(res_start, res_len); 278 - if (!mvi->peri_regs) 279 - goto err_out; 280 - #endif 281 - 282 - res_start = pci_resource_start(pdev, 4); 283 - res_len = pci_resource_len(pdev, 4); 284 - if (!res_start || !res_len) 285 - goto err_out; 286 - 287 - res_flag = pci_resource_flags(pdev, 4); 288 - if (res_flag & IORESOURCE_CACHEABLE) 289 - 
mvi->regs = ioremap(res_start, res_len); 290 - else 291 - mvi->regs = ioremap_nocache(res_start, res_len); 292 - 293 - if (!mvi->regs) 294 - goto err_out; 211 + for (i = 0; i < MVS_MAX_DEVICES; i++) { 212 + mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; 213 + mvi->devices[i].dev_type = NO_DEVICE; 214 + mvi->devices[i].device_id = i; 215 + mvi->devices[i].dev_status = MVS_DEV_NORMAL; 216 + } 295 217 296 218 /* 297 219 * alloc and init our DMA areas 298 220 */ 299 - 300 - mvi->tx = dma_alloc_coherent(&pdev->dev, 221 + mvi->tx = dma_alloc_coherent(mvi->dev, 301 222 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, 302 223 &mvi->tx_dma, GFP_KERNEL); 303 224 if (!mvi->tx) 304 225 goto err_out; 305 226 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); 306 - 307 - mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ, 227 + mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ, 308 228 &mvi->rx_fis_dma, GFP_KERNEL); 309 229 if (!mvi->rx_fis) 310 230 goto err_out; 311 231 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); 312 232 313 - mvi->rx = dma_alloc_coherent(&pdev->dev, 233 + mvi->rx = dma_alloc_coherent(mvi->dev, 314 234 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), 315 235 &mvi->rx_dma, GFP_KERNEL); 316 236 if (!mvi->rx) 317 237 goto err_out; 318 238 memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); 319 - 320 239 mvi->rx[0] = cpu_to_le32(0xfff); 321 240 mvi->rx_cons = 0xfff; 322 241 323 - mvi->slot = dma_alloc_coherent(&pdev->dev, 324 - sizeof(*mvi->slot) * MVS_SLOTS, 242 + mvi->slot = dma_alloc_coherent(mvi->dev, 243 + sizeof(*mvi->slot) * slot_nr, 325 244 &mvi->slot_dma, GFP_KERNEL); 326 245 if (!mvi->slot) 327 246 goto err_out; 328 - memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS); 247 + memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr); 329 248 330 - for (i = 0; i < MVS_SLOTS; i++) { 249 + #ifndef DISABLE_HOTPLUG_DMA_FIX 250 + mvi->bulk_buffer = dma_alloc_coherent(mvi->dev, 251 + TRASH_BUCKET_SIZE, 252 + &mvi->bulk_buffer_dma, GFP_KERNEL); 253 + if 
(!mvi->bulk_buffer) 254 + goto err_out; 255 + #endif 256 + for (i = 0; i < slot_nr; i++) { 331 257 struct mvs_slot_info *slot = &mvi->slot_info[i]; 332 258 333 - slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, 259 + slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ, 334 260 &slot->buf_dma, GFP_KERNEL); 335 - if (!slot->buf) 261 + if (!slot->buf) { 262 + printk(KERN_DEBUG"failed to allocate slot->buf.\n"); 336 263 goto err_out; 264 + } 337 265 memset(slot->buf, 0, MVS_SLOT_BUF_SZ); 266 + ++mvi->tags_num; 267 + } 268 + /* Initialize tags */ 269 + mvs_tag_init(mvi); 270 + return 0; 271 + err_out: 272 + return 1; 273 + } 274 + 275 + 276 + int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex) 277 + { 278 + unsigned long res_start, res_len, res_flag, res_flag_ex = 0; 279 + struct pci_dev *pdev = mvi->pdev; 280 + if (bar_ex != -1) { 281 + /* 282 + * ioremap main and peripheral registers 283 + */ 284 + res_start = pci_resource_start(pdev, bar_ex); 285 + res_len = pci_resource_len(pdev, bar_ex); 286 + if (!res_start || !res_len) 287 + goto err_out; 288 + 289 + res_flag_ex = pci_resource_flags(pdev, bar_ex); 290 + if (res_flag_ex & IORESOURCE_MEM) { 291 + if (res_flag_ex & IORESOURCE_CACHEABLE) 292 + mvi->regs_ex = ioremap(res_start, res_len); 293 + else 294 + mvi->regs_ex = ioremap_nocache(res_start, 295 + res_len); 296 + } else 297 + mvi->regs_ex = (void *)res_start; 298 + if (!mvi->regs_ex) 299 + goto err_out; 338 300 } 339 301 340 - /* finally, read NVRAM to get our SAS address */ 341 - if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) 302 + res_start = pci_resource_start(pdev, bar); 303 + res_len = pci_resource_len(pdev, bar); 304 + if (!res_start || !res_len) 342 305 goto err_out; 343 - return mvi; 344 306 307 + res_flag = pci_resource_flags(pdev, bar); 308 + if (res_flag & IORESOURCE_CACHEABLE) 309 + mvi->regs = ioremap(res_start, res_len); 310 + else 311 + mvi->regs = ioremap_nocache(res_start, res_len); 312 + 313 + if (!mvi->regs) { 
314 + if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM)) 315 + iounmap(mvi->regs_ex); 316 + mvi->regs_ex = NULL; 317 + goto err_out; 318 + } 319 + 320 + return 0; 321 + err_out: 322 + return -1; 323 + } 324 + 325 + void mvs_iounmap(void __iomem *regs) 326 + { 327 + iounmap(regs); 328 + } 329 + 330 + static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev, 331 + const struct pci_device_id *ent, 332 + struct Scsi_Host *shost, unsigned int id) 333 + { 334 + struct mvs_info *mvi; 335 + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 336 + 337 + mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info), 338 + GFP_KERNEL); 339 + if (!mvi) 340 + return NULL; 341 + 342 + mvi->pdev = pdev; 343 + mvi->dev = &pdev->dev; 344 + mvi->chip_id = ent->driver_data; 345 + mvi->chip = &mvs_chips[mvi->chip_id]; 346 + INIT_LIST_HEAD(&mvi->wq_list); 347 + mvi->irq = pdev->irq; 348 + 349 + ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi; 350 + ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy; 351 + 352 + mvi->id = id; 353 + mvi->sas = sha; 354 + mvi->shost = shost; 355 + #ifdef MVS_USE_TASKLET 356 + tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha); 357 + #endif 358 + 359 + if (MVS_CHIP_DISP->chip_ioremap(mvi)) 360 + goto err_out; 361 + if (!mvs_alloc(mvi, shost)) 362 + return mvi; 345 363 err_out: 346 364 mvs_free(mvi); 347 365 return NULL; ··· 415 363 return rc; 416 364 } 417 365 366 + static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost, 367 + const struct mvs_chip_info *chip_info) 368 + { 369 + int phy_nr, port_nr; unsigned short core_nr; 370 + struct asd_sas_phy **arr_phy; 371 + struct asd_sas_port **arr_port; 372 + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 373 + 374 + core_nr = chip_info->n_host; 375 + phy_nr = core_nr * chip_info->n_phy; 376 + port_nr = phy_nr; 377 + 378 + memset(sha, 0x00, sizeof(struct sas_ha_struct)); 379 + arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL); 380 + arr_port = 
kcalloc(port_nr, sizeof(void *), GFP_KERNEL); 381 + if (!arr_phy || !arr_port) 382 + goto exit_free; 383 + 384 + sha->sas_phy = arr_phy; 385 + sha->sas_port = arr_port; 386 + 387 + sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL); 388 + if (!sha->lldd_ha) 389 + goto exit_free; 390 + 391 + ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr; 392 + 393 + shost->transportt = mvs_stt; 394 + shost->max_id = 128; 395 + shost->max_lun = ~0; 396 + shost->max_channel = 1; 397 + shost->max_cmd_len = 16; 398 + 399 + return 0; 400 + exit_free: 401 + kfree(arr_phy); 402 + kfree(arr_port); 403 + return -1; 404 + 405 + } 406 + 407 + static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost, 408 + const struct mvs_chip_info *chip_info) 409 + { 410 + int can_queue, i = 0, j = 0; 411 + struct mvs_info *mvi = NULL; 412 + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 413 + unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 414 + 415 + for (j = 0; j < nr_core; j++) { 416 + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; 417 + for (i = 0; i < chip_info->n_phy; i++) { 418 + sha->sas_phy[j * chip_info->n_phy + i] = 419 + &mvi->phy[i].sas_phy; 420 + sha->sas_port[j * chip_info->n_phy + i] = 421 + &mvi->port[i].sas_port; 422 + } 423 + } 424 + 425 + sha->sas_ha_name = DRV_NAME; 426 + sha->dev = mvi->dev; 427 + sha->lldd_module = THIS_MODULE; 428 + sha->sas_addr = &mvi->sas_addr[0]; 429 + 430 + sha->num_phys = nr_core * chip_info->n_phy; 431 + 432 + sha->lldd_max_execute_num = 1; 433 + 434 + if (mvi->flags & MVF_FLAG_SOC) 435 + can_queue = MVS_SOC_CAN_QUEUE; 436 + else 437 + can_queue = MVS_CAN_QUEUE; 438 + 439 + sha->lldd_queue_size = can_queue; 440 + shost->can_queue = can_queue; 441 + mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys; 442 + sha->core.shost = mvi->shost; 443 + } 444 + 445 + static void mvs_init_sas_add(struct mvs_info *mvi) 446 + { 447 + u8 i; 448 + for (i = 0; i < mvi->chip->n_phy; i++) { 449 + 
mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL; 450 + mvi->phy[i].dev_sas_addr = 451 + cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr)); 452 + } 453 + 454 + memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE); 455 + } 456 + 418 457 static int __devinit mvs_pci_init(struct pci_dev *pdev, 419 458 const struct pci_device_id *ent) 420 459 { 421 - int rc; 460 + unsigned int rc, nhost = 0; 422 461 struct mvs_info *mvi; 423 462 irq_handler_t irq_handler = mvs_interrupt; 463 + struct Scsi_Host *shost = NULL; 464 + const struct mvs_chip_info *chip; 424 465 466 + dev_printk(KERN_INFO, &pdev->dev, 467 + "mvsas: driver version %s\n", DRV_VERSION); 425 468 rc = pci_enable_device(pdev); 426 469 if (rc) 427 - return rc; 470 + goto err_out_enable; 428 471 429 472 pci_set_master(pdev); 430 473 ··· 531 384 if (rc) 532 385 goto err_out_regions; 533 386 534 - mvi = mvs_alloc(pdev, ent); 535 - if (!mvi) { 387 + shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); 388 + if (!shost) { 536 389 rc = -ENOMEM; 537 390 goto err_out_regions; 538 391 } 539 392 540 - rc = mvs_hw_init(mvi); 541 - if (rc) 542 - goto err_out_mvi; 543 - 544 - #ifndef MVS_DISABLE_MSI 545 - if (!pci_enable_msi(pdev)) { 546 - u32 tmp; 547 - void __iomem *regs = mvi->regs; 548 - mvi->flags |= MVF_MSI; 549 - irq_handler = mvs_msi_interrupt; 550 - tmp = mr32(PCS); 551 - mw32(PCS, tmp | PCS_SELF_CLEAR); 393 + chip = &mvs_chips[ent->driver_data]; 394 + SHOST_TO_SAS_HA(shost) = 395 + kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL); 396 + if (!SHOST_TO_SAS_HA(shost)) { 397 + kfree(shost); 398 + rc = -ENOMEM; 399 + goto err_out_regions; 552 400 } 553 - #endif 554 401 555 - rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi); 556 - if (rc) 557 - goto err_out_msi; 402 + rc = mvs_prep_sas_ha_init(shost, chip); 403 + if (rc) { 404 + kfree(shost); 405 + rc = -ENOMEM; 406 + goto err_out_regions; 407 + } 558 408 559 - rc = scsi_add_host(mvi->shost, &pdev->dev); 560 - if (rc) 561 - goto 
err_out_irq; 409 + pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); 562 410 563 - rc = sas_register_ha(&mvi->sas); 411 + do { 412 + mvi = mvs_pci_alloc(pdev, ent, shost, nhost); 413 + if (!mvi) { 414 + rc = -ENOMEM; 415 + goto err_out_regions; 416 + } 417 + 418 + mvs_init_sas_add(mvi); 419 + 420 + mvi->instance = nhost; 421 + rc = MVS_CHIP_DISP->chip_init(mvi); 422 + if (rc) { 423 + mvs_free(mvi); 424 + goto err_out_regions; 425 + } 426 + nhost++; 427 + } while (nhost < chip->n_host); 428 + 429 + mvs_post_sas_ha_init(shost, chip); 430 + 431 + rc = scsi_add_host(shost, &pdev->dev); 564 432 if (rc) 565 433 goto err_out_shost; 566 434 567 - pci_set_drvdata(pdev, mvi); 435 + rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); 436 + if (rc) 437 + goto err_out_shost; 438 + rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, 439 + DRV_NAME, SHOST_TO_SAS_HA(shost)); 440 + if (rc) 441 + goto err_not_sas; 568 442 569 - mvs_print_info(mvi); 570 - 571 - mvs_hba_interrupt_enable(mvi); 443 + MVS_CHIP_DISP->interrupt_enable(mvi); 572 444 573 445 scsi_scan_host(mvi->shost); 574 446 575 447 return 0; 576 448 449 + err_not_sas: 450 + sas_unregister_ha(SHOST_TO_SAS_HA(shost)); 577 451 err_out_shost: 578 452 scsi_remove_host(mvi->shost); 579 - err_out_irq: 580 - free_irq(pdev->irq, mvi); 581 - err_out_msi: 582 - if (mvi->flags |= MVF_MSI) 583 - pci_disable_msi(pdev); 584 - err_out_mvi: 585 - mvs_free(mvi); 586 453 err_out_regions: 587 454 pci_release_regions(pdev); 588 455 err_out_disable: 589 456 pci_disable_device(pdev); 457 + err_out_enable: 590 458 return rc; 591 459 } 592 460 593 461 static void __devexit mvs_pci_remove(struct pci_dev *pdev) 594 462 { 595 - struct mvs_info *mvi = pci_get_drvdata(pdev); 463 + unsigned short core_nr, i = 0; 464 + struct sas_ha_struct *sha = pci_get_drvdata(pdev); 465 + struct mvs_info *mvi = NULL; 466 + 467 + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 468 + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; 469 + 470 + #ifdef 
MVS_USE_TASKLET 471 + tasklet_kill(&mv_tasklet); 472 + #endif 596 473 597 474 pci_set_drvdata(pdev, NULL); 475 + sas_unregister_ha(sha); 476 + sas_remove_host(mvi->shost); 477 + scsi_remove_host(mvi->shost); 598 478 599 - if (mvi) { 600 - sas_unregister_ha(&mvi->sas); 601 - mvs_hba_interrupt_disable(mvi); 602 - sas_remove_host(mvi->shost); 603 - scsi_remove_host(mvi->shost); 604 - 605 - free_irq(pdev->irq, mvi); 606 - if (mvi->flags & MVF_MSI) 607 - pci_disable_msi(pdev); 479 + MVS_CHIP_DISP->interrupt_disable(mvi); 480 + free_irq(mvi->irq, sha); 481 + for (i = 0; i < core_nr; i++) { 482 + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; 608 483 mvs_free(mvi); 609 - pci_release_regions(pdev); 610 484 } 485 + kfree(sha->sas_phy); 486 + kfree(sha->sas_port); 487 + kfree(sha); 488 + pci_release_regions(pdev); 611 489 pci_disable_device(pdev); 490 + return; 612 491 } 613 492 614 493 static struct pci_device_id __devinitdata mvs_pci_table[] = { ··· 647 474 .subdevice = 0x6480, 648 475 .class = 0, 649 476 .class_mask = 0, 650 - .driver_data = chip_6480, 477 + .driver_data = chip_6485, 651 478 }, 652 479 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, 653 - { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 }, 480 + { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 }, 481 + { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 }, 482 + { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 }, 654 483 655 484 { } /* terminate list */ 656 485 }; ··· 664 489 .remove = __devexit_p(mvs_pci_remove), 665 490 }; 666 491 492 + /* task handler */ 493 + struct task_struct *mvs_th; 667 494 static int __init mvs_init(void) 668 495 { 669 496 int rc; 670 - 671 497 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); 672 498 if (!mvs_stt) 673 499 return -ENOMEM; 674 500 675 501 rc = pci_register_driver(&mvs_pci_driver); 502 + 676 503 if (rc) 677 504 goto err_out; 678 505 ··· 698 521 MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); 699 522 MODULE_VERSION(DRV_VERSION); 700 523 MODULE_LICENSE("GPL"); 524 + 
#ifdef CONFIG_PCI 701 525 MODULE_DEVICE_TABLE(pci, mvs_pci_table); 526 + #endif
+1236 -897
drivers/scsi/mvsas/mv_sas.c
··· 1 1 /* 2 - mv_sas.c - Marvell 88SE6440 SAS/SATA support 3 - 4 - Copyright 2007 Red Hat, Inc. 5 - Copyright 2008 Marvell. <kewei@marvell.com> 6 - 7 - This program is free software; you can redistribute it and/or 8 - modify it under the terms of the GNU General Public License as 9 - published by the Free Software Foundation; either version 2, 10 - or (at your option) any later version. 11 - 12 - This program is distributed in the hope that it will be useful, 13 - but WITHOUT ANY WARRANTY; without even the implied warranty 14 - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 15 - See the GNU General Public License for more details. 16 - 17 - You should have received a copy of the GNU General Public 18 - License along with this program; see the file COPYING. If not, 19 - write to the Free Software Foundation, 675 Mass Ave, Cambridge, 20 - MA 02139, USA. 21 - 22 - --------------------------------------------------------------- 23 - 24 - Random notes: 25 - * hardware supports controlling the endian-ness of data 26 - structures. this permits elimination of all the le32_to_cpu() 27 - and cpu_to_le32() conversions. 28 - 29 - */ 2 + * Marvell 88SE64xx/88SE94xx main function 3 + * 4 + * Copyright 2007 Red Hat, Inc. 5 + * Copyright 2008 Marvell. <kewei@marvell.com> 6 + * 7 + * This file is licensed under GPLv2. 8 + * 9 + * This program is free software; you can redistribute it and/or 10 + * modify it under the terms of the GNU General Public License as 11 + * published by the Free Software Foundation; version 2 of the 12 + * License. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 
18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; if not, write to the Free Software 21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 22 + * USA 23 + */ 30 24 31 25 #include "mv_sas.h" 32 - #include "mv_64xx.h" 33 - #include "mv_chips.h" 34 - 35 - /* offset for D2H FIS in the Received FIS List Structure */ 36 - #define SATA_RECEIVED_D2H_FIS(reg_set) \ 37 - ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40) 38 - #define SATA_RECEIVED_PIO_FIS(reg_set) \ 39 - ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20) 40 - #define UNASSOC_D2H_FIS(id) \ 41 - ((void *) mvi->rx_fis + 0x100 * id) 42 - 43 - struct mvs_task_exec_info { 44 - struct sas_task *task; 45 - struct mvs_cmd_hdr *hdr; 46 - struct mvs_port *port; 47 - u32 tag; 48 - int n_elem; 49 - }; 50 - 51 - static void mvs_release_task(struct mvs_info *mvi, int phy_no); 52 - static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); 53 - static void mvs_update_phyinfo(struct mvs_info *mvi, int i, 54 - int get_st); 55 - static int mvs_int_rx(struct mvs_info *mvi, bool self_clear); 56 - static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, 57 - u32 slot_idx); 58 26 59 27 static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) 60 28 { 61 29 if (task->lldd_task) { 62 30 struct mvs_slot_info *slot; 63 31 slot = (struct mvs_slot_info *) task->lldd_task; 64 - *tag = slot - mvi->slot_info; 32 + *tag = slot->slot_tag; 65 33 return 1; 66 34 } 67 35 return 0; 68 36 } 69 37 70 - static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) 38 + void mvs_tag_clear(struct mvs_info *mvi, u32 tag) 71 39 { 72 40 void *bitmap = (void *) &mvi->tags; 73 41 clear_bit(tag, bitmap); 74 42 } 75 43 76 - static void mvs_tag_free(struct mvs_info *mvi, u32 tag) 44 + void mvs_tag_free(struct mvs_info *mvi, u32 tag) 77 45 { 78 46 mvs_tag_clear(mvi, tag); 79 47 } 80 48 81 - static void mvs_tag_set(struct mvs_info *mvi, 
unsigned int tag) 49 + void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) 82 50 { 83 51 void *bitmap = (void *) &mvi->tags; 84 52 set_bit(tag, bitmap); 85 53 } 86 54 87 - static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) 55 + inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) 88 56 { 89 57 unsigned int index, tag; 90 58 void *bitmap = (void *) &mvi->tags; 91 59 92 - index = find_first_zero_bit(bitmap, MVS_SLOTS); 60 + index = find_first_zero_bit(bitmap, mvi->tags_num); 93 61 tag = index; 94 - if (tag >= MVS_SLOTS) 62 + if (tag >= mvi->tags_num) 95 63 return -SAS_QUEUE_FULL; 96 64 mvs_tag_set(mvi, tag); 97 65 *tag_out = tag; ··· 69 101 void mvs_tag_init(struct mvs_info *mvi) 70 102 { 71 103 int i; 72 - for (i = 0; i < MVS_SLOTS; ++i) 104 + for (i = 0; i < mvi->tags_num; ++i) 73 105 mvs_tag_clear(mvi, i); 74 106 } 75 107 76 - static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) 108 + void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) 77 109 { 78 110 u32 i; 79 111 u32 run; ··· 81 113 82 114 offset = 0; 83 115 while (size) { 84 - printk("%08X : ", baseaddr + offset); 116 + printk(KERN_DEBUG"%08X : ", baseaddr + offset); 85 117 if (size >= 16) 86 118 run = 16; 87 119 else ··· 89 121 size -= run; 90 122 for (i = 0; i < 16; i++) { 91 123 if (i < run) 92 - printk("%02X ", (u32)data[i]); 124 + printk(KERN_DEBUG"%02X ", (u32)data[i]); 93 125 else 94 - printk(" "); 126 + printk(KERN_DEBUG" "); 95 127 } 96 - printk(": "); 128 + printk(KERN_DEBUG": "); 97 129 for (i = 0; i < run; i++) 98 - printk("%c", isalnum(data[i]) ? data[i] : '.'); 99 - printk("\n"); 130 + printk(KERN_DEBUG"%c", 131 + isalnum(data[i]) ? 
data[i] : '.'); 132 + printk(KERN_DEBUG"\n"); 100 133 data = &data[16]; 101 134 offset += run; 102 135 } 103 - printk("\n"); 136 + printk(KERN_DEBUG"\n"); 104 137 } 105 138 106 - #if _MV_DUMP 139 + #if (_MV_DUMP > 1) 107 140 static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, 108 141 enum sas_protocol proto) 109 142 { 110 143 u32 offset; 111 - struct pci_dev *pdev = mvi->pdev; 112 144 struct mvs_slot_info *slot = &mvi->slot_info[tag]; 113 145 114 146 offset = slot->cmd_size + MVS_OAF_SZ + 115 - sizeof(struct mvs_prd) * slot->n_elem; 116 - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n", 147 + MVS_CHIP_DISP->prd_size() * slot->n_elem; 148 + dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n", 117 149 tag); 118 150 mvs_hexdump(32, (u8 *) slot->response, 119 151 (u32) slot->buf_dma + offset); ··· 123 155 static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, 124 156 enum sas_protocol proto) 125 157 { 126 - #if _MV_DUMP 158 + #if (_MV_DUMP > 1) 127 159 u32 sz, w_ptr; 128 160 u64 addr; 129 - void __iomem *regs = mvi->regs; 130 - struct pci_dev *pdev = mvi->pdev; 131 161 struct mvs_slot_info *slot = &mvi->slot_info[tag]; 132 162 133 163 /*Delivery Queue */ 134 - sz = mr32(TX_CFG) & TX_RING_SZ_MASK; 164 + sz = MVS_CHIP_SLOT_SZ; 135 165 w_ptr = slot->tx; 136 - addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO); 137 - dev_printk(KERN_DEBUG, &pdev->dev, 166 + addr = mvi->tx_dma; 167 + dev_printk(KERN_DEBUG, mvi->dev, 138 168 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr); 139 - dev_printk(KERN_DEBUG, &pdev->dev, 169 + dev_printk(KERN_DEBUG, mvi->dev, 140 170 "Delivery Queue Base Address=0x%llX (PA)" 141 171 "(tx_dma=0x%llX), Entry=%04d\n", 142 - addr, mvi->tx_dma, w_ptr); 172 + addr, (unsigned long long)mvi->tx_dma, w_ptr); 143 173 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), 144 174 (u32) mvi->tx_dma + sizeof(u32) * w_ptr); 145 175 /*Command List */ 146 176 addr = mvi->slot_dma; 147 - dev_printk(KERN_DEBUG, 
&pdev->dev, 177 + dev_printk(KERN_DEBUG, mvi->dev, 148 178 "Command List Base Address=0x%llX (PA)" 149 179 "(slot_dma=0x%llX), Header=%03d\n", 150 - addr, slot->buf_dma, tag); 151 - dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag); 180 + addr, (unsigned long long)slot->buf_dma, tag); 181 + dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag); 152 182 /*mvs_cmd_hdr */ 153 183 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), 154 184 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); 155 185 /*1.command table area */ 156 - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n"); 186 + dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n"); 157 187 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); 158 188 /*2.open address frame area */ 159 - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n"); 189 + dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n"); 160 190 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, 161 191 (u32) slot->buf_dma + slot->cmd_size); 162 192 /*3.status buffer */ 163 193 mvs_hba_sb_dump(mvi, tag, proto); 164 194 /*4.PRD table */ 165 - dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n"); 166 - mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem, 195 + dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n"); 196 + mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem, 167 197 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, 168 198 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); 169 199 #endif ··· 172 206 #if (_MV_DUMP > 2) 173 207 u64 addr; 174 208 void __iomem *regs = mvi->regs; 175 - struct pci_dev *pdev = mvi->pdev; 176 209 u32 entry = mvi->rx_cons + 1; 177 210 u32 rx_desc = le32_to_cpu(mvi->rx[entry]); 178 211 179 212 /*Completion Queue */ 180 213 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO); 181 - dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n", 214 + dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 
0x%p\n", 182 215 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task); 183 - dev_printk(KERN_DEBUG, &pdev->dev, 216 + dev_printk(KERN_DEBUG, mvi->dev, 184 217 "Completion List Base Address=0x%llX (PA), " 185 218 "CQ_Entry=%04d, CQ_WP=0x%08X\n", 186 219 addr, entry - 1, mvi->rx[0]); ··· 188 223 #endif 189 224 } 190 225 191 - /* FIXME: locking? */ 192 - int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) 226 + void mvs_get_sas_addr(void *buf, u32 buflen) 193 227 { 194 - struct mvs_info *mvi = sas_phy->ha->lldd_ha; 195 - int rc = 0, phy_id = sas_phy->id; 196 - u32 tmp; 228 + /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/ 229 + } 197 230 198 - tmp = mvs_read_phy_ctl(mvi, phy_id); 231 + struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) 232 + { 233 + unsigned long i = 0, j = 0, hi = 0; 234 + struct sas_ha_struct *sha = dev->port->ha; 235 + struct mvs_info *mvi = NULL; 236 + struct asd_sas_phy *phy; 199 237 200 - switch (func) { 201 - case PHY_FUNC_SET_LINK_RATE:{ 202 - struct sas_phy_linkrates *rates = funcdata; 203 - u32 lrmin = 0, lrmax = 0; 204 - 205 - lrmin = (rates->minimum_linkrate << 8); 206 - lrmax = (rates->maximum_linkrate << 12); 207 - 208 - if (lrmin) { 209 - tmp &= ~(0xf << 8); 210 - tmp |= lrmin; 238 + while (sha->sas_port[i]) { 239 + if (sha->sas_port[i] == dev->port) { 240 + phy = container_of(sha->sas_port[i]->phy_list.next, 241 + struct asd_sas_phy, port_phy_el); 242 + j = 0; 243 + while (sha->sas_phy[j]) { 244 + if (sha->sas_phy[j] == phy) 245 + break; 246 + j++; 211 247 } 212 - if (lrmax) { 213 - tmp &= ~(0xf << 12); 214 - tmp |= lrmax; 215 - } 216 - mvs_write_phy_ctl(mvi, phy_id, tmp); 217 248 break; 218 249 } 250 + i++; 251 + } 252 + hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; 253 + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; 254 + 255 + return mvi; 256 + 257 + } 258 + 259 + /* FIXME */ 260 + int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) 261 + { 262 + unsigned long i = 
0, j = 0, n = 0, num = 0; 263 + struct mvs_info *mvi = mvs_find_dev_mvi(dev); 264 + struct sas_ha_struct *sha = dev->port->ha; 265 + 266 + while (sha->sas_port[i]) { 267 + if (sha->sas_port[i] == dev->port) { 268 + struct asd_sas_phy *phy; 269 + list_for_each_entry(phy, 270 + &sha->sas_port[i]->phy_list, port_phy_el) { 271 + j = 0; 272 + while (sha->sas_phy[j]) { 273 + if (sha->sas_phy[j] == phy) 274 + break; 275 + j++; 276 + } 277 + phyno[n] = (j >= mvi->chip->n_phy) ? 278 + (j - mvi->chip->n_phy) : j; 279 + num++; 280 + n++; 281 + } 282 + break; 283 + } 284 + i++; 285 + } 286 + return num; 287 + } 288 + 289 + static inline void mvs_free_reg_set(struct mvs_info *mvi, 290 + struct mvs_device *dev) 291 + { 292 + if (!dev) { 293 + mv_printk("device has been free.\n"); 294 + return; 295 + } 296 + if (dev->runing_req != 0) 297 + return; 298 + if (dev->taskfileset == MVS_ID_NOT_MAPPED) 299 + return; 300 + MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset); 301 + } 302 + 303 + static inline u8 mvs_assign_reg_set(struct mvs_info *mvi, 304 + struct mvs_device *dev) 305 + { 306 + if (dev->taskfileset != MVS_ID_NOT_MAPPED) 307 + return 0; 308 + return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset); 309 + } 310 + 311 + void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard) 312 + { 313 + u32 no; 314 + for_each_phy(phy_mask, phy_mask, no) { 315 + if (!(phy_mask & 1)) 316 + continue; 317 + MVS_CHIP_DISP->phy_reset(mvi, no, hard); 318 + } 319 + } 320 + 321 + /* FIXME: locking? 
*/ 322 + int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, 323 + void *funcdata) 324 + { 325 + int rc = 0, phy_id = sas_phy->id; 326 + u32 tmp, i = 0, hi; 327 + struct sas_ha_struct *sha = sas_phy->ha; 328 + struct mvs_info *mvi = NULL; 329 + 330 + while (sha->sas_phy[i]) { 331 + if (sha->sas_phy[i] == sas_phy) 332 + break; 333 + i++; 334 + } 335 + hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; 336 + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; 337 + 338 + switch (func) { 339 + case PHY_FUNC_SET_LINK_RATE: 340 + MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata); 341 + break; 219 342 220 343 case PHY_FUNC_HARD_RESET: 344 + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); 221 345 if (tmp & PHY_RST_HARD) 222 346 break; 223 - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); 347 + MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1); 224 348 break; 225 349 226 350 case PHY_FUNC_LINK_RESET: 227 - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); 351 + MVS_CHIP_DISP->phy_enable(mvi, phy_id); 352 + MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0); 228 353 break; 229 354 230 355 case PHY_FUNC_DISABLE: 356 + MVS_CHIP_DISP->phy_disable(mvi, phy_id); 357 + break; 231 358 case PHY_FUNC_RELEASE_SPINUP_HOLD: 232 359 default: 233 360 rc = -EOPNOTSUPP; 234 361 } 235 - 362 + msleep(200); 236 363 return rc; 364 + } 365 + 366 + void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id, 367 + u32 off_lo, u32 off_hi, u64 sas_addr) 368 + { 369 + u32 lo = (u32)sas_addr; 370 + u32 hi = (u32)(sas_addr>>32); 371 + 372 + MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo); 373 + MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo); 374 + MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi); 375 + MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi); 237 376 } 238 377 239 378 static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) 240 379 { 241 380 struct mvs_phy *phy = &mvi->phy[i]; 242 - struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; 243 - 381 + 
struct asd_sas_phy *sas_phy = &phy->sas_phy; 382 + struct sas_ha_struct *sas_ha; 244 383 if (!phy->phy_attached) 245 384 return; 385 + 386 + if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK) 387 + && phy->phy_type & PORT_TYPE_SAS) { 388 + return; 389 + } 390 + 391 + sas_ha = mvi->sas; 392 + sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE); 246 393 247 394 if (sas_phy->phy) { 248 395 struct sas_phy *sphy = sas_phy->phy; ··· 363 286 sphy->minimum_linkrate = phy->minimum_linkrate; 364 287 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 365 288 sphy->maximum_linkrate = phy->maximum_linkrate; 366 - sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; 289 + sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate(); 367 290 } 368 291 369 292 if (phy->phy_type & PORT_TYPE_SAS) { ··· 374 297 id->initiator_bits = SAS_PROTOCOL_ALL; 375 298 id->target_bits = phy->identify.target_port_protocols; 376 299 } else if (phy->phy_type & PORT_TYPE_SATA) { 377 - /* TODO */ 300 + /*Nothing*/ 378 301 } 379 - mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size; 380 - mvi->sas.notify_port_event(mvi->sas.sas_phy[i], 302 + mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy); 303 + 304 + sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 305 + 306 + mvi->sas->notify_port_event(sas_phy, 381 307 PORTE_BYTES_DMAED); 308 + } 309 + 310 + int mvs_slave_alloc(struct scsi_device *scsi_dev) 311 + { 312 + struct domain_device *dev = sdev_to_domain_dev(scsi_dev); 313 + if (dev_is_sata(dev)) { 314 + /* We don't need to rescan targets 315 + * if REPORT_LUNS request is failed 316 + */ 317 + if (scsi_dev->lun > 0) 318 + return -ENXIO; 319 + scsi_dev->tagged_supported = 1; 320 + } 321 + 322 + return sas_slave_alloc(scsi_dev); 382 323 } 383 324 384 325 int mvs_slave_configure(struct scsi_device *sdev) ··· 406 311 407 312 if (ret) 408 313 return ret; 409 - 410 314 if (dev_is_sata(dev)) { 411 - /* struct ata_port *ap = dev->sata_dev.ap; */ 412 - /* struct ata_device *adev = 
ap->link.device; */ 413 - 414 - /* clamp at no NCQ for the time being */ 415 - /* adev->flags |= ATA_DFLAG_NCQ_OFF; */ 315 + /* may set PIO mode */ 316 + #if MV_DISABLE_NCQ 317 + struct ata_port *ap = dev->sata_dev.ap; 318 + struct ata_device *adev = ap->link.device; 319 + adev->flags |= ATA_DFLAG_NCQ_OFF; 416 320 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); 321 + #endif 417 322 } 418 323 return 0; 419 324 } 420 325 421 326 void mvs_scan_start(struct Scsi_Host *shost) 422 327 { 423 - int i; 424 - struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; 328 + int i, j; 329 + unsigned short core_nr; 330 + struct mvs_info *mvi; 331 + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 425 332 426 - for (i = 0; i < mvi->chip->n_phy; ++i) { 427 - mvs_bytes_dmaed(mvi, i); 333 + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 334 + 335 + for (j = 0; j < core_nr; j++) { 336 + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; 337 + for (i = 0; i < mvi->chip->n_phy; ++i) 338 + mvs_bytes_dmaed(mvi, i); 428 339 } 429 340 } 430 341 ··· 451 350 int elem, rc, i; 452 351 struct sas_task *task = tei->task; 453 352 struct mvs_cmd_hdr *hdr = tei->hdr; 353 + struct domain_device *dev = task->dev; 354 + struct asd_sas_port *sas_port = dev->port; 454 355 struct scatterlist *sg_req, *sg_resp; 455 356 u32 req_len, resp_len, tag = tei->tag; 456 357 void *buf_tmp; 457 358 u8 *buf_oaf; 458 359 dma_addr_t buf_tmp_dma; 459 - struct mvs_prd *buf_prd; 460 - struct scatterlist *sg; 360 + void *buf_prd; 461 361 struct mvs_slot_info *slot = &mvi->slot_info[tag]; 462 - struct asd_sas_port *sas_port = task->dev->port; 463 362 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); 464 363 #if _MV_DUMP 465 364 u8 *buf_cmd; ··· 469 368 * DMA-map SMP request, response buffers 470 369 */ 471 370 sg_req = &task->smp_task.smp_req; 472 - elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); 371 + elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE); 473 372 if (!elem) 474 373 return 
-ENOMEM; 475 374 req_len = sg_dma_len(sg_req); 476 375 477 376 sg_resp = &task->smp_task.smp_resp; 478 - elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); 377 + elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE); 479 378 if (!elem) { 480 379 rc = -ENOMEM; 481 380 goto err_out; 482 381 } 483 - resp_len = sg_dma_len(sg_resp); 382 + resp_len = SB_RFB_MAX; 484 383 485 384 /* must be in dwords */ 486 385 if ((req_len & 0x3) || (resp_len & 0x3)) { ··· 492 391 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs 493 392 */ 494 393 495 - /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ 394 + /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */ 496 395 buf_tmp = slot->buf; 497 396 buf_tmp_dma = slot->buf_dma; 498 397 ··· 513 412 buf_tmp += MVS_OAF_SZ; 514 413 buf_tmp_dma += MVS_OAF_SZ; 515 414 516 - /* region 3: PRD table ********************************************* */ 415 + /* region 3: PRD table *********************************** */ 517 416 buf_prd = buf_tmp; 518 417 if (tei->n_elem) 519 418 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); 520 419 else 521 420 hdr->prd_tbl = 0; 522 421 523 - i = sizeof(struct mvs_prd) * tei->n_elem; 422 + i = MVS_CHIP_DISP->prd_size() * tei->n_elem; 524 423 buf_tmp += i; 525 424 buf_tmp_dma += i; 526 425 527 426 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ 528 427 slot->response = buf_tmp; 529 428 hdr->status_buf = cpu_to_le64(buf_tmp_dma); 429 + if (mvi->flags & MVF_FLAG_SOC) 430 + hdr->reserved[0] = 0; 530 431 531 432 /* 532 433 * Fill in TX ring and command slot header ··· 544 441 hdr->data_len = 0; 545 442 546 443 /* generate open address frame hdr (first 12 bytes) */ 547 - buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */ 548 - buf_oaf[1] = task->dev->linkrate & 0xf; 444 + /* initiator, SMP, ftype 1h */ 445 + buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01; 446 + buf_oaf[1] = dev->linkrate & 0xf; 549 447 *(u16 *)(buf_oaf 
+ 2) = 0xFFFF; /* SAS SPEC */ 550 - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); 448 + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); 551 449 552 450 /* fill in PRD (scatter/gather) table, if any */ 553 - for_each_sg(task->scatter, sg, tei->n_elem, i) { 554 - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); 555 - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); 556 - buf_prd++; 557 - } 451 + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); 558 452 559 453 #if _MV_DUMP 560 454 /* copy cmd table */ ··· 562 462 return 0; 563 463 564 464 err_out_2: 565 - pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1, 465 + dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1, 566 466 PCI_DMA_FROMDEVICE); 567 467 err_out: 568 - pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, 468 + dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1, 569 469 PCI_DMA_TODEVICE); 570 470 return rc; 571 471 } ··· 590 490 { 591 491 struct sas_task *task = tei->task; 592 492 struct domain_device *dev = task->dev; 493 + struct mvs_device *mvi_dev = 494 + (struct mvs_device *)dev->lldd_dev; 593 495 struct mvs_cmd_hdr *hdr = tei->hdr; 594 496 struct asd_sas_port *sas_port = dev->port; 595 497 struct mvs_slot_info *slot; 596 - struct scatterlist *sg; 597 - struct mvs_prd *buf_prd; 598 - struct mvs_port *port = tei->port; 599 - u32 tag = tei->tag; 600 - u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); 498 + void *buf_prd; 499 + u32 tag = tei->tag, hdr_tag; 500 + u32 flags, del_q; 601 501 void *buf_tmp; 602 502 u8 *buf_cmd, *buf_oaf; 603 503 dma_addr_t buf_tmp_dma; 604 504 u32 i, req_len, resp_len; 605 505 const u32 max_resp_len = SB_RFB_MAX; 606 506 607 - if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED) 507 + if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) { 508 + mv_dprintk("Have not enough regiset for dev %d.\n", 509 + mvi_dev->device_id); 608 510 return -EBUSY; 609 - 511 + } 610 512 slot = &mvi->slot_info[tag]; 611 513 slot->tx = mvi->tx_prod; 
612 - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | 613 - (TXQ_CMD_STP << TXQ_CMD_SHIFT) | 614 - (sas_port->phy_mask << TXQ_PHY_SHIFT) | 615 - (port->taskfileset << TXQ_SRS_SHIFT)); 514 + del_q = TXQ_MODE_I | tag | 515 + (TXQ_CMD_STP << TXQ_CMD_SHIFT) | 516 + (sas_port->phy_mask << TXQ_PHY_SHIFT) | 517 + (mvi_dev->taskfileset << TXQ_SRS_SHIFT); 518 + mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); 616 519 520 + #ifndef DISABLE_HOTPLUG_DMA_FIX 521 + if (task->data_dir == DMA_FROM_DEVICE) 522 + flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); 523 + else 524 + flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); 525 + #else 526 + flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); 527 + #endif 617 528 if (task->ata_task.use_ncq) 618 529 flags |= MCH_FPDMA; 619 530 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { ··· 637 526 hdr->flags = cpu_to_le32(flags); 638 527 639 528 /* FIXME: the low order order 5 bits for the TAG if enable NCQ */ 640 - if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags)) 641 - task->ata_task.fis.sector_count |= hdr->tags << 3; 529 + if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) 530 + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); 642 531 else 643 - hdr->tags = cpu_to_le32(tag); 532 + hdr_tag = tag; 533 + 534 + hdr->tags = cpu_to_le32(hdr_tag); 535 + 644 536 hdr->data_len = cpu_to_le32(task->total_xfer_len); 645 537 646 538 /* ··· 672 558 673 559 /* region 3: PRD table ********************************************* */ 674 560 buf_prd = buf_tmp; 561 + 675 562 if (tei->n_elem) 676 563 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); 677 564 else 678 565 hdr->prd_tbl = 0; 566 + i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count(); 679 567 680 - i = sizeof(struct mvs_prd) * tei->n_elem; 681 568 buf_tmp += i; 682 569 buf_tmp_dma += i; 683 570 ··· 688 573 */ 689 574 slot->response = buf_tmp; 690 575 hdr->status_buf = cpu_to_le64(buf_tmp_dma); 576 + if (mvi->flags & MVF_FLAG_SOC) 577 + hdr->reserved[0] = 0; 691 578 
692 579 req_len = sizeof(struct host_to_dev_fis); 693 580 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - ··· 699 582 resp_len = min(resp_len, max_resp_len); 700 583 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); 701 584 702 - task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ 585 + if (likely(!task->ata_task.device_control_reg_update)) 586 + task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ 703 587 /* fill in command FIS and ATAPI CDB */ 704 588 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); 705 589 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) ··· 708 590 task->ata_task.atapi_packet, 16); 709 591 710 592 /* generate open address frame hdr (first 12 bytes) */ 711 - buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */ 712 - buf_oaf[1] = task->dev->linkrate & 0xf; 713 - *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); 714 - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); 593 + /* initiator, STP, ftype 1h */ 594 + buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1; 595 + buf_oaf[1] = dev->linkrate & 0xf; 596 + *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); 597 + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); 715 598 716 599 /* fill in PRD (scatter/gather) table, if any */ 717 - for_each_sg(task->scatter, sg, tei->n_elem, i) { 718 - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); 719 - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); 720 - buf_prd++; 721 - } 722 - 600 + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); 601 + #ifndef DISABLE_HOTPLUG_DMA_FIX 602 + if (task->data_dir == DMA_FROM_DEVICE) 603 + MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma, 604 + TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); 605 + #endif 723 606 return 0; 724 607 } 725 608 726 609 static int mvs_task_prep_ssp(struct mvs_info *mvi, 727 - struct mvs_task_exec_info *tei) 610 + struct mvs_task_exec_info *tei, int is_tmf, 611 + struct mvs_tmf_task *tmf) 728 612 { 729 613 struct 
sas_task *task = tei->task; 730 614 struct mvs_cmd_hdr *hdr = tei->hdr; 731 615 struct mvs_port *port = tei->port; 616 + struct domain_device *dev = task->dev; 617 + struct mvs_device *mvi_dev = 618 + (struct mvs_device *)dev->lldd_dev; 619 + struct asd_sas_port *sas_port = dev->port; 732 620 struct mvs_slot_info *slot; 733 - struct scatterlist *sg; 734 - struct mvs_prd *buf_prd; 621 + void *buf_prd; 735 622 struct ssp_frame_hdr *ssp_hdr; 736 623 void *buf_tmp; 737 624 u8 *buf_cmd, *buf_oaf, fburst = 0; ··· 744 621 u32 flags; 745 622 u32 resp_len, req_len, i, tag = tei->tag; 746 623 const u32 max_resp_len = SB_RFB_MAX; 747 - u8 phy_mask; 624 + u32 phy_mask; 748 625 749 626 slot = &mvi->slot_info[tag]; 750 627 751 - phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap : 752 - task->dev->port->phy_mask; 628 + phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap : 629 + sas_port->phy_mask) & TXQ_PHY_MASK; 630 + 753 631 slot->tx = mvi->tx_prod; 754 632 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | 755 633 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | ··· 764 640 hdr->flags = cpu_to_le32(flags | 765 641 (tei->n_elem << MCH_PRD_LEN_SHIFT) | 766 642 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); 767 - 768 643 hdr->tags = cpu_to_le32(tag); 769 644 hdr->data_len = cpu_to_le32(task->total_xfer_len); 770 645 ··· 797 674 else 798 675 hdr->prd_tbl = 0; 799 676 800 - i = sizeof(struct mvs_prd) * tei->n_elem; 677 + i = MVS_CHIP_DISP->prd_size() * tei->n_elem; 801 678 buf_tmp += i; 802 679 buf_tmp_dma += i; 803 680 804 681 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ 805 682 slot->response = buf_tmp; 806 683 hdr->status_buf = cpu_to_le64(buf_tmp_dma); 684 + if (mvi->flags & MVF_FLAG_SOC) 685 + hdr->reserved[0] = 0; 807 686 808 687 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - 809 688 sizeof(struct mvs_err_info) - i; ··· 817 692 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); 818 693 819 694 /* generate open address 
frame hdr (first 12 bytes) */ 820 - buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */ 821 - buf_oaf[1] = task->dev->linkrate & 0xf; 822 - *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); 823 - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); 695 + /* initiator, SSP, ftype 1h */ 696 + buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1; 697 + buf_oaf[1] = dev->linkrate & 0xf; 698 + *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); 699 + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); 824 700 825 701 /* fill in SSP frame header (Command Table.SSP frame header) */ 826 702 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; 827 - ssp_hdr->frame_type = SSP_COMMAND; 828 - memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr, 703 + 704 + if (is_tmf) 705 + ssp_hdr->frame_type = SSP_TASK; 706 + else 707 + ssp_hdr->frame_type = SSP_COMMAND; 708 + 709 + memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr, 829 710 HASHED_SAS_ADDR_SIZE); 830 711 memcpy(ssp_hdr->hashed_src_addr, 831 - task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); 712 + dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); 832 713 ssp_hdr->tag = cpu_to_be16(tag); 833 714 834 - /* fill in command frame IU */ 715 + /* fill in IU for TASK and Command Frame */ 835 716 buf_cmd += sizeof(*ssp_hdr); 836 717 memcpy(buf_cmd, &task->ssp_task.LUN, 8); 837 - buf_cmd[9] = fburst | task->ssp_task.task_attr | 838 - (task->ssp_task.task_prio << 3); 839 - memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); 840 718 841 - /* fill in PRD (scatter/gather) table, if any */ 842 - for_each_sg(task->scatter, sg, tei->n_elem, i) { 843 - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); 844 - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); 845 - buf_prd++; 719 + if (ssp_hdr->frame_type != SSP_TASK) { 720 + buf_cmd[9] = fburst | task->ssp_task.task_attr | 721 + (task->ssp_task.task_prio << 3); 722 + memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); 723 + } else{ 724 + buf_cmd[10] = tmf->tmf; 725 + switch 
(tmf->tmf) { 726 + case TMF_ABORT_TASK: 727 + case TMF_QUERY_TASK: 728 + buf_cmd[12] = 729 + (tmf->tag_of_task_to_be_managed >> 8) & 0xff; 730 + buf_cmd[13] = 731 + tmf->tag_of_task_to_be_managed & 0xff; 732 + break; 733 + default: 734 + break; 735 + } 846 736 } 847 - 737 + /* fill in PRD (scatter/gather) table, if any */ 738 + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); 848 739 return 0; 849 740 } 850 741 851 - int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) 742 + #define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) 743 + static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, 744 + struct completion *completion, int lock, 745 + int is_tmf, struct mvs_tmf_task *tmf) 852 746 { 853 747 struct domain_device *dev = task->dev; 854 - struct mvs_info *mvi = dev->port->ha->lldd_ha; 855 - struct pci_dev *pdev = mvi->pdev; 856 - void __iomem *regs = mvi->regs; 748 + struct mvs_info *mvi; 749 + struct mvs_device *mvi_dev; 857 750 struct mvs_task_exec_info tei; 858 751 struct sas_task *t = task; 859 752 struct mvs_slot_info *slot; 860 753 u32 tag = 0xdeadbeef, rc, n_elem = 0; 861 - unsigned long flags; 862 754 u32 n = num, pass = 0; 755 + unsigned long flags = 0; 863 756 864 - spin_lock_irqsave(&mvi->lock, flags); 757 + if (!dev->port) { 758 + struct task_status_struct *tsm = &t->task_status; 759 + 760 + tsm->resp = SAS_TASK_UNDELIVERED; 761 + tsm->stat = SAS_PHY_DOWN; 762 + t->task_done(t); 763 + return 0; 764 + } 765 + 766 + mvi = mvs_find_dev_mvi(task->dev); 767 + 768 + if (lock) 769 + spin_lock_irqsave(&mvi->lock, flags); 865 770 do { 866 771 dev = t->dev; 867 - tei.port = &mvi->port[dev->port->id]; 772 + mvi_dev = (struct mvs_device *)dev->lldd_dev; 773 + if (DEV_IS_GONE(mvi_dev)) { 774 + if (mvi_dev) 775 + mv_dprintk("device %d not ready.\n", 776 + mvi_dev->device_id); 777 + else 778 + mv_dprintk("device %016llx not ready.\n", 779 + SAS_ADDR(dev->sas_addr)); 780 + 781 + rc = 
SAS_PHY_DOWN; 782 + goto out_done; 783 + } 784 + 785 + if (dev->port->id >= mvi->chip->n_phy) 786 + tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy]; 787 + else 788 + tei.port = &mvi->port[dev->port->id]; 868 789 869 790 if (!tei.port->port_attached) { 870 791 if (sas_protocol_ata(t->task_proto)) { 792 + mv_dprintk("port %d does not" 793 + "attached device.\n", dev->port->id); 871 794 rc = SAS_PHY_DOWN; 872 795 goto out_done; 873 796 } else { ··· 932 759 933 760 if (!sas_protocol_ata(t->task_proto)) { 934 761 if (t->num_scatter) { 935 - n_elem = pci_map_sg(mvi->pdev, t->scatter, 762 + n_elem = dma_map_sg(mvi->dev, 763 + t->scatter, 936 764 t->num_scatter, 937 765 t->data_dir); 938 766 if (!n_elem) { ··· 950 776 goto err_out; 951 777 952 778 slot = &mvi->slot_info[tag]; 779 + 780 + 953 781 t->lldd_task = NULL; 954 782 slot->n_elem = n_elem; 783 + slot->slot_tag = tag; 955 784 memset(slot->buf, 0, MVS_SLOT_BUF_SZ); 785 + 956 786 tei.task = t; 957 787 tei.hdr = &mvi->slot[tag]; 958 788 tei.tag = tag; 959 789 tei.n_elem = n_elem; 960 - 961 790 switch (t->task_proto) { 962 791 case SAS_PROTOCOL_SMP: 963 792 rc = mvs_task_prep_smp(mvi, &tei); 964 793 break; 965 794 case SAS_PROTOCOL_SSP: 966 - rc = mvs_task_prep_ssp(mvi, &tei); 795 + rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf); 967 796 break; 968 797 case SAS_PROTOCOL_SATA: 969 798 case SAS_PROTOCOL_STP: ··· 974 797 rc = mvs_task_prep_ata(mvi, &tei); 975 798 break; 976 799 default: 977 - dev_printk(KERN_ERR, &pdev->dev, 800 + dev_printk(KERN_ERR, mvi->dev, 978 801 "unknown sas_task proto: 0x%x\n", 979 802 t->task_proto); 980 803 rc = -EINVAL; 981 804 break; 982 805 } 983 806 984 - if (rc) 807 + if (rc) { 808 + mv_dprintk("rc is %x\n", rc); 985 809 goto err_out_tag; 986 - 810 + } 987 811 slot->task = t; 988 812 slot->port = tei.port; 989 813 t->lldd_task = (void *) slot; 990 - list_add_tail(&slot->list, &slot->port->list); 814 + list_add_tail(&slot->entry, &tei.port->list); 991 815 /* TODO: select normal or high 
priority */ 992 - 993 816 spin_lock(&t->task_state_lock); 994 817 t->task_state_flags |= SAS_TASK_AT_INITIATOR; 995 818 spin_unlock(&t->task_state_lock); 996 819 997 820 mvs_hba_memory_dump(mvi, tag, t->task_proto); 998 - 821 + mvi_dev->runing_req++; 999 822 ++pass; 1000 823 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); 1001 824 if (n > 1) 1002 825 t = list_entry(t->list.next, struct sas_task, list); 1003 826 } while (--n); 1004 - 1005 827 rc = 0; 1006 828 goto out_done; 1007 829 1008 830 err_out_tag: 1009 831 mvs_tag_free(mvi, tag); 1010 832 err_out: 1011 - dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc); 833 + 834 + dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); 1012 835 if (!sas_protocol_ata(t->task_proto)) 1013 836 if (n_elem) 1014 - pci_unmap_sg(mvi->pdev, t->scatter, n_elem, 837 + dma_unmap_sg(mvi->dev, t->scatter, n_elem, 1015 838 t->data_dir); 1016 839 out_done: 1017 - if (pass) 1018 - mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); 1019 - spin_unlock_irqrestore(&mvi->lock, flags); 840 + if (likely(pass)) { 841 + MVS_CHIP_DISP->start_delivery(mvi, 842 + (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); 843 + } 844 + if (lock) 845 + spin_unlock_irqrestore(&mvi->lock, flags); 1020 846 return rc; 847 + } 848 + 849 + int mvs_queue_command(struct sas_task *task, const int num, 850 + gfp_t gfp_flags) 851 + { 852 + return mvs_task_exec(task, num, gfp_flags, NULL, 1, 0, NULL); 1021 853 } 1022 854 1023 855 static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) ··· 1038 852 static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, 1039 853 struct mvs_slot_info *slot, u32 slot_idx) 1040 854 { 855 + if (!slot->task) 856 + return; 1041 857 if (!sas_protocol_ata(task->task_proto)) 1042 858 if (slot->n_elem) 1043 - pci_unmap_sg(mvi->pdev, task->scatter, 859 + dma_unmap_sg(mvi->dev, task->scatter, 1044 860 slot->n_elem, task->data_dir); 1045 861 1046 862 switch (task->task_proto) { 1047 863 
case SAS_PROTOCOL_SMP: 1048 - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, 864 + dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1, 1049 865 PCI_DMA_FROMDEVICE); 1050 - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, 866 + dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1, 1051 867 PCI_DMA_TODEVICE); 1052 868 break; 1053 869 ··· 1060 872 /* do nothing */ 1061 873 break; 1062 874 } 1063 - list_del(&slot->list); 875 + list_del_init(&slot->entry); 1064 876 task->lldd_task = NULL; 1065 877 slot->task = NULL; 1066 878 slot->port = NULL; 879 + slot->slot_tag = 0xFFFFFFFF; 880 + mvs_slot_free(mvi, slot_idx); 1067 881 } 1068 882 1069 883 static void mvs_update_wideport(struct mvs_info *mvi, int i) ··· 1074 884 struct mvs_port *port = phy->port; 1075 885 int j, no; 1076 886 1077 - for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy) 1078 - if (no & 1) { 1079 - mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); 1080 - mvs_write_port_cfg_data(mvi, no, 887 + for_each_phy(port->wide_port_phymap, j, no) { 888 + if (j & 1) { 889 + MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, 890 + PHYR_WIDE_PORT); 891 + MVS_CHIP_DISP->write_port_cfg_data(mvi, no, 1081 892 port->wide_port_phymap); 1082 893 } else { 1083 - mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); 1084 - mvs_write_port_cfg_data(mvi, no, 0); 894 + MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, 895 + PHYR_WIDE_PORT); 896 + MVS_CHIP_DISP->write_port_cfg_data(mvi, no, 897 + 0); 1085 898 } 899 + } 1086 900 } 1087 901 1088 902 static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) 1089 903 { 1090 904 u32 tmp; 1091 905 struct mvs_phy *phy = &mvi->phy[i]; 1092 - struct mvs_port *port = phy->port;; 906 + struct mvs_port *port = phy->port; 1093 907 1094 - tmp = mvs_read_phy_ctl(mvi, i); 1095 - 908 + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i); 1096 909 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { 1097 910 if (!port) 1098 911 phy->phy_attached = 1; ··· 1110 917 mvs_update_wideport(mvi, i); 1111 918 
} else if (phy->phy_type & PORT_TYPE_SATA) 1112 919 port->port_attached = 0; 1113 - mvs_free_reg_set(mvi, phy->port); 1114 920 phy->port = NULL; 1115 921 phy->phy_attached = 0; 1116 922 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); ··· 1124 932 if (!s) 1125 933 return NULL; 1126 934 1127 - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); 1128 - s[3] = mvs_read_port_cfg_data(mvi, i); 935 + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); 936 + s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1129 937 1130 - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); 1131 - s[2] = mvs_read_port_cfg_data(mvi, i); 938 + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); 939 + s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1132 940 1133 - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); 1134 - s[1] = mvs_read_port_cfg_data(mvi, i); 941 + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); 942 + s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1135 943 1136 - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); 1137 - s[0] = mvs_read_port_cfg_data(mvi, i); 944 + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); 945 + s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 946 + 947 + /* Workaround: take some ATAPI devices for ATA */ 948 + if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) 949 + s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); 1138 950 1139 951 return (void *)s; 1140 952 } ··· 1148 952 return irq_status & PHYEV_SIG_FIS; 1149 953 } 1150 954 1151 - static void mvs_update_phyinfo(struct mvs_info *mvi, int i, 1152 - int get_st) 955 + void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) 1153 956 { 1154 957 struct mvs_phy *phy = &mvi->phy[i]; 1155 - struct pci_dev *pdev = mvi->pdev; 1156 - u32 tmp; 1157 - u64 tmp64; 958 + struct sas_identify_frame *id; 1158 959 1159 - mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); 1160 - phy->dev_info = mvs_read_port_cfg_data(mvi, i); 1161 - 1162 - 
mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); 1163 - phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32; 1164 - 1165 - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); 1166 - phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); 960 + id = (struct sas_identify_frame *)phy->frame_rcvd; 1167 961 1168 962 if (get_st) { 1169 - phy->irq_status = mvs_read_port_irq_stat(mvi, i); 963 + phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i); 1170 964 phy->phy_status = mvs_is_phy_ready(mvi, i); 1171 965 } 1172 966 1173 967 if (phy->phy_status) { 1174 - u32 phy_st; 1175 - struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; 968 + int oob_done = 0; 969 + struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy; 1176 970 1177 - mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); 1178 - phy_st = mvs_read_port_cfg_data(mvi, i); 971 + oob_done = MVS_CHIP_DISP->oob_done(mvi, i); 1179 972 1180 - sas_phy->linkrate = 1181 - (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> 1182 - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; 1183 - phy->minimum_linkrate = 1184 - (phy->phy_status & 1185 - PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; 1186 - phy->maximum_linkrate = 1187 - (phy->phy_status & 1188 - PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; 1189 - 1190 - if (phy->phy_type & PORT_TYPE_SAS) { 1191 - /* Updated attached_sas_addr */ 1192 - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); 1193 - phy->att_dev_sas_addr = 1194 - (u64) mvs_read_port_cfg_data(mvi, i) << 32; 1195 - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); 1196 - phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); 1197 - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); 1198 - phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); 973 + MVS_CHIP_DISP->fix_phy_info(mvi, i, id); 974 + if (phy->phy_type & PORT_TYPE_SATA) { 975 + phy->identify.target_port_protocols = SAS_PROTOCOL_STP; 976 + if (mvs_is_sig_fis_received(phy->irq_status)) { 977 + phy->phy_attached = 1; 978 + phy->att_dev_sas_addr = 979 + i + mvi->id * 
mvi->chip->n_phy; 980 + if (oob_done) 981 + sas_phy->oob_mode = SATA_OOB_MODE; 982 + phy->frame_rcvd_size = 983 + sizeof(struct dev_to_host_fis); 984 + mvs_get_d2h_reg(mvi, i, (void *)id); 985 + } else { 986 + u32 tmp; 987 + dev_printk(KERN_DEBUG, mvi->dev, 988 + "Phy%d : No sig fis\n", i); 989 + tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i); 990 + MVS_CHIP_DISP->write_port_irq_mask(mvi, i, 991 + tmp | PHYEV_SIG_FIS); 992 + phy->phy_attached = 0; 993 + phy->phy_type &= ~PORT_TYPE_SATA; 994 + MVS_CHIP_DISP->phy_reset(mvi, i, 0); 995 + goto out_done; 996 + } 997 + } else if (phy->phy_type & PORT_TYPE_SAS 998 + || phy->att_dev_info & PORT_SSP_INIT_MASK) { 999 + phy->phy_attached = 1; 1199 1000 phy->identify.device_type = 1200 - phy->att_dev_info & PORT_DEV_TYPE_MASK; 1001 + phy->att_dev_info & PORT_DEV_TYPE_MASK; 1201 1002 1202 1003 if (phy->identify.device_type == SAS_END_DEV) 1203 1004 phy->identify.target_port_protocols = ··· 1202 1009 else if (phy->identify.device_type != NO_DEVICE) 1203 1010 phy->identify.target_port_protocols = 1204 1011 SAS_PROTOCOL_SMP; 1205 - if (phy_st & PHY_OOB_DTCTD) 1012 + if (oob_done) 1206 1013 sas_phy->oob_mode = SAS_OOB_MODE; 1207 1014 phy->frame_rcvd_size = 1208 1015 sizeof(struct sas_identify_frame); 1209 - } else if (phy->phy_type & PORT_TYPE_SATA) { 1210 - phy->identify.target_port_protocols = SAS_PROTOCOL_STP; 1211 - if (mvs_is_sig_fis_received(phy->irq_status)) { 1212 - phy->att_dev_sas_addr = i; /* temp */ 1213 - if (phy_st & PHY_OOB_DTCTD) 1214 - sas_phy->oob_mode = SATA_OOB_MODE; 1215 - phy->frame_rcvd_size = 1216 - sizeof(struct dev_to_host_fis); 1217 - mvs_get_d2h_reg(mvi, i, 1218 - (void *)sas_phy->frame_rcvd); 1219 - } else { 1220 - dev_printk(KERN_DEBUG, &pdev->dev, 1221 - "No sig fis\n"); 1222 - phy->phy_type &= ~(PORT_TYPE_SATA); 1223 - goto out_done; 1224 - } 1225 1016 } 1226 - tmp64 = cpu_to_be64(phy->att_dev_sas_addr); 1227 - memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE); 1017 + 
memcpy(sas_phy->attached_sas_addr, 1018 + &phy->att_dev_sas_addr, SAS_ADDR_SIZE); 1228 1019 1229 - dev_printk(KERN_DEBUG, &pdev->dev, 1230 - "phy[%d] Get Attached Address 0x%llX ," 1231 - " SAS Address 0x%llX\n", 1232 - i, 1233 - (unsigned long long)phy->att_dev_sas_addr, 1234 - (unsigned long long)phy->dev_sas_addr); 1235 - dev_printk(KERN_DEBUG, &pdev->dev, 1236 - "Rate = %x , type = %d\n", 1237 - sas_phy->linkrate, phy->phy_type); 1238 - 1239 - /* workaround for HW phy decoding error on 1.5g disk drive */ 1240 - mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); 1241 - tmp = mvs_read_port_vsr_data(mvi, i); 1242 - if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> 1243 - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == 1244 - SAS_LINK_RATE_1_5_GBPS) 1245 - tmp &= ~PHY_MODE6_LATECLK; 1246 - else 1247 - tmp |= PHY_MODE6_LATECLK; 1248 - mvs_write_port_vsr_data(mvi, i, tmp); 1249 - 1020 + if (MVS_CHIP_DISP->phy_work_around) 1021 + MVS_CHIP_DISP->phy_work_around(mvi, i); 1250 1022 } 1023 + mv_dprintk("port %d attach dev info is %x\n", 1024 + i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); 1025 + mv_dprintk("port %d attach sas addr is %llx\n", 1026 + i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); 1251 1027 out_done: 1252 1028 if (get_st) 1253 - mvs_write_port_irq_stat(mvi, i, phy->irq_status); 1029 + MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status); 1254 1030 } 1031 + 1032 + static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock) 1033 + { 1034 + struct sas_ha_struct *sas_ha = sas_phy->ha; 1035 + struct mvs_info *mvi = NULL; int i = 0, hi; 1036 + struct mvs_phy *phy = sas_phy->lldd_phy; 1037 + struct asd_sas_port *sas_port = sas_phy->port; 1038 + struct mvs_port *port; 1039 + unsigned long flags = 0; 1040 + if (!sas_port) 1041 + return; 1042 + 1043 + while (sas_ha->sas_phy[i]) { 1044 + if (sas_ha->sas_phy[i] == sas_phy) 1045 + break; 1046 + i++; 1047 + } 1048 + hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; 1049 + mvi = 
((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; 1050 + if (sas_port->id >= mvi->chip->n_phy) 1051 + port = &mvi->port[sas_port->id - mvi->chip->n_phy]; 1052 + else 1053 + port = &mvi->port[sas_port->id]; 1054 + if (lock) 1055 + spin_lock_irqsave(&mvi->lock, flags); 1056 + port->port_attached = 1; 1057 + phy->port = port; 1058 + if (phy->phy_type & PORT_TYPE_SAS) { 1059 + port->wide_port_phymap = sas_port->phy_mask; 1060 + mv_printk("set wide port phy map %x\n", sas_port->phy_mask); 1061 + mvs_update_wideport(mvi, sas_phy->id); 1062 + } 1063 + if (lock) 1064 + spin_unlock_irqrestore(&mvi->lock, flags); 1065 + } 1066 + 1067 + static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock) 1068 + { 1069 + /*Nothing*/ 1070 + } 1071 + 1255 1072 1256 1073 void mvs_port_formed(struct asd_sas_phy *sas_phy) 1257 1074 { 1258 - struct sas_ha_struct *sas_ha = sas_phy->ha; 1259 - struct mvs_info *mvi = sas_ha->lldd_ha; 1260 - struct asd_sas_port *sas_port = sas_phy->port; 1261 - struct mvs_phy *phy = sas_phy->lldd_phy; 1262 - struct mvs_port *port = &mvi->port[sas_port->id]; 1263 - unsigned long flags; 1075 + mvs_port_notify_formed(sas_phy, 1); 1076 + } 1264 1077 1265 - spin_lock_irqsave(&mvi->lock, flags); 1266 - port->port_attached = 1; 1267 - phy->port = port; 1268 - port->taskfileset = MVS_ID_NOT_MAPPED; 1269 - if (phy->phy_type & PORT_TYPE_SAS) { 1270 - port->wide_port_phymap = sas_port->phy_mask; 1271 - mvs_update_wideport(mvi, sas_phy->id); 1078 + void mvs_port_deformed(struct asd_sas_phy *sas_phy) 1079 + { 1080 + mvs_port_notify_deformed(sas_phy, 1); 1081 + } 1082 + 1083 + struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) 1084 + { 1085 + u32 dev; 1086 + for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { 1087 + if (mvi->devices[dev].dev_type == NO_DEVICE) { 1088 + mvi->devices[dev].device_id = dev; 1089 + return &mvi->devices[dev]; 1090 + } 1272 1091 } 1273 - spin_unlock_irqrestore(&mvi->lock, flags); 1092 + 1093 + if (dev == MVS_MAX_DEVICES) 1094 + 
mv_printk("max support %d devices, ignore ..\n", 1095 + MVS_MAX_DEVICES); 1096 + 1097 + return NULL; 1098 + } 1099 + 1100 + void mvs_free_dev(struct mvs_device *mvi_dev) 1101 + { 1102 + u32 id = mvi_dev->device_id; 1103 + memset(mvi_dev, 0, sizeof(*mvi_dev)); 1104 + mvi_dev->device_id = id; 1105 + mvi_dev->dev_type = NO_DEVICE; 1106 + mvi_dev->dev_status = MVS_DEV_NORMAL; 1107 + mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; 1108 + } 1109 + 1110 + int mvs_dev_found_notify(struct domain_device *dev, int lock) 1111 + { 1112 + unsigned long flags = 0; 1113 + int res = 0; 1114 + struct mvs_info *mvi = NULL; 1115 + struct domain_device *parent_dev = dev->parent; 1116 + struct mvs_device *mvi_device; 1117 + 1118 + mvi = mvs_find_dev_mvi(dev); 1119 + 1120 + if (lock) 1121 + spin_lock_irqsave(&mvi->lock, flags); 1122 + 1123 + mvi_device = mvs_alloc_dev(mvi); 1124 + if (!mvi_device) { 1125 + res = -1; 1126 + goto found_out; 1127 + } 1128 + dev->lldd_dev = (void *)mvi_device; 1129 + mvi_device->dev_type = dev->dev_type; 1130 + 1131 + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { 1132 + int phy_id; 1133 + u8 phy_num = parent_dev->ex_dev.num_phys; 1134 + struct ex_phy *phy; 1135 + for (phy_id = 0; phy_id < phy_num; phy_id++) { 1136 + phy = &parent_dev->ex_dev.ex_phy[phy_id]; 1137 + if (SAS_ADDR(phy->attached_sas_addr) == 1138 + SAS_ADDR(dev->sas_addr)) { 1139 + mvi_device->attached_phy = phy_id; 1140 + break; 1141 + } 1142 + } 1143 + 1144 + if (phy_id == phy_num) { 1145 + mv_printk("Error: no attached dev:%016llx" 1146 + "at ex:%016llx.\n", 1147 + SAS_ADDR(dev->sas_addr), 1148 + SAS_ADDR(parent_dev->sas_addr)); 1149 + res = -1; 1150 + } 1151 + } 1152 + 1153 + found_out: 1154 + if (lock) 1155 + spin_unlock_irqrestore(&mvi->lock, flags); 1156 + return res; 1157 + } 1158 + 1159 + int mvs_dev_found(struct domain_device *dev) 1160 + { 1161 + return mvs_dev_found_notify(dev, 1); 1162 + } 1163 + 1164 + void mvs_dev_gone_notify(struct domain_device *dev, int lock) 1165 + { 
1166 + unsigned long flags = 0; 1167 + struct mvs_info *mvi; 1168 + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; 1169 + 1170 + mvi = mvs_find_dev_mvi(dev); 1171 + 1172 + if (lock) 1173 + spin_lock_irqsave(&mvi->lock, flags); 1174 + 1175 + if (mvi_dev) { 1176 + mv_dprintk("found dev[%d:%x] is gone.\n", 1177 + mvi_dev->device_id, mvi_dev->dev_type); 1178 + mvs_free_reg_set(mvi, mvi_dev); 1179 + mvs_free_dev(mvi_dev); 1180 + } else { 1181 + mv_dprintk("found dev has gone.\n"); 1182 + } 1183 + dev->lldd_dev = NULL; 1184 + 1185 + if (lock) 1186 + spin_unlock_irqrestore(&mvi->lock, flags); 1187 + } 1188 + 1189 + 1190 + void mvs_dev_gone(struct domain_device *dev) 1191 + { 1192 + mvs_dev_gone_notify(dev, 1); 1193 + } 1194 + 1195 + static struct sas_task *mvs_alloc_task(void) 1196 + { 1197 + struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL); 1198 + 1199 + if (task) { 1200 + INIT_LIST_HEAD(&task->list); 1201 + spin_lock_init(&task->task_state_lock); 1202 + task->task_state_flags = SAS_TASK_STATE_PENDING; 1203 + init_timer(&task->timer); 1204 + init_completion(&task->completion); 1205 + } 1206 + return task; 1207 + } 1208 + 1209 + static void mvs_free_task(struct sas_task *task) 1210 + { 1211 + if (task) { 1212 + BUG_ON(!list_empty(&task->list)); 1213 + kfree(task); 1214 + } 1215 + } 1216 + 1217 + static void mvs_task_done(struct sas_task *task) 1218 + { 1219 + if (!del_timer(&task->timer)) 1220 + return; 1221 + complete(&task->completion); 1222 + } 1223 + 1224 + static void mvs_tmf_timedout(unsigned long data) 1225 + { 1226 + struct sas_task *task = (struct sas_task *)data; 1227 + 1228 + task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1229 + complete(&task->completion); 1230 + } 1231 + 1232 + /* XXX */ 1233 + #define MVS_TASK_TIMEOUT 20 1234 + static int mvs_exec_internal_tmf_task(struct domain_device *dev, 1235 + void *parameter, u32 para_len, struct mvs_tmf_task *tmf) 1236 + { 1237 + int res, retry; 1238 + struct sas_task *task = 
NULL; 1239 + 1240 + for (retry = 0; retry < 3; retry++) { 1241 + task = mvs_alloc_task(); 1242 + if (!task) 1243 + return -ENOMEM; 1244 + 1245 + task->dev = dev; 1246 + task->task_proto = dev->tproto; 1247 + 1248 + memcpy(&task->ssp_task, parameter, para_len); 1249 + task->task_done = mvs_task_done; 1250 + 1251 + task->timer.data = (unsigned long) task; 1252 + task->timer.function = mvs_tmf_timedout; 1253 + task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; 1254 + add_timer(&task->timer); 1255 + 1256 + res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 0, 1, tmf); 1257 + 1258 + if (res) { 1259 + del_timer(&task->timer); 1260 + mv_printk("executing internel task failed:%d\n", res); 1261 + goto ex_err; 1262 + } 1263 + 1264 + wait_for_completion(&task->completion); 1265 + res = -TMF_RESP_FUNC_FAILED; 1266 + /* Even TMF timed out, return direct. */ 1267 + if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1268 + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1269 + mv_printk("TMF task[%x] timeout.\n", tmf->tmf); 1270 + goto ex_err; 1271 + } 1272 + } 1273 + 1274 + if (task->task_status.resp == SAS_TASK_COMPLETE && 1275 + task->task_status.stat == SAM_GOOD) { 1276 + res = TMF_RESP_FUNC_COMPLETE; 1277 + break; 1278 + } 1279 + 1280 + if (task->task_status.resp == SAS_TASK_COMPLETE && 1281 + task->task_status.stat == SAS_DATA_UNDERRUN) { 1282 + /* no error, but return the number of bytes of 1283 + * underrun */ 1284 + res = task->task_status.residual; 1285 + break; 1286 + } 1287 + 1288 + if (task->task_status.resp == SAS_TASK_COMPLETE && 1289 + task->task_status.stat == SAS_DATA_OVERRUN) { 1290 + mv_dprintk("blocked task error.\n"); 1291 + res = -EMSGSIZE; 1292 + break; 1293 + } else { 1294 + mv_dprintk(" task to dev %016llx response: 0x%x " 1295 + "status 0x%x\n", 1296 + SAS_ADDR(dev->sas_addr), 1297 + task->task_status.resp, 1298 + task->task_status.stat); 1299 + mvs_free_task(task); 1300 + task = NULL; 1301 + 1302 + } 1303 + } 1304 + ex_err: 1305 + 
BUG_ON(retry == 3 && task != NULL); 1306 + if (task != NULL) 1307 + mvs_free_task(task); 1308 + return res; 1309 + } 1310 + 1311 + static int mvs_debug_issue_ssp_tmf(struct domain_device *dev, 1312 + u8 *lun, struct mvs_tmf_task *tmf) 1313 + { 1314 + struct sas_ssp_task ssp_task; 1315 + DECLARE_COMPLETION_ONSTACK(completion); 1316 + if (!(dev->tproto & SAS_PROTOCOL_SSP)) 1317 + return TMF_RESP_FUNC_ESUPP; 1318 + 1319 + strncpy((u8 *)&ssp_task.LUN, lun, 8); 1320 + 1321 + return mvs_exec_internal_tmf_task(dev, &ssp_task, 1322 + sizeof(ssp_task), tmf); 1323 + } 1324 + 1325 + 1326 + /* Standard mandates link reset for ATA (type 0) 1327 + and hard reset for SSP (type 1) , only for RECOVERY */ 1328 + static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) 1329 + { 1330 + int rc; 1331 + struct sas_phy *phy = sas_find_local_phy(dev); 1332 + int reset_type = (dev->dev_type == SATA_DEV || 1333 + (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; 1334 + rc = sas_phy_reset(phy, reset_type); 1335 + msleep(2000); 1336 + return rc; 1337 + } 1338 + 1339 + /* mandatory SAM-3 */ 1340 + int mvs_lu_reset(struct domain_device *dev, u8 *lun) 1341 + { 1342 + unsigned long flags; 1343 + int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; 1344 + struct mvs_tmf_task tmf_task; 1345 + struct mvs_info *mvi = mvs_find_dev_mvi(dev); 1346 + struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev; 1347 + 1348 + tmf_task.tmf = TMF_LU_RESET; 1349 + mvi_dev->dev_status = MVS_DEV_EH; 1350 + rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); 1351 + if (rc == TMF_RESP_FUNC_COMPLETE) { 1352 + num = mvs_find_dev_phyno(dev, phyno); 1353 + spin_lock_irqsave(&mvi->lock, flags); 1354 + for (i = 0; i < num; i++) 1355 + mvs_release_task(mvi, phyno[i], dev); 1356 + spin_unlock_irqrestore(&mvi->lock, flags); 1357 + } 1358 + /* If failed, fall-through I_T_Nexus reset */ 1359 + mv_printk("%s for device[%x]:rc= %d\n", __func__, 1360 + mvi_dev->device_id, rc); 1361 + return rc; 1274 1362 } 1275 
1363 1276 1364 int mvs_I_T_nexus_reset(struct domain_device *dev) 1277 1365 { 1278 - return TMF_RESP_FUNC_FAILED; 1366 + unsigned long flags; 1367 + int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; 1368 + struct mvs_info *mvi = mvs_find_dev_mvi(dev); 1369 + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; 1370 + 1371 + if (mvi_dev->dev_status != MVS_DEV_EH) 1372 + return TMF_RESP_FUNC_COMPLETE; 1373 + rc = mvs_debug_I_T_nexus_reset(dev); 1374 + mv_printk("%s for device[%x]:rc= %d\n", 1375 + __func__, mvi_dev->device_id, rc); 1376 + 1377 + /* housekeeper */ 1378 + num = mvs_find_dev_phyno(dev, phyno); 1379 + spin_lock_irqsave(&mvi->lock, flags); 1380 + for (i = 0; i < num; i++) 1381 + mvs_release_task(mvi, phyno[i], dev); 1382 + spin_unlock_irqrestore(&mvi->lock, flags); 1383 + 1384 + return rc; 1385 + } 1386 + /* optional SAM-3 */ 1387 + int mvs_query_task(struct sas_task *task) 1388 + { 1389 + u32 tag; 1390 + struct scsi_lun lun; 1391 + struct mvs_tmf_task tmf_task; 1392 + int rc = TMF_RESP_FUNC_FAILED; 1393 + 1394 + if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1395 + struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; 1396 + struct domain_device *dev = task->dev; 1397 + struct mvs_info *mvi = mvs_find_dev_mvi(dev); 1398 + 1399 + int_to_scsilun(cmnd->device->lun, &lun); 1400 + rc = mvs_find_tag(mvi, task, &tag); 1401 + if (rc == 0) { 1402 + rc = TMF_RESP_FUNC_FAILED; 1403 + return rc; 1404 + } 1405 + 1406 + tmf_task.tmf = TMF_QUERY_TASK; 1407 + tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); 1408 + 1409 + rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); 1410 + switch (rc) { 1411 + /* The task is still in Lun, release it then */ 1412 + case TMF_RESP_FUNC_SUCC: 1413 + /* The task is not in Lun or failed, reset the phy */ 1414 + case TMF_RESP_FUNC_FAILED: 1415 + case TMF_RESP_FUNC_COMPLETE: 1416 + break; 1417 + } 1418 + } 1419 + mv_printk("%s:rc= %d\n", __func__, rc); 1420 + return rc; 
1421 + } 1422 + 1423 + /* mandatory SAM-3, still need free task/slot info */ 1424 + int mvs_abort_task(struct sas_task *task) 1425 + { 1426 + struct scsi_lun lun; 1427 + struct mvs_tmf_task tmf_task; 1428 + struct domain_device *dev = task->dev; 1429 + struct mvs_info *mvi = mvs_find_dev_mvi(dev); 1430 + int rc = TMF_RESP_FUNC_FAILED; 1431 + unsigned long flags; 1432 + u32 tag; 1433 + if (mvi->exp_req) 1434 + mvi->exp_req--; 1435 + spin_lock_irqsave(&task->task_state_lock, flags); 1436 + if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1437 + spin_unlock_irqrestore(&task->task_state_lock, flags); 1438 + rc = TMF_RESP_FUNC_COMPLETE; 1439 + goto out; 1440 + } 1441 + spin_unlock_irqrestore(&task->task_state_lock, flags); 1442 + if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1443 + struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; 1444 + 1445 + int_to_scsilun(cmnd->device->lun, &lun); 1446 + rc = mvs_find_tag(mvi, task, &tag); 1447 + if (rc == 0) { 1448 + mv_printk("No such tag in %s\n", __func__); 1449 + rc = TMF_RESP_FUNC_FAILED; 1450 + return rc; 1451 + } 1452 + 1453 + tmf_task.tmf = TMF_ABORT_TASK; 1454 + tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); 1455 + 1456 + rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); 1457 + 1458 + /* if successful, clear the task and callback forwards.*/ 1459 + if (rc == TMF_RESP_FUNC_COMPLETE) { 1460 + u32 slot_no; 1461 + struct mvs_slot_info *slot; 1462 + struct mvs_info *mvi = mvs_find_dev_mvi(dev); 1463 + 1464 + if (task->lldd_task) { 1465 + slot = (struct mvs_slot_info *)task->lldd_task; 1466 + slot_no = (u32) (slot - mvi->slot_info); 1467 + mvs_slot_complete(mvi, slot_no, 1); 1468 + } 1469 + } 1470 + } else if (task->task_proto & SAS_PROTOCOL_SATA || 1471 + task->task_proto & SAS_PROTOCOL_STP) { 1472 + /* to do free register_set */ 1473 + } else { 1474 + /* SMP */ 1475 + 1476 + } 1477 + out: 1478 + if (rc != TMF_RESP_FUNC_COMPLETE) 1479 + mv_printk("%s:rc= %d\n", __func__, rc); 
1480 + return rc; 1481 + } 1482 + 1483 + int mvs_abort_task_set(struct domain_device *dev, u8 *lun) 1484 + { 1485 + int rc = TMF_RESP_FUNC_FAILED; 1486 + struct mvs_tmf_task tmf_task; 1487 + 1488 + tmf_task.tmf = TMF_ABORT_TASK_SET; 1489 + rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); 1490 + 1491 + return rc; 1492 + } 1493 + 1494 + int mvs_clear_aca(struct domain_device *dev, u8 *lun) 1495 + { 1496 + int rc = TMF_RESP_FUNC_FAILED; 1497 + struct mvs_tmf_task tmf_task; 1498 + 1499 + tmf_task.tmf = TMF_CLEAR_ACA; 1500 + rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); 1501 + 1502 + return rc; 1503 + } 1504 + 1505 + int mvs_clear_task_set(struct domain_device *dev, u8 *lun) 1506 + { 1507 + int rc = TMF_RESP_FUNC_FAILED; 1508 + struct mvs_tmf_task tmf_task; 1509 + 1510 + tmf_task.tmf = TMF_CLEAR_TASK_SET; 1511 + rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); 1512 + 1513 + return rc; 1279 1514 } 1280 1515 1281 1516 static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, 1282 1517 u32 slot_idx, int err) 1283 1518 { 1284 - struct mvs_port *port = mvi->slot_info[slot_idx].port; 1519 + struct mvs_device *mvi_dev = (struct mvs_device *)task->dev->lldd_dev; 1285 1520 struct task_status_struct *tstat = &task->task_status; 1286 1521 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; 1287 1522 int stat = SAM_GOOD; 1288 1523 1524 + 1289 1525 resp->frame_len = sizeof(struct dev_to_host_fis); 1290 1526 memcpy(&resp->ending_fis[0], 1291 - SATA_RECEIVED_D2H_FIS(port->taskfileset), 1527 + SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset), 1292 1528 sizeof(struct dev_to_host_fis)); 1293 1529 tstat->buf_valid_size = sizeof(*resp); 1294 1530 if (unlikely(err)) ··· 1729 1107 u32 slot_idx) 1730 1108 { 1731 1109 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; 1110 + int stat; 1732 1111 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); 1733 - u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4)); 1734 - int stat = SAM_CHECK_COND; 1112 + u32 tfs 
= 0; 1113 + enum mvs_port_type type = PORT_TYPE_SAS; 1735 1114 1736 - if (err_dw1 & SLOT_BSY_ERR) { 1737 - stat = SAS_QUEUE_FULL; 1738 - mvs_slot_reset(mvi, task, slot_idx); 1739 - } 1115 + if (err_dw0 & CMD_ISS_STPD) 1116 + MVS_CHIP_DISP->issue_stop(mvi, type, tfs); 1117 + 1118 + MVS_CHIP_DISP->command_active(mvi, slot_idx); 1119 + 1120 + stat = SAM_CHECK_COND; 1740 1121 switch (task->task_proto) { 1741 1122 case SAS_PROTOCOL_SSP: 1123 + stat = SAS_ABORTED_TASK; 1742 1124 break; 1743 1125 case SAS_PROTOCOL_SMP: 1126 + stat = SAM_CHECK_COND; 1744 1127 break; 1128 + 1745 1129 case SAS_PROTOCOL_SATA: 1746 1130 case SAS_PROTOCOL_STP: 1747 1131 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 1748 - if (err_dw0 & TFILE_ERR) 1749 - stat = mvs_sata_done(mvi, task, slot_idx, 1); 1132 + { 1133 + if (err_dw0 == 0x80400002) 1134 + mv_printk("find reserved error, why?\n"); 1135 + 1136 + task->ata_task.use_ncq = 0; 1137 + stat = SAS_PROTO_RESPONSE; 1138 + mvs_sata_done(mvi, task, slot_idx, 1); 1139 + 1140 + } 1750 1141 break; 1751 1142 default: 1752 1143 break; 1753 1144 } 1754 1145 1755 - mvs_hexdump(16, (u8 *) slot->response, 0); 1756 1146 return stat; 1757 1147 } 1758 1148 1759 - static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) 1149 + int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) 1760 1150 { 1761 1151 u32 slot_idx = rx_desc & RXQ_SLOT_MASK; 1762 1152 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; 1763 1153 struct sas_task *task = slot->task; 1154 + struct mvs_device *mvi_dev = NULL; 1764 1155 struct task_status_struct *tstat; 1765 - struct mvs_port *port; 1156 + 1766 1157 bool aborted; 1767 1158 void *to; 1159 + enum exec_status sts; 1768 1160 1161 + if (mvi->exp_req) 1162 + mvi->exp_req--; 1769 1163 if (unlikely(!task || !task->lldd_task)) 1770 1164 return -1; 1165 + 1166 + tstat = &task->task_status; 1167 + mvi_dev = (struct mvs_device *)task->dev->lldd_dev; 1771 1168 1772 1169 mvs_hba_cq_dump(mvi); 1773 1170 1774 1171 
spin_lock(&task->task_state_lock); 1172 + task->task_state_flags &= 1173 + ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 1174 + task->task_state_flags |= SAS_TASK_STATE_DONE; 1175 + /* race condition*/ 1775 1176 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; 1776 - if (!aborted) { 1777 - task->task_state_flags &= 1778 - ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 1779 - task->task_state_flags |= SAS_TASK_STATE_DONE; 1780 - } 1781 1177 spin_unlock(&task->task_state_lock); 1782 1178 1783 - if (aborted) { 1784 - mvs_slot_task_free(mvi, task, slot, slot_idx); 1785 - mvs_slot_free(mvi, rx_desc); 1786 - return -1; 1787 - } 1788 - 1789 - port = slot->port; 1790 - tstat = &task->task_status; 1791 1179 memset(tstat, 0, sizeof(*tstat)); 1792 1180 tstat->resp = SAS_TASK_COMPLETE; 1793 1181 1794 - if (unlikely(!port->port_attached || flags)) { 1795 - mvs_slot_err(mvi, task, slot_idx); 1796 - if (!sas_protocol_ata(task->task_proto)) 1797 - tstat->stat = SAS_PHY_DOWN; 1182 + if (unlikely(aborted)) { 1183 + tstat->stat = SAS_ABORTED_TASK; 1184 + if (mvi_dev) 1185 + mvi_dev->runing_req--; 1186 + if (sas_protocol_ata(task->task_proto)) 1187 + mvs_free_reg_set(mvi, mvi_dev); 1188 + 1189 + mvs_slot_task_free(mvi, task, slot, slot_idx); 1190 + return -1; 1191 + } 1192 + 1193 + if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) { 1194 + mv_dprintk("port has not device.\n"); 1195 + tstat->stat = SAS_PHY_DOWN; 1798 1196 goto out; 1799 1197 } 1198 + 1199 + /* 1200 + if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) { 1201 + mv_dprintk("Find device[%016llx] RXQ_ERR %X, 1202 + err info:%016llx\n", 1203 + SAS_ADDR(task->dev->sas_addr), 1204 + rx_desc, (u64)(*(u64 *) slot->response)); 1205 + } 1206 + */ 1800 1207 1801 1208 /* error info record present */ 1802 1209 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { ··· 1842 1191 } 1843 1192 /* response frame present */ 1844 1193 else if (rx_desc & RXQ_RSP) { 1845 - struct 
ssp_response_iu *iu = 1846 - slot->response + sizeof(struct mvs_err_info); 1847 - sas_ssp_task_response(&mvi->pdev->dev, task, iu); 1848 - } 1849 - 1850 - /* should never happen? */ 1851 - else 1194 + struct ssp_response_iu *iu = slot->response + 1195 + sizeof(struct mvs_err_info); 1196 + sas_ssp_task_response(mvi->dev, task, iu); 1197 + } else 1852 1198 tstat->stat = SAM_CHECK_COND; 1853 1199 break; 1854 1200 ··· 1873 1225 } 1874 1226 1875 1227 out: 1228 + if (mvi_dev) 1229 + mvi_dev->runing_req--; 1230 + if (sas_protocol_ata(task->task_proto)) 1231 + mvs_free_reg_set(mvi, mvi_dev); 1232 + 1876 1233 mvs_slot_task_free(mvi, task, slot, slot_idx); 1877 - if (unlikely(tstat->stat != SAS_QUEUE_FULL)) 1878 - mvs_slot_free(mvi, rx_desc); 1234 + sts = tstat->stat; 1879 1235 1880 1236 spin_unlock(&mvi->lock); 1881 - task->task_done(task); 1237 + if (task->task_done) 1238 + task->task_done(task); 1239 + else 1240 + mv_dprintk("why has not task_done.\n"); 1882 1241 spin_lock(&mvi->lock); 1883 - return tstat->stat; 1242 + 1243 + return sts; 1884 1244 } 1885 1245 1886 - static void mvs_release_task(struct mvs_info *mvi, int phy_no) 1246 + void mvs_release_task(struct mvs_info *mvi, 1247 + int phy_no, struct domain_device *dev) 1887 1248 { 1888 - struct list_head *pos, *n; 1889 - struct mvs_slot_info *slot; 1890 - struct mvs_phy *phy = &mvi->phy[phy_no]; 1891 - struct mvs_port *port = phy->port; 1892 - u32 rx_desc; 1249 + int i = 0; u32 slot_idx; 1250 + struct mvs_phy *phy; 1251 + struct mvs_port *port; 1252 + struct mvs_slot_info *slot, *slot2; 1893 1253 1254 + phy = &mvi->phy[phy_no]; 1255 + port = phy->port; 1894 1256 if (!port) 1895 1257 return; 1896 1258 1897 - list_for_each_safe(pos, n, &port->list) { 1898 - slot = container_of(pos, struct mvs_slot_info, list); 1899 - rx_desc = (u32) (slot - mvi->slot_info); 1900 - mvs_slot_complete(mvi, rx_desc, 1); 1259 + list_for_each_entry_safe(slot, slot2, &port->list, entry) { 1260 + struct sas_task *task; 1261 + slot_idx = (u32) 
(slot - mvi->slot_info); 1262 + task = slot->task; 1263 + 1264 + if (dev && task->dev != dev) 1265 + continue; 1266 + 1267 + mv_printk("Release slot [%x] tag[%x], task [%p]:\n", 1268 + slot_idx, slot->slot_tag, task); 1269 + 1270 + if (task->task_proto & SAS_PROTOCOL_SSP) { 1271 + mv_printk("attached with SSP task CDB["); 1272 + for (i = 0; i < 16; i++) 1273 + mv_printk(" %02x", task->ssp_task.cdb[i]); 1274 + mv_printk(" ]\n"); 1275 + } 1276 + 1277 + mvs_slot_complete(mvi, slot_idx, 1); 1901 1278 } 1902 1279 } 1903 1280 1904 - static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) 1281 + static void mvs_phy_disconnected(struct mvs_phy *phy) 1905 1282 { 1906 - struct pci_dev *pdev = mvi->pdev; 1907 - struct sas_ha_struct *sas_ha = &mvi->sas; 1283 + phy->phy_attached = 0; 1284 + phy->att_dev_info = 0; 1285 + phy->att_dev_sas_addr = 0; 1286 + } 1287 + 1288 + static void mvs_work_queue(struct work_struct *work) 1289 + { 1290 + struct delayed_work *dw = container_of(work, struct delayed_work, work); 1291 + struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q); 1292 + struct mvs_info *mvi = mwq->mvi; 1293 + unsigned long flags; 1294 + 1295 + spin_lock_irqsave(&mvi->lock, flags); 1296 + if (mwq->handler & PHY_PLUG_EVENT) { 1297 + u32 phy_no = (unsigned long) mwq->data; 1298 + struct sas_ha_struct *sas_ha = mvi->sas; 1299 + struct mvs_phy *phy = &mvi->phy[phy_no]; 1300 + struct asd_sas_phy *sas_phy = &phy->sas_phy; 1301 + 1302 + if (phy->phy_event & PHY_PLUG_OUT) { 1303 + u32 tmp; 1304 + struct sas_identify_frame *id; 1305 + id = (struct sas_identify_frame *)phy->frame_rcvd; 1306 + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no); 1307 + phy->phy_event &= ~PHY_PLUG_OUT; 1308 + if (!(tmp & PHY_READY_MASK)) { 1309 + sas_phy_disconnected(sas_phy); 1310 + mvs_phy_disconnected(phy); 1311 + sas_ha->notify_phy_event(sas_phy, 1312 + PHYE_LOSS_OF_SIGNAL); 1313 + mv_dprintk("phy%d Removed Device\n", phy_no); 1314 + } else { 1315 + 
MVS_CHIP_DISP->detect_porttype(mvi, phy_no); 1316 + mvs_update_phyinfo(mvi, phy_no, 1); 1317 + mvs_bytes_dmaed(mvi, phy_no); 1318 + mvs_port_notify_formed(sas_phy, 0); 1319 + mv_dprintk("phy%d Attached Device\n", phy_no); 1320 + } 1321 + } 1322 + } 1323 + list_del(&mwq->entry); 1324 + spin_unlock_irqrestore(&mvi->lock, flags); 1325 + kfree(mwq); 1326 + } 1327 + 1328 + static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler) 1329 + { 1330 + struct mvs_wq *mwq; 1331 + int ret = 0; 1332 + 1333 + mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC); 1334 + if (mwq) { 1335 + mwq->mvi = mvi; 1336 + mwq->data = data; 1337 + mwq->handler = handler; 1338 + MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq); 1339 + list_add_tail(&mwq->entry, &mvi->wq_list); 1340 + schedule_delayed_work(&mwq->work_q, HZ * 2); 1341 + } else 1342 + ret = -ENOMEM; 1343 + 1344 + return ret; 1345 + } 1346 + 1347 + static void mvs_sig_time_out(unsigned long tphy) 1348 + { 1349 + struct mvs_phy *phy = (struct mvs_phy *)tphy; 1350 + struct mvs_info *mvi = phy->mvi; 1351 + u8 phy_no; 1352 + 1353 + for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) { 1354 + if (&mvi->phy[phy_no] == phy) { 1355 + mv_dprintk("Get signature time out, reset phy %d\n", 1356 + phy_no+mvi->id*mvi->chip->n_phy); 1357 + MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1); 1358 + } 1359 + } 1360 + } 1361 + 1362 + static void mvs_sig_remove_timer(struct mvs_phy *phy) 1363 + { 1364 + if (phy->timer.function) 1365 + del_timer(&phy->timer); 1366 + phy->timer.function = NULL; 1367 + } 1368 + 1369 + void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) 1370 + { 1371 + u32 tmp; 1372 + struct sas_ha_struct *sas_ha = mvi->sas; 1908 1373 struct mvs_phy *phy = &mvi->phy[phy_no]; 1909 1374 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1910 1375 1911 - phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no); 1376 + phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); 1377 + mv_dprintk("port %d ctrl 
sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy, 1378 + MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); 1379 + mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy, 1380 + phy->irq_status); 1381 + 1912 1382 /* 1913 1383 * events is port event now , 1914 1384 * we need check the interrupt status which belongs to per port. 1915 1385 */ 1916 - dev_printk(KERN_DEBUG, &pdev->dev, 1917 - "Port %d Event = %X\n", 1918 - phy_no, phy->irq_status); 1919 1386 1920 - if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { 1921 - mvs_release_task(mvi, phy_no); 1922 - if (!mvs_is_phy_ready(mvi, phy_no)) { 1923 - sas_phy_disconnected(sas_phy); 1924 - sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); 1925 - dev_printk(KERN_INFO, &pdev->dev, 1926 - "Port %d Unplug Notice\n", phy_no); 1387 + if (phy->irq_status & PHYEV_DCDR_ERR) 1388 + mv_dprintk("port %d STP decoding error.\n", 1389 + phy_no+mvi->id*mvi->chip->n_phy); 1927 1390 1928 - } else 1929 - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); 1930 - } 1931 - if (!(phy->irq_status & PHYEV_DEC_ERR)) { 1932 - if (phy->irq_status & PHYEV_COMWAKE) { 1933 - u32 tmp = mvs_read_port_irq_mask(mvi, phy_no); 1934 - mvs_write_port_irq_mask(mvi, phy_no, 1935 - tmp | PHYEV_SIG_FIS); 1936 - } 1937 - if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { 1938 - phy->phy_status = mvs_is_phy_ready(mvi, phy_no); 1939 - if (phy->phy_status) { 1940 - mvs_detect_porttype(mvi, phy_no); 1941 - 1942 - if (phy->phy_type & PORT_TYPE_SATA) { 1943 - u32 tmp = mvs_read_port_irq_mask(mvi, 1944 - phy_no); 1945 - tmp &= ~PHYEV_SIG_FIS; 1946 - mvs_write_port_irq_mask(mvi, 1947 - phy_no, tmp); 1948 - } 1949 - 1950 - mvs_update_phyinfo(mvi, phy_no, 0); 1951 - sas_ha->notify_phy_event(sas_phy, 1952 - PHYE_OOB_DONE); 1953 - mvs_bytes_dmaed(mvi, phy_no); 1954 - } else { 1955 - dev_printk(KERN_DEBUG, &pdev->dev, 1956 - "plugin interrupt but phy is gone\n"); 1957 - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, 1958 - NULL); 1391 + if 
(phy->irq_status & PHYEV_POOF) { 1392 + if (!(phy->phy_event & PHY_PLUG_OUT)) { 1393 + int dev_sata = phy->phy_type & PORT_TYPE_SATA; 1394 + int ready; 1395 + mvs_release_task(mvi, phy_no, NULL); 1396 + phy->phy_event |= PHY_PLUG_OUT; 1397 + mvs_handle_event(mvi, 1398 + (void *)(unsigned long)phy_no, 1399 + PHY_PLUG_EVENT); 1400 + ready = mvs_is_phy_ready(mvi, phy_no); 1401 + if (!ready) 1402 + mv_dprintk("phy%d Unplug Notice\n", 1403 + phy_no + 1404 + mvi->id * mvi->chip->n_phy); 1405 + if (ready || dev_sata) { 1406 + if (MVS_CHIP_DISP->stp_reset) 1407 + MVS_CHIP_DISP->stp_reset(mvi, 1408 + phy_no); 1409 + else 1410 + MVS_CHIP_DISP->phy_reset(mvi, 1411 + phy_no, 0); 1412 + return; 1959 1413 } 1960 - } else if (phy->irq_status & PHYEV_BROAD_CH) { 1961 - mvs_release_task(mvi, phy_no); 1962 - sas_ha->notify_port_event(sas_phy, 1963 - PORTE_BROADCAST_RCVD); 1964 1414 } 1965 1415 } 1966 - mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status); 1416 + 1417 + if (phy->irq_status & PHYEV_COMWAKE) { 1418 + tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no); 1419 + MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no, 1420 + tmp | PHYEV_SIG_FIS); 1421 + if (phy->timer.function == NULL) { 1422 + phy->timer.data = (unsigned long)phy; 1423 + phy->timer.function = mvs_sig_time_out; 1424 + phy->timer.expires = jiffies + 10*HZ; 1425 + add_timer(&phy->timer); 1426 + } 1427 + } 1428 + if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { 1429 + phy->phy_status = mvs_is_phy_ready(mvi, phy_no); 1430 + mvs_sig_remove_timer(phy); 1431 + mv_dprintk("notify plug in on phy[%d]\n", phy_no); 1432 + if (phy->phy_status) { 1433 + mdelay(10); 1434 + MVS_CHIP_DISP->detect_porttype(mvi, phy_no); 1435 + if (phy->phy_type & PORT_TYPE_SATA) { 1436 + tmp = MVS_CHIP_DISP->read_port_irq_mask( 1437 + mvi, phy_no); 1438 + tmp &= ~PHYEV_SIG_FIS; 1439 + MVS_CHIP_DISP->write_port_irq_mask(mvi, 1440 + phy_no, tmp); 1441 + } 1442 + mvs_update_phyinfo(mvi, phy_no, 0); 1443 + mvs_bytes_dmaed(mvi, phy_no); 1444 + 
/* whether driver is going to handle hot plug */ 1445 + if (phy->phy_event & PHY_PLUG_OUT) { 1446 + mvs_port_notify_formed(sas_phy, 0); 1447 + phy->phy_event &= ~PHY_PLUG_OUT; 1448 + } 1449 + } else { 1450 + mv_dprintk("plugin interrupt but phy%d is gone\n", 1451 + phy_no + mvi->id*mvi->chip->n_phy); 1452 + } 1453 + } else if (phy->irq_status & PHYEV_BROAD_CH) { 1454 + mv_dprintk("port %d broadcast change.\n", 1455 + phy_no + mvi->id*mvi->chip->n_phy); 1456 + /* exception for Samsung disk drive*/ 1457 + mdelay(1000); 1458 + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); 1459 + } 1460 + MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status); 1967 1461 } 1968 1462 1969 - static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) 1463 + int mvs_int_rx(struct mvs_info *mvi, bool self_clear) 1970 1464 { 1971 - void __iomem *regs = mvi->regs; 1972 1465 u32 rx_prod_idx, rx_desc; 1973 1466 bool attn = false; 1974 - struct pci_dev *pdev = mvi->pdev; 1975 1467 1976 1468 /* the first dword in the RX ring is special: it contains 1977 1469 * a mirror of the hardware's RX producer index, so that ··· 2127 1339 * note: if coalescing is enabled, 2128 1340 * it will need to read from register every time for sure 2129 1341 */ 2130 - if (mvi->rx_cons == rx_prod_idx) 2131 - mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; 1342 + if (unlikely(mvi->rx_cons == rx_prod_idx)) 1343 + mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK; 2132 1344 2133 1345 if (mvi->rx_cons == rx_prod_idx) 2134 1346 return 0; 2135 1347 2136 1348 while (mvi->rx_cons != rx_prod_idx) { 2137 - 2138 1349 /* increment our internal RX consumer pointer */ 2139 1350 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); 2140 - 2141 1351 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); 2142 1352 2143 1353 if (likely(rx_desc & RXQ_DONE)) 2144 1354 mvs_slot_complete(mvi, rx_desc, 0); 2145 1355 if (rx_desc & RXQ_ATTN) { 2146 1356 attn = true; 2147 - dev_printk(KERN_DEBUG, &pdev->dev, 
"ATTN %X\n", 2148 - rx_desc); 2149 1357 } else if (rx_desc & RXQ_ERR) { 2150 1358 if (!(rx_desc & RXQ_DONE)) 2151 1359 mvs_slot_complete(mvi, rx_desc, 0); 2152 - dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", 2153 - rx_desc); 2154 1360 } else if (rx_desc & RXQ_SLOT_RESET) { 2155 - dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n", 2156 - rx_desc); 2157 1361 mvs_slot_free(mvi, rx_desc); 2158 1362 } 2159 1363 } 2160 1364 2161 1365 if (attn && self_clear) 2162 - mvs_int_full(mvi); 2163 - 1366 + MVS_CHIP_DISP->int_full(mvi); 2164 1367 return 0; 2165 - } 2166 - 2167 - #ifndef MVS_DISABLE_NVRAM 2168 - static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data) 2169 - { 2170 - int timeout = 1000; 2171 - 2172 - if (addr & ~SPI_ADDR_MASK) 2173 - return -EINVAL; 2174 - 2175 - writel(addr, regs + SPI_CMD); 2176 - writel(TWSI_RD, regs + SPI_CTL); 2177 - 2178 - while (timeout-- > 0) { 2179 - if (readl(regs + SPI_CTL) & TWSI_RDY) { 2180 - *data = readl(regs + SPI_DATA); 2181 - return 0; 2182 - } 2183 - 2184 - udelay(10); 2185 - } 2186 - 2187 - return -EBUSY; 2188 - } 2189 - 2190 - static int mvs_eep_read_buf(void __iomem *regs, u32 addr, 2191 - void *buf, u32 buflen) 2192 - { 2193 - u32 addr_end, tmp_addr, i, j; 2194 - u32 tmp = 0; 2195 - int rc; 2196 - u8 *tmp8, *buf8 = buf; 2197 - 2198 - addr_end = addr + buflen; 2199 - tmp_addr = ALIGN(addr, 4); 2200 - if (addr > 0xff) 2201 - return -EINVAL; 2202 - 2203 - j = addr & 0x3; 2204 - if (j) { 2205 - rc = mvs_eep_read(regs, tmp_addr, &tmp); 2206 - if (rc) 2207 - return rc; 2208 - 2209 - tmp8 = (u8 *)&tmp; 2210 - for (i = j; i < 4; i++) 2211 - *buf8++ = tmp8[i]; 2212 - 2213 - tmp_addr += 4; 2214 - } 2215 - 2216 - for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { 2217 - rc = mvs_eep_read(regs, tmp_addr, &tmp); 2218 - if (rc) 2219 - return rc; 2220 - 2221 - memcpy(buf8, &tmp, 4); 2222 - buf8 += 4; 2223 - } 2224 - 2225 - if (tmp_addr < addr_end) { 2226 - rc = mvs_eep_read(regs, tmp_addr, &tmp); 2227 - if (rc) 2228 
- return rc; 2229 - 2230 - tmp8 = (u8 *)&tmp; 2231 - j = addr_end - tmp_addr; 2232 - for (i = 0; i < j; i++) 2233 - *buf8++ = tmp8[i]; 2234 - 2235 - tmp_addr += 4; 2236 - } 2237 - 2238 - return 0; 2239 - } 2240 - #endif 2241 - 2242 - int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen) 2243 - { 2244 - #ifndef MVS_DISABLE_NVRAM 2245 - void __iomem *regs = mvi->regs; 2246 - int rc, i; 2247 - u32 sum; 2248 - u8 hdr[2], *tmp; 2249 - const char *msg; 2250 - 2251 - rc = mvs_eep_read_buf(regs, addr, &hdr, 2); 2252 - if (rc) { 2253 - msg = "nvram hdr read failed"; 2254 - goto err_out; 2255 - } 2256 - rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); 2257 - if (rc) { 2258 - msg = "nvram read failed"; 2259 - goto err_out; 2260 - } 2261 - 2262 - if (hdr[0] != 0x5A) { 2263 - /* entry id */ 2264 - msg = "invalid nvram entry id"; 2265 - rc = -ENOENT; 2266 - goto err_out; 2267 - } 2268 - 2269 - tmp = buf; 2270 - sum = ((u32)hdr[0]) + ((u32)hdr[1]); 2271 - for (i = 0; i < buflen; i++) 2272 - sum += ((u32)tmp[i]); 2273 - 2274 - if (sum) { 2275 - msg = "nvram checksum failure"; 2276 - rc = -EILSEQ; 2277 - goto err_out; 2278 - } 2279 - 2280 - return 0; 2281 - 2282 - err_out: 2283 - dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg); 2284 - return rc; 2285 - #else 2286 - /* FIXME , For SAS target mode */ 2287 - memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); 2288 - return 0; 2289 - #endif 2290 - } 2291 - 2292 - static void mvs_int_sata(struct mvs_info *mvi) 2293 - { 2294 - u32 tmp; 2295 - void __iomem *regs = mvi->regs; 2296 - tmp = mr32(INT_STAT_SRS); 2297 - mw32(INT_STAT_SRS, tmp & 0xFFFF); 2298 - } 2299 - 2300 - static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, 2301 - u32 slot_idx) 2302 - { 2303 - void __iomem *regs = mvi->regs; 2304 - struct domain_device *dev = task->dev; 2305 - struct asd_sas_port *sas_port = dev->port; 2306 - struct mvs_port *port = mvi->slot_info[slot_idx].port; 2307 - u32 reg_set, phy_mask; 2308 - 2309 - if 
(!sas_protocol_ata(task->task_proto)) { 2310 - reg_set = 0; 2311 - phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap : 2312 - sas_port->phy_mask; 2313 - } else { 2314 - reg_set = port->taskfileset; 2315 - phy_mask = sas_port->phy_mask; 2316 - } 2317 - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx | 2318 - (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) | 2319 - (phy_mask << TXQ_PHY_SHIFT) | 2320 - (reg_set << TXQ_SRS_SHIFT)); 2321 - 2322 - mw32(TX_PROD_IDX, mvi->tx_prod); 2323 - mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); 2324 - } 2325 - 2326 - void mvs_int_full(struct mvs_info *mvi) 2327 - { 2328 - void __iomem *regs = mvi->regs; 2329 - u32 tmp, stat; 2330 - int i; 2331 - 2332 - stat = mr32(INT_STAT); 2333 - 2334 - mvs_int_rx(mvi, false); 2335 - 2336 - for (i = 0; i < MVS_MAX_PORTS; i++) { 2337 - tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); 2338 - if (tmp) 2339 - mvs_int_port(mvi, i, tmp); 2340 - } 2341 - 2342 - if (stat & CINT_SRS) 2343 - mvs_int_sata(mvi); 2344 - 2345 - mw32(INT_STAT, stat); 2346 - } 2347 - 2348 - #ifndef MVS_DISABLE_MSI 2349 - static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) 2350 - { 2351 - struct mvs_info *mvi = opaque; 2352 - 2353 - #ifndef MVS_USE_TASKLET 2354 - spin_lock(&mvi->lock); 2355 - 2356 - mvs_int_rx(mvi, true); 2357 - 2358 - spin_unlock(&mvi->lock); 2359 - #else 2360 - tasklet_schedule(&mvi->tasklet); 2361 - #endif 2362 - return IRQ_HANDLED; 2363 - } 2364 - #endif 2365 - 2366 - int mvs_task_abort(struct sas_task *task) 2367 - { 2368 - int rc; 2369 - unsigned long flags; 2370 - struct mvs_info *mvi = task->dev->port->ha->lldd_ha; 2371 - struct pci_dev *pdev = mvi->pdev; 2372 - int tag; 2373 - 2374 - spin_lock_irqsave(&task->task_state_lock, flags); 2375 - if (task->task_state_flags & SAS_TASK_STATE_DONE) { 2376 - rc = TMF_RESP_FUNC_COMPLETE; 2377 - spin_unlock_irqrestore(&task->task_state_lock, flags); 2378 - goto out_done; 2379 - } 2380 - spin_unlock_irqrestore(&task->task_state_lock, 
flags); 2381 - 2382 - switch (task->task_proto) { 2383 - case SAS_PROTOCOL_SMP: 2384 - dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n"); 2385 - break; 2386 - case SAS_PROTOCOL_SSP: 2387 - dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n"); 2388 - break; 2389 - case SAS_PROTOCOL_SATA: 2390 - case SAS_PROTOCOL_STP: 2391 - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{ 2392 - dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n"); 2393 - #if _MV_DUMP 2394 - dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n"); 2395 - mvs_hexdump(sizeof(struct host_to_dev_fis), 2396 - (void *)&task->ata_task.fis, 0); 2397 - dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n"); 2398 - mvs_hexdump(16, task->ata_task.atapi_packet, 0); 2399 - #endif 2400 - spin_lock_irqsave(&task->task_state_lock, flags); 2401 - if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { 2402 - /* TODO */ 2403 - ; 2404 - } 2405 - spin_unlock_irqrestore(&task->task_state_lock, flags); 2406 - break; 2407 - } 2408 - default: 2409 - break; 2410 - } 2411 - 2412 - if (mvs_find_tag(mvi, task, &tag)) { 2413 - spin_lock_irqsave(&mvi->lock, flags); 2414 - mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag); 2415 - spin_unlock_irqrestore(&mvi->lock, flags); 2416 - } 2417 - if (!mvs_task_exec(task, 1, GFP_ATOMIC)) 2418 - rc = TMF_RESP_FUNC_COMPLETE; 2419 - else 2420 - rc = TMF_RESP_FUNC_FAILED; 2421 - out_done: 2422 - return rc; 2423 - } 2424 - 2425 - int __devinit mvs_hw_init(struct mvs_info *mvi) 2426 - { 2427 - void __iomem *regs = mvi->regs; 2428 - int i; 2429 - u32 tmp, cctl; 2430 - 2431 - /* make sure interrupts are masked immediately (paranoia) */ 2432 - mw32(GBL_CTL, 0); 2433 - tmp = mr32(GBL_CTL); 2434 - 2435 - /* Reset Controller */ 2436 - if (!(tmp & HBA_RST)) { 2437 - if (mvi->flags & MVF_PHY_PWR_FIX) { 2438 - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); 2439 - tmp &= ~PCTL_PWR_ON; 2440 - tmp |= PCTL_OFF; 2441 - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); 2442 - 2443 - 
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); 2444 - tmp &= ~PCTL_PWR_ON; 2445 - tmp |= PCTL_OFF; 2446 - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); 2447 - } 2448 - 2449 - /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */ 2450 - mw32_f(GBL_CTL, HBA_RST); 2451 - } 2452 - 2453 - /* wait for reset to finish; timeout is just a guess */ 2454 - i = 1000; 2455 - while (i-- > 0) { 2456 - msleep(10); 2457 - 2458 - if (!(mr32(GBL_CTL) & HBA_RST)) 2459 - break; 2460 - } 2461 - if (mr32(GBL_CTL) & HBA_RST) { 2462 - dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n"); 2463 - return -EBUSY; 2464 - } 2465 - 2466 - /* Init Chip */ 2467 - /* make sure RST is set; HBA_RST /should/ have done that for us */ 2468 - cctl = mr32(CTL); 2469 - if (cctl & CCTL_RST) 2470 - cctl &= ~CCTL_RST; 2471 - else 2472 - mw32_f(CTL, cctl | CCTL_RST); 2473 - 2474 - /* write to device control _AND_ device status register? - A.C. */ 2475 - pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); 2476 - tmp &= ~PRD_REQ_MASK; 2477 - tmp |= PRD_REQ_SIZE; 2478 - pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); 2479 - 2480 - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); 2481 - tmp |= PCTL_PWR_ON; 2482 - tmp &= ~PCTL_OFF; 2483 - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); 2484 - 2485 - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); 2486 - tmp |= PCTL_PWR_ON; 2487 - tmp &= ~PCTL_OFF; 2488 - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); 2489 - 2490 - mw32_f(CTL, cctl); 2491 - 2492 - /* reset control */ 2493 - mw32(PCS, 0); /*MVS_PCS */ 2494 - 2495 - mvs_phy_hacks(mvi); 2496 - 2497 - mw32(CMD_LIST_LO, mvi->slot_dma); 2498 - mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); 2499 - 2500 - mw32(RX_FIS_LO, mvi->rx_fis_dma); 2501 - mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); 2502 - 2503 - mw32(TX_CFG, MVS_CHIP_SLOT_SZ); 2504 - mw32(TX_LO, mvi->tx_dma); 2505 - mw32(TX_HI, (mvi->tx_dma >> 16) >> 16); 2506 - 2507 - mw32(RX_CFG, MVS_RX_RING_SZ); 2508 - 
mw32(RX_LO, mvi->rx_dma); 2509 - mw32(RX_HI, (mvi->rx_dma >> 16) >> 16); 2510 - 2511 - /* enable auto port detection */ 2512 - mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN); 2513 - msleep(1100); 2514 - /* init and reset phys */ 2515 - for (i = 0; i < mvi->chip->n_phy; i++) { 2516 - u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]); 2517 - u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]); 2518 - 2519 - mvs_detect_porttype(mvi, i); 2520 - 2521 - /* set phy local SAS address */ 2522 - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); 2523 - mvs_write_port_cfg_data(mvi, i, lo); 2524 - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); 2525 - mvs_write_port_cfg_data(mvi, i, hi); 2526 - 2527 - /* reset phy */ 2528 - tmp = mvs_read_phy_ctl(mvi, i); 2529 - tmp |= PHY_RST; 2530 - mvs_write_phy_ctl(mvi, i, tmp); 2531 - } 2532 - 2533 - msleep(100); 2534 - 2535 - for (i = 0; i < mvi->chip->n_phy; i++) { 2536 - /* clear phy int status */ 2537 - tmp = mvs_read_port_irq_stat(mvi, i); 2538 - tmp &= ~PHYEV_SIG_FIS; 2539 - mvs_write_port_irq_stat(mvi, i, tmp); 2540 - 2541 - /* set phy int mask */ 2542 - tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | 2543 - PHYEV_ID_DONE | PHYEV_DEC_ERR; 2544 - mvs_write_port_irq_mask(mvi, i, tmp); 2545 - 2546 - msleep(100); 2547 - mvs_update_phyinfo(mvi, i, 1); 2548 - mvs_enable_xmt(mvi, i); 2549 - } 2550 - 2551 - /* FIXME: update wide port bitmaps */ 2552 - 2553 - /* little endian for open address and command table, etc. */ 2554 - /* A.C. 
2555 - * it seems that ( from the spec ) turning on big-endian won't 2556 - * do us any good on big-endian machines, need further confirmation 2557 - */ 2558 - cctl = mr32(CTL); 2559 - cctl |= CCTL_ENDIAN_CMD; 2560 - cctl |= CCTL_ENDIAN_DATA; 2561 - cctl &= ~CCTL_ENDIAN_OPEN; 2562 - cctl |= CCTL_ENDIAN_RSP; 2563 - mw32_f(CTL, cctl); 2564 - 2565 - /* reset CMD queue */ 2566 - tmp = mr32(PCS); 2567 - tmp |= PCS_CMD_RST; 2568 - mw32(PCS, tmp); 2569 - /* interrupt coalescing may cause missing HW interrput in some case, 2570 - * and the max count is 0x1ff, while our max slot is 0x200, 2571 - * it will make count 0. 2572 - */ 2573 - tmp = 0; 2574 - mw32(INT_COAL, tmp); 2575 - 2576 - tmp = 0x100; 2577 - mw32(INT_COAL_TMOUT, tmp); 2578 - 2579 - /* ladies and gentlemen, start your engines */ 2580 - mw32(TX_CFG, 0); 2581 - mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); 2582 - mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN); 2583 - /* enable CMD/CMPL_Q/RESP mode */ 2584 - mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN); 2585 - 2586 - /* enable completion queue interrupt */ 2587 - tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS); 2588 - mw32(INT_MASK, tmp); 2589 - 2590 - /* Enable SRS interrupt */ 2591 - mw32(INT_MASK_SRS, 0xFF); 2592 - return 0; 2593 - } 2594 - 2595 - void __devinit mvs_print_info(struct mvs_info *mvi) 2596 - { 2597 - struct pci_dev *pdev = mvi->pdev; 2598 - static int printed_version; 2599 - 2600 - if (!printed_version++) 2601 - dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 2602 - 2603 - dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n", 2604 - mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr)); 2605 1368 } 2606 1369
+266 -65
drivers/scsi/mvsas/mv_sas.h
··· 1 1 /* 2 - mv_sas.h - Marvell 88SE6440 SAS/SATA support 3 - 4 - Copyright 2007 Red Hat, Inc. 5 - Copyright 2008 Marvell. <kewei@marvell.com> 6 - 7 - This program is free software; you can redistribute it and/or 8 - modify it under the terms of the GNU General Public License as 9 - published by the Free Software Foundation; either version 2, 10 - or (at your option) any later version. 11 - 12 - This program is distributed in the hope that it will be useful, 13 - but WITHOUT ANY WARRANTY; without even the implied warranty 14 - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 15 - See the GNU General Public License for more details. 16 - 17 - You should have received a copy of the GNU General Public 18 - License along with this program; see the file COPYING. If not, 19 - write to the Free Software Foundation, 675 Mass Ave, Cambridge, 20 - MA 02139, USA. 21 - 22 - */ 2 + * Marvell 88SE64xx/88SE94xx main function head file 3 + * 4 + * Copyright 2007 Red Hat, Inc. 5 + * Copyright 2008 Marvell. <kewei@marvell.com> 6 + * 7 + * This file is licensed under GPLv2. 8 + * 9 + * This program is free software; you can redistribute it and/or 10 + * modify it under the terms of the GNU General Public License as 11 + * published by the Free Software Foundation; version 2 of the 12 + * License. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 
18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; if not, write to the Free Software 21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 22 + * USA 23 + */ 23 24 24 25 #ifndef _MV_SAS_H_ 25 26 #define _MV_SAS_H_ ··· 43 42 #include <linux/version.h> 44 43 #include "mv_defs.h" 45 44 46 - #define DRV_NAME "mvsas" 47 - #define DRV_VERSION "0.5.2" 48 - #define _MV_DUMP 0 49 - #define MVS_DISABLE_NVRAM 50 - #define MVS_DISABLE_MSI 51 - 45 + #define DRV_NAME "mvsas" 46 + #define DRV_VERSION "0.8.2" 47 + #define _MV_DUMP 0 52 48 #define MVS_ID_NOT_MAPPED 0x7f 53 - #define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) 49 + /* #define DISABLE_HOTPLUG_DMA_FIX */ 50 + #define MAX_EXP_RUNNING_REQ 2 51 + #define WIDE_PORT_MAX_PHY 4 52 + #define MV_DISABLE_NCQ 0 53 + #define mv_printk(fmt, arg ...) \ 54 + printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg) 55 + #ifdef MV_DEBUG 56 + #define mv_dprintk(format, arg...) \ 57 + printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg) 58 + #else 59 + #define mv_dprintk(format, arg...) 
60 + #endif 61 + #define MV_MAX_U32 0xffffffff 54 62 55 - #define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \ 56 - for ((__mc) = (__lseq_mask), (__lseq) = 0; \ 57 - (__mc) != 0 && __rest; \ 63 + extern struct mvs_tgt_initiator mvs_tgt; 64 + extern struct mvs_info *tgt_mvi; 65 + extern const struct mvs_dispatch mvs_64xx_dispatch; 66 + extern const struct mvs_dispatch mvs_94xx_dispatch; 67 + 68 + #define DEV_IS_EXPANDER(type) \ 69 + ((type == EDGE_DEV) || (type == FANOUT_DEV)) 70 + 71 + #define bit(n) ((u32)1 << n) 72 + 73 + #define for_each_phy(__lseq_mask, __mc, __lseq) \ 74 + for ((__mc) = (__lseq_mask), (__lseq) = 0; \ 75 + (__mc) != 0 ; \ 58 76 (++__lseq), (__mc) >>= 1) 59 77 60 - struct mvs_chip_info { 61 - u32 n_phy; 62 - u32 srs_sz; 63 - u32 slot_width; 78 + #define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f) 79 + #define UNASSOC_D2H_FIS(id) \ 80 + ((void *) mvi->rx_fis + 0x100 * id) 81 + #define SATA_RECEIVED_FIS_LIST(reg_set) \ 82 + ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set) 83 + #define SATA_RECEIVED_SDB_FIS(reg_set) \ 84 + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58) 85 + #define SATA_RECEIVED_D2H_FIS(reg_set) \ 86 + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40) 87 + #define SATA_RECEIVED_PIO_FIS(reg_set) \ 88 + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20) 89 + #define SATA_RECEIVED_DMA_FIS(reg_set) \ 90 + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00) 91 + 92 + enum dev_status { 93 + MVS_DEV_NORMAL = 0x0, 94 + MVS_DEV_EH = 0x1, 64 95 }; 96 + 97 + 98 + struct mvs_info; 99 + 100 + struct mvs_dispatch { 101 + char *name; 102 + int (*chip_init)(struct mvs_info *mvi); 103 + int (*spi_init)(struct mvs_info *mvi); 104 + int (*chip_ioremap)(struct mvs_info *mvi); 105 + void (*chip_iounmap)(struct mvs_info *mvi); 106 + irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat); 107 + u32 (*isr_status)(struct mvs_info *mvi, int irq); 108 + void (*interrupt_enable)(struct mvs_info *mvi); 109 + void (*interrupt_disable)(struct mvs_info *mvi); 110 + 
111 + u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port); 112 + void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val); 113 + 114 + u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port); 115 + void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val); 116 + void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr); 117 + 118 + u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port); 119 + void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val); 120 + void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr); 121 + 122 + u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port); 123 + void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val); 124 + 125 + u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port); 126 + void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val); 127 + 128 + void (*get_sas_addr)(void *buf, u32 buflen); 129 + void (*command_active)(struct mvs_info *mvi, u32 slot_idx); 130 + void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type, 131 + u32 tfs); 132 + void (*start_delivery)(struct mvs_info *mvi, u32 tx); 133 + u32 (*rx_update)(struct mvs_info *mvi); 134 + void (*int_full)(struct mvs_info *mvi); 135 + u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs); 136 + void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs); 137 + u32 (*prd_size)(void); 138 + u32 (*prd_count)(void); 139 + void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); 140 + void (*detect_porttype)(struct mvs_info *mvi, int i); 141 + int (*oob_done)(struct mvs_info *mvi, int i); 142 + void (*fix_phy_info)(struct mvs_info *mvi, int i, 143 + struct sas_identify_frame *id); 144 + void (*phy_work_around)(struct mvs_info *mvi, int i); 145 + void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id, 146 + struct sas_phy_linkrates *rates); 147 + u32 (*phy_max_link_rate)(void); 148 + void (*phy_disable)(struct mvs_info *mvi, u32 phy_id); 149 + void (*phy_enable)(struct mvs_info 
*mvi, u32 phy_id); 150 + void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard); 151 + void (*stp_reset)(struct mvs_info *mvi, u32 phy_id); 152 + void (*clear_active_cmds)(struct mvs_info *mvi); 153 + u32 (*spi_read_data)(struct mvs_info *mvi); 154 + void (*spi_write_data)(struct mvs_info *mvi, u32 data); 155 + int (*spi_buildcmd)(struct mvs_info *mvi, 156 + u32 *dwCmd, 157 + u8 cmd, 158 + u8 read, 159 + u8 length, 160 + u32 addr 161 + ); 162 + int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd); 163 + int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout); 164 + #ifndef DISABLE_HOTPLUG_DMA_FIX 165 + void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd); 166 + #endif 167 + 168 + }; 169 + 170 + struct mvs_chip_info { 171 + u32 n_host; 172 + u32 n_phy; 173 + u32 fis_offs; 174 + u32 fis_count; 175 + u32 srs_sz; 176 + u32 slot_width; 177 + const struct mvs_dispatch *dispatch; 178 + }; 179 + #define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) 180 + #define MVS_RX_FISL_SZ \ 181 + (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100)) 182 + #define MVS_CHIP_DISP (mvi->chip->dispatch) 65 183 66 184 struct mvs_err_info { 67 185 __le32 flags; ··· 192 72 __le32 lens; /* cmd, max resp frame len */ 193 73 __le32 tags; /* targ port xfer tag; tag */ 194 74 __le32 data_len; /* data xfer len */ 195 - __le64 cmd_tbl; /* command table address */ 75 + __le64 cmd_tbl; /* command table address */ 196 76 __le64 open_frame; /* open addr frame address */ 197 77 __le64 status_buf; /* status buffer address */ 198 78 __le64 prd_tbl; /* PRD tbl address */ ··· 202 82 struct mvs_port { 203 83 struct asd_sas_port sas_port; 204 84 u8 port_attached; 205 - u8 taskfileset; 206 85 u8 wide_port_phymap; 207 86 struct list_head list; 208 87 }; 209 88 210 89 struct mvs_phy { 90 + struct mvs_info *mvi; 211 91 struct mvs_port *port; 212 92 struct asd_sas_phy sas_phy; 213 93 struct sas_identify identify; 214 94 struct scsi_device *sdev; 95 + struct timer_list timer; 215 96 u64 
dev_sas_addr; 216 97 u64 att_dev_sas_addr; 217 98 u32 att_dev_info; ··· 223 102 u32 frame_rcvd_size; 224 103 u8 frame_rcvd[32]; 225 104 u8 phy_attached; 105 + u8 phy_mode; 106 + u8 reserved[2]; 107 + u32 phy_event; 226 108 enum sas_linkrate minimum_linkrate; 227 109 enum sas_linkrate maximum_linkrate; 228 110 }; 229 111 112 + struct mvs_device { 113 + enum sas_dev_type dev_type; 114 + struct domain_device *sas_device; 115 + u32 attached_phy; 116 + u32 device_id; 117 + u32 runing_req; 118 + u8 taskfileset; 119 + u8 dev_status; 120 + u16 reserved; 121 + struct list_head dev_entry; 122 + }; 123 + 230 124 struct mvs_slot_info { 231 - struct list_head list; 232 - struct sas_task *task; 125 + struct list_head entry; 126 + union { 127 + struct sas_task *task; 128 + void *tdata; 129 + }; 233 130 u32 n_elem; 234 131 u32 tx; 132 + u32 slot_tag; 235 133 236 134 /* DMA buffer for storing cmd tbl, open addr frame, status buffer, 237 135 * and PRD table ··· 260 120 #if _MV_DUMP 261 121 u32 cmd_size; 262 122 #endif 263 - 264 123 void *response; 265 124 struct mvs_port *port; 125 + struct mvs_device *device; 126 + void *open_frame; 266 127 }; 267 128 268 129 struct mvs_info { ··· 274 133 275 134 /* our device */ 276 135 struct pci_dev *pdev; 136 + struct device *dev; 277 137 278 138 /* enhanced mode registers */ 279 139 void __iomem *regs; 280 140 281 - /* peripheral registers */ 282 - void __iomem *peri_regs; 283 - 141 + /* peripheral or soc registers */ 142 + void __iomem *regs_ex; 284 143 u8 sas_addr[SAS_ADDR_SIZE]; 285 144 286 145 /* SCSI/SAS glue */ 287 - struct sas_ha_struct sas; 146 + struct sas_ha_struct *sas; 288 147 struct Scsi_Host *shost; 289 148 290 149 /* TX (delivery) DMA ring */ ··· 295 154 u32 tx_prod; 296 155 297 156 /* RX (completion) DMA ring */ 298 - __le32 *rx; 157 + __le32 *rx; 299 158 dma_addr_t rx_dma; 300 159 301 160 /* RX consumer idx */ ··· 309 168 struct mvs_cmd_hdr *slot; 310 169 dma_addr_t slot_dma; 311 170 171 + u32 chip_id; 312 172 const struct 
mvs_chip_info *chip; 313 173 314 - u8 tags[MVS_SLOTS]; 315 - struct mvs_slot_info slot_info[MVS_SLOTS]; 316 - /* further per-slot information */ 174 + int tags_num; 175 + u8 tags[MVS_SLOTS >> 3]; 176 + 177 + /* further per-slot information */ 317 178 struct mvs_phy phy[MVS_MAX_PHYS]; 318 179 struct mvs_port port[MVS_MAX_PHYS]; 319 - #ifdef MVS_USE_TASKLET 320 - struct tasklet_struct tasklet; 180 + u32 irq; 181 + u32 exp_req; 182 + u32 id; 183 + u64 sata_reg_set; 184 + struct list_head *hba_list; 185 + struct list_head soc_entry; 186 + struct list_head wq_list; 187 + unsigned long instance; 188 + u16 flashid; 189 + u32 flashsize; 190 + u32 flashsectSize; 191 + 192 + void *addon; 193 + struct mvs_device devices[MVS_MAX_DEVICES]; 194 + #ifndef DISABLE_HOTPLUG_DMA_FIX 195 + void *bulk_buffer; 196 + dma_addr_t bulk_buffer_dma; 197 + #define TRASH_BUCKET_SIZE 0x20000 321 198 #endif 199 + struct mvs_slot_info slot_info[0]; 322 200 }; 323 201 202 + struct mvs_prv_info{ 203 + u8 n_host; 204 + u8 n_phy; 205 + u16 reserve; 206 + struct mvs_info *mvi[2]; 207 + }; 208 + 209 + struct mvs_wq { 210 + struct delayed_work work_q; 211 + struct mvs_info *mvi; 212 + void *data; 213 + int handler; 214 + struct list_head entry; 215 + }; 216 + 217 + struct mvs_task_exec_info { 218 + struct sas_task *task; 219 + struct mvs_cmd_hdr *hdr; 220 + struct mvs_port *port; 221 + u32 tag; 222 + int n_elem; 223 + }; 224 + 225 + 226 + /******************** function prototype *********************/ 227 + void mvs_get_sas_addr(void *buf, u32 buflen); 228 + void mvs_tag_clear(struct mvs_info *mvi, u32 tag); 229 + void mvs_tag_free(struct mvs_info *mvi, u32 tag); 230 + void mvs_tag_set(struct mvs_info *mvi, unsigned int tag); 231 + int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out); 232 + void mvs_tag_init(struct mvs_info *mvi); 233 + void mvs_iounmap(void __iomem *regs); 234 + int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex); 235 + void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int 
hard); 324 236 int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, 325 237 void *funcdata); 238 + void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id, 239 + u32 off_lo, u32 off_hi, u64 sas_addr); 240 + int mvs_slave_alloc(struct scsi_device *scsi_dev); 326 241 int mvs_slave_configure(struct scsi_device *sdev); 327 242 void mvs_scan_start(struct Scsi_Host *shost); 328 243 int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); 329 - int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags); 330 - int mvs_task_abort(struct sas_task *task); 244 + int mvs_queue_command(struct sas_task *task, const int num, 245 + gfp_t gfp_flags); 246 + int mvs_abort_task(struct sas_task *task); 247 + int mvs_abort_task_set(struct domain_device *dev, u8 *lun); 248 + int mvs_clear_aca(struct domain_device *dev, u8 *lun); 249 + int mvs_clear_task_set(struct domain_device *dev, u8 * lun); 331 250 void mvs_port_formed(struct asd_sas_phy *sas_phy); 251 + void mvs_port_deformed(struct asd_sas_phy *sas_phy); 252 + int mvs_dev_found(struct domain_device *dev); 253 + void mvs_dev_gone(struct domain_device *dev); 254 + int mvs_lu_reset(struct domain_device *dev, u8 *lun); 255 + int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags); 332 256 int mvs_I_T_nexus_reset(struct domain_device *dev); 333 - void mvs_int_full(struct mvs_info *mvi); 334 - void mvs_tag_init(struct mvs_info *mvi); 335 - int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen); 336 - int __devinit mvs_hw_init(struct mvs_info *mvi); 337 - void __devinit mvs_print_info(struct mvs_info *mvi); 338 - void mvs_hba_interrupt_enable(struct mvs_info *mvi); 339 - void mvs_hba_interrupt_disable(struct mvs_info *mvi); 340 - void mvs_detect_porttype(struct mvs_info *mvi, int i); 341 - u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port); 342 - void mvs_enable_xmt(struct mvs_info *mvi, int PhyId); 343 - void __devinit mvs_phy_hacks(struct 
mvs_info *mvi); 344 - void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port); 345 - 257 + int mvs_query_task(struct sas_task *task); 258 + void mvs_release_task(struct mvs_info *mvi, int phy_no, 259 + struct domain_device *dev); 260 + void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events); 261 + void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); 262 + int mvs_int_rx(struct mvs_info *mvi, bool self_clear); 263 + void mvs_hexdump(u32 size, u8 *data, u32 baseaddr); 346 264 #endif 265 +