/* sky2.c — driver source as of Linux v2.6.25-rc2 (4467 lines total; this is a partial view) */
/*
 * New driver for Marvell Yukon 2 chipset.
 * Based on earlier sk98lin, and skge driver.
 *
 * This driver intentionally does not support all the features
 * of the original driver such as link fail-over and link management because
 * those should be done at higher levels.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/mii.h>

#include <asm/irq.h>

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define SKY2_VLAN_TAG_USED 1
#endif

#include "sky2.h"

#define DRV_NAME		"sky2"
#define DRV_VERSION		"1.21"
#define PFX			DRV_NAME " "

/*
 * The Yukon II chipset takes 64 bit command blocks (called list elements)
 * that are organized into three (receive, transmit, status) different rings
 * similar to Tigon3.
 */

#define RX_LE_SIZE		1024
#define RX_LE_BYTES		(RX_LE_SIZE*sizeof(struct sky2_rx_le))
#define RX_MAX_PENDING		(RX_LE_SIZE/6 - 2)
#define RX_DEF_PENDING		RX_MAX_PENDING

#define TX_RING_SIZE		512
#define TX_DEF_PENDING		(TX_RING_SIZE - 1)
#define TX_MIN_PENDING		64
/* worst case: header LE + checksum/mss LEs + one hi-address LE per fragment */
#define MAX_SKB_TX_LE		(4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)

#define STATUS_RING_SIZE	2048	/* 2 ports * (TX + 2*RX) */
#define STATUS_LE_BYTES		(STATUS_RING_SIZE*sizeof(struct sky2_status_le))
#define TX_WATCHDOG		(5 * HZ)
#define NAPI_WEIGHT		64
#define PHY_RETRIES		1000

#define SKY2_EEPROM_MAGIC	0x9955aabb

/* Ring index arithmetic: valid only because ring sizes are powers of two */
#define RING_NEXT(x,s)	(((x)+1) & ((s)-1))

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* frames shorter than this are copied into a fresh skb on receive */
static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

static const struct pci_device_id sky2_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) },	/* DGE-550SX */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },	/* DGE-560SX */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) },	/* DGE-550T */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
		     0x4363) }, /* 88E8055 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, sky2_id_table);

/* Avoid conditionals by using array (indexed by port number) */
static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };

/* This driver supports yukon2 chipset only */
static const char *yukon2_name[] = {
	"XL",		/* 0xb3 */
	"EC Ultra",	/* 0xb4 */
	"Extreme",	/* 0xb5 */
	"EC",		/* 0xb6 */
	"FE",		/* 0xb7 */
	"FE+",		/* 0xb8 */
};

static void sky2_set_multicast(struct net_device *dev);

/* Access to PHY via serial interconnect */

/* Write @val to PHY register @reg on @port; busy-polls the SMI interface.
 * Returns 0, -ETIMEDOUT after PHY_RETRIES polls, or -EIO if the register
 * reads back all-ones (device gone).
 */
static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
{
	int i;

	gma_write16(hw, port, GM_SMI_DATA, val);
	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));

	for (i = 0; i < PHY_RETRIES; i++) {
		u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
		/* all-ones means the chip has dropped off the bus */
		if (ctrl == 0xffff)
			goto io_error;

		if (!(ctrl & GM_SMI_CT_BUSY))
			return 0;

		udelay(10);
	}

	dev_warn(&hw->pdev->dev,"%s: phy write timeout\n", hw->dev[port]->name);
	return -ETIMEDOUT;

io_error:
	dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
	return -EIO;
}

/* Read PHY register @reg on @port into *@val; same error convention as
 * gm_phy_write().
 */
static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
{
	int i;

	gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
		    | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < PHY_RETRIES; i++) {
		u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
		if (ctrl == 0xffff)
			goto io_error;

		if (ctrl & GM_SMI_CT_RD_VAL) {
			*val = gma_read16(hw, port, GM_SMI_DATA);
			return 0;
		}

		udelay(10);
	}

	dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name);
	return -ETIMEDOUT;
io_error:
	dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
	return -EIO;
}

/* Convenience wrapper: read PHY register, ignoring errors.
 * NOTE(review): on failure v is returned uninitialized — callers appear to
 * treat the value as best-effort.
 */
static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
{
	u16 v;
	__gm_phy_read(hw, port, reg, &v);
	return v;
}


/* Bring the chip to full power (VCC) and set up clock gating */
static void sky2_power_on(struct sky2_hw *hw)
{
	/* switch power to VCC (WA for VAUX problem) */
	sky2_write8(hw, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);

	/* disable Core Clock Division, */
	sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
		/* enable bits are inverted */
		sky2_write8(hw, B2_Y2_CLK_GATE,
			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
	else
		sky2_write8(hw, B2_Y2_CLK_GATE, 0);

	if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
		u32 reg;

		sky2_pci_write32(hw, PCI_DEV_REG3, 0);

		reg = sky2_pci_read32(hw, PCI_DEV_REG4);
		/* set all bits to 0 except bits 15..12 and 8 */
		reg &= P_ASPM_CONTROL_MSK;
		sky2_pci_write32(hw, PCI_DEV_REG4, reg);

		reg = sky2_pci_read32(hw, PCI_DEV_REG5);
		/* set all bits to 0 except bits 28 & 27 */
		reg &= P_CTL_TIM_VMAIN_AV_MSK;
		sky2_pci_write32(hw,
				 PCI_DEV_REG5, reg);

		sky2_pci_write32(hw, PCI_CFG_REG_1, 0);

		/* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
		reg = sky2_read32(hw, B2_GP_IO);
		reg |= GLB_GPIO_STAT_RACE_DIS;
		sky2_write32(hw, B2_GP_IO, reg);

		/* read back to flush the posted write */
		sky2_read32(hw, B2_GP_IO);
	}
}

/* Switch the chip to auxiliary (standby/WOL) power, if VAUX is available */
static void sky2_power_aux(struct sky2_hw *hw)
{
	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
	else
		/* enable bits are inverted */
		sky2_write8(hw, B2_Y2_CLK_GATE,
			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);

	/* switch power to VAUX */
	if (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL)
		sky2_write8(hw, B0_POWER_CTRL,
			    (PC_VAUX_ENA | PC_VCC_ENA |
			     PC_VAUX_ON | PC_VCC_OFF));
}

/* Quiesce one GMAC: mask its IRQs, clear the multicast hash, and
 * re-enable unicast/multicast filtering.
 */
static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
{
	u16 reg;

	/* disable all GMAC IRQ's */
	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);

	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
	gma_write16(hw, port, GM_MC_ADDR_H4, 0);

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
	gma_write16(hw, port, GM_RX_CTRL, reg);
}

/* flow control to advertise bits */
static const u16 copper_fc_adv[] = {
	[FC_NONE]	= 0,
	[FC_TX]		= PHY_M_AN_ASP,
	[FC_RX]		= PHY_M_AN_PC,
	[FC_BOTH]	= PHY_M_AN_PC | PHY_M_AN_ASP,
};

/* flow control to advertise bits when using 1000BaseX */
static const u16 fiber_fc_adv[] = {
	[FC_NONE]	= PHY_M_P_NO_PAUSE_X,
	[FC_TX]		= PHY_M_P_ASYM_MD_X,
	[FC_RX]		= PHY_M_P_SYM_MD_X,
	[FC_BOTH]	= PHY_M_P_BOTH_MD_X,
};

/* flow control to GMA disable bits */
static const u16 gm_fc_disable[] = {
	[FC_NONE]	= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
	[FC_TX]		=
			  GM_GPCR_FC_RX_DIS,
	[FC_RX]		= GM_GPCR_FC_TX_DIS,
	[FC_BOTH]	= 0,
};


/* Program the PHY for the current autoneg/speed/duplex/flow-control
 * settings in *sky2, including per-chip LED setup and errata workarounds.
 * Caller must hold sky2->phy_lock (see sky2_phy_reinit / sky2_mac_init).
 */
static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;

	if (sky2->autoneg == AUTONEG_ENABLE &&
	    !(hw->flags & SKY2_HW_NEWER_PHY)) {
		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			   PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		/* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
		if (hw->chip_id == CHIP_ID_YUKON_EC)
			/* set downshift counter to 3x and enable downshift */
			ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
		else
			/* set master & slave downshift counter to 1x */
			ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);

		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
	}

	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
	if (sky2_is_copper(hw)) {
		if (!(hw->flags & SKY2_HW_GIGABIT)) {
			/* enable automatic crossover */
			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;

			if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
			    hw->chip_rev == CHIP_REV_YU_FE2_A0) {
				u16 spec;

				/* Enable Class A driver for FE+ A0 */
				spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
				spec |= PHY_M_FESC_SEL_CL_A;
				gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
			}
		} else {
			/* disable energy detect */
			ctrl &= ~PHY_M_PC_EN_DET_MSK;

			/* enable automatic crossover */
			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);

			/* downshift on PHY 88E1112 and 88E1149 is changed */
			if (sky2->autoneg == AUTONEG_ENABLE
			    && (hw->flags & SKY2_HW_NEWER_PHY)) {
				/* set downshift counter to 3x and enable downshift */
				ctrl &= ~PHY_M_PC_DSC_MSK;
				ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
			}
		}
	} else {
		/* workaround for deviation #4.88 (CRC errors) */
		/* disable Automatic Crossover */

		ctrl &= ~PHY_M_PC_MDIX_MSK;
	}

	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

	/* special setup for PHY 88E1112 Fiber */
	if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
		ctrl &= ~PHY_M_MAC_MD_MSK;
		ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

		if (hw->pmd_type == 'P') {
			/* select page 1 to access Fiber registers */
			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);

			/* for SFP-module set SIGDET polarity to low */
			ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
			ctrl |= PHY_M_FIB_SIGD_POL;
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
		}

		/* restore previously selected page */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	}

	ctrl = PHY_CT_RESET;
	ct1000 = 0;
	adv = PHY_AN_CSMA;
	reg = 0;

	if (sky2->autoneg == AUTONEG_ENABLE) {
		if (sky2_is_copper(hw)) {
			if (sky2->advertising & ADVERTISED_1000baseT_Full)
				ct1000 |= PHY_M_1000C_AFD;
			if (sky2->advertising & ADVERTISED_1000baseT_Half)
				ct1000 |= PHY_M_1000C_AHD;
			if (sky2->advertising & ADVERTISED_100baseT_Full)
				adv |= PHY_M_AN_100_FD;
			if (sky2->advertising & ADVERTISED_100baseT_Half)
				adv |= PHY_M_AN_100_HD;
			if (sky2->advertising & ADVERTISED_10baseT_Full)
				adv |= PHY_M_AN_10_FD;
			if (sky2->advertising & ADVERTISED_10baseT_Half)
				adv |= PHY_M_AN_10_HD;

			adv |= copper_fc_adv[sky2->flow_mode];
		} else {	/* special defines for FIBER (88E1040S only) */
			if (sky2->advertising & ADVERTISED_1000baseT_Full)
				adv |= PHY_M_AN_1000X_AFD;
			if (sky2->advertising & ADVERTISED_1000baseT_Half)
				adv |= PHY_M_AN_1000X_AHD;

			adv |= fiber_fc_adv[sky2->flow_mode];
		}

		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		/* Disable auto update for duplex flow control and speed */
		reg |= GM_GPCR_AU_ALL_DIS;

		switch (sky2->speed) {
		case SPEED_1000:
			ctrl |= PHY_CT_SP1000;
			reg |= GM_GPCR_SPEED_1000;
			break;
		case SPEED_100:
			ctrl |= PHY_CT_SP100;
			reg |= GM_GPCR_SPEED_100;
			break;
		}

		if (sky2->duplex == DUPLEX_FULL) {
			reg |= GM_GPCR_DUP_FULL;
			ctrl |= PHY_CT_DUP_MD;
		} else if (sky2->speed < SPEED_1000)
			/* half duplex below gigabit cannot do pause */
			sky2->flow_mode = FC_NONE;


		reg |= gm_fc_disable[sky2->flow_mode];

		/* Forward pause packets to GMAC? */
		if (sky2->flow_mode & FC_RX)
			sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
		else
			sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
	}

	gma_write16(hw, port, GM_GP_CTRL, reg);

	if (hw->flags & SKY2_HW_GIGABIT)
		gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Setup Phy LED's */
	ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
	ledover = 0;

	switch (hw->chip_id) {
	case CHIP_ID_YUKON_FE:
		/* on 88E3082 these bits are at 11..9 (shifted left) */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;

		ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);

		/* delete ACT LED control bits */
		ctrl &= ~PHY_M_FELP_LED1_MSK;
		/* change ACT LED control to blink mode */
		ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
		break;

	case CHIP_ID_YUKON_FE_P:
		/* Enable Link Partner Next Page */
		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
		ctrl |= PHY_M_PC_ENA_LIP_NP;

		/* disable Energy Detect and enable scrambler */
		ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

		/* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
		ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
			PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
			PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);

		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
		break;

	case CHIP_ID_YUKON_XL:
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* select page 3 to access LED control register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);

		/* set LED Function Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
			     (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
			      PHY_M_LEDC_INIT_CTRL(7) |	/* 10 Mbps */
			      PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
			      PHY_M_LEDC_STA0_CTRL(7)));	/* 1000 Mbps */

		/* set Polarity Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
			     (PHY_M_POLC_LS1_P_MIX(4) |
			      PHY_M_POLC_IS0_P_MIX(4) |
			      PHY_M_POLC_LOS_CTRL(2) |
			      PHY_M_POLC_INIT_CTRL(2) |
			      PHY_M_POLC_STA1_CTRL(2) |
			      PHY_M_POLC_STA0_CTRL(2)));

		/* restore page register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
		break;

	case CHIP_ID_YUKON_EC_U:
	case CHIP_ID_YUKON_EX:
	case CHIP_ID_YUKON_SUPR:
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* select page 3 to access LED control register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);

		/* set LED Function Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
			     (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
			      PHY_M_LEDC_INIT_CTRL(8) |	/* 10 Mbps */
			      PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
			      PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */

		/* set Blink Rate in LED Timer Control Register */
		gm_phy_write(hw, port, PHY_MARV_INT_MASK,
			     ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
		/* restore page register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
		break;

	default:
		/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
		/* turn off the Rx LED (LED_RX) */
		ledover &= ~PHY_M_LED_MO_RX;
	}

	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
	    hw->chip_rev == CHIP_REV_YU_EC_U_A1) {
		/* apply fixes in PHY AFE */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);

		/* increase differential signal amplitude in 10BASE-T */
		gm_phy_write(hw, port, 0x18, 0xaa99);
		gm_phy_write(hw, port, 0x17, 0x2011);

		/* fix for IEEE A/B Symmetry failure in 1000BASE-T */
		gm_phy_write(hw, port, 0x18, 0xa204);
		gm_phy_write(hw, port, 0x17, 0x2002);

		/* set page register to 0 */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
	} else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
		   hw->chip_rev == CHIP_REV_YU_FE2_A0) {
		/* apply workaround for integrated resistors calibration */
		gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
		gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
	} else if (hw->chip_id != CHIP_ID_YUKON_EX) {
		/* no effect on Yukon-XL */
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);

		if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
			/* turn on 100 Mbps LED (LED_LINK100) */
			ledover |= PHY_M_LED_MO_100;
		}

		if (ledover)
			gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);

	}

	/* Enable phy interrupt on auto-negotiation complete (or link up) */
	if (sky2->autoneg == AUTONEG_ENABLE)
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
	else
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
}

/* Power the PHY up (@onoff true) or down via PCI_DEV_REG1; on Yukon-XL
 * rev > 1, powering up also enables coma mode for the port.
 */
static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
{
	u32 reg1;
	static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
	static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };

	/* config-space writes are protected; temporarily unlock them */
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	reg1 =
sky2_pci_read32(hw, PCI_DEV_REG1); 628 /* Turn on/off phy power saving */ 629 if (onoff) 630 reg1 &= ~phy_power[port]; 631 else 632 reg1 |= phy_power[port]; 633 634 if (onoff && hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 635 reg1 |= coma_mode[port]; 636 637 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 638 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 639 sky2_pci_read32(hw, PCI_DEV_REG1); 640 641 udelay(100); 642} 643 644/* Force a renegotiation */ 645static void sky2_phy_reinit(struct sky2_port *sky2) 646{ 647 spin_lock_bh(&sky2->phy_lock); 648 sky2_phy_init(sky2->hw, sky2->port); 649 spin_unlock_bh(&sky2->phy_lock); 650} 651 652/* Put device in state to listen for Wake On Lan */ 653static void sky2_wol_init(struct sky2_port *sky2) 654{ 655 struct sky2_hw *hw = sky2->hw; 656 unsigned port = sky2->port; 657 enum flow_control save_mode; 658 u16 ctrl; 659 u32 reg1; 660 661 /* Bring hardware out of reset */ 662 sky2_write16(hw, B0_CTST, CS_RST_CLR); 663 sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); 664 665 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); 666 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); 667 668 /* Force to 10/100 669 * sky2_reset will re-enable on resume 670 */ 671 save_mode = sky2->flow_mode; 672 ctrl = sky2->advertising; 673 674 sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full); 675 sky2->flow_mode = FC_NONE; 676 sky2_phy_power(hw, port, 1); 677 sky2_phy_reinit(sky2); 678 679 sky2->flow_mode = save_mode; 680 sky2->advertising = ctrl; 681 682 /* Set GMAC to no flow control and auto update for speed/duplex */ 683 gma_write16(hw, port, GM_GP_CTRL, 684 GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| 685 GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); 686 687 /* Set WOL address */ 688 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), 689 sky2->netdev->dev_addr, ETH_ALEN); 690 691 /* Turn on appropriate WOL control bits */ 692 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), 
WOL_CTL_CLEAR_RESULT); 693 ctrl = 0; 694 if (sky2->wol & WAKE_PHY) 695 ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; 696 else 697 ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; 698 699 if (sky2->wol & WAKE_MAGIC) 700 ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; 701 else 702 ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;; 703 704 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; 705 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); 706 707 /* Turn on legacy PCI-Express PME mode */ 708 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 709 reg1 |= PCI_Y2_PME_LEGACY; 710 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 711 712 /* block receiver */ 713 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 714 715} 716 717static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) 718{ 719 struct net_device *dev = hw->dev[port]; 720 721 if ( (hw->chip_id == CHIP_ID_YUKON_EX && 722 hw->chip_rev != CHIP_REV_YU_EX_A0) || 723 hw->chip_id == CHIP_ID_YUKON_FE_P || 724 hw->chip_id == CHIP_ID_YUKON_SUPR) { 725 /* Yukon-Extreme B0 and further Extreme devices */ 726 /* enable Store & Forward mode for TX */ 727 728 if (dev->mtu <= ETH_DATA_LEN) 729 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 730 TX_JUMBO_DIS | TX_STFW_ENA); 731 732 else 733 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 734 TX_JUMBO_ENA| TX_STFW_ENA); 735 } else { 736 if (dev->mtu <= ETH_DATA_LEN) 737 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA); 738 else { 739 /* set Tx GMAC FIFO Almost Empty Threshold */ 740 sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), 741 (ECU_JUMBO_WM << 16) | ECU_AE_THR); 742 743 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS); 744 745 /* Can't do offload because of lack of store/forward */ 746 dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM); 747 } 748 } 749} 750 751static void sky2_mac_init(struct sky2_hw *hw, unsigned port) 752{ 753 struct sky2_port *sky2 = 
		netdev_priv(hw->dev[port]);
	u16 reg;
	u32 rx_reg;
	int i;
	const u8 *addr = hw->dev[port]->dev_addr;

	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);

	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
		/* WA DEV_472 -- looks like crossed wires on port 2 */
		/* clear GMAC 1 Control reset */
		sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
		/* pulse port-2 reset until its PHY identifies correctly */
		do {
			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
		} while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
			 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
			 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
	}

	/* read clears any pending GMAC interrupt source */
	sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));

	/* Enable Transmit FIFO Underrun */
	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);

	spin_lock_bh(&sky2->phy_lock);
	sky2_phy_init(hw, port);
	spin_unlock_bh(&sky2->phy_lock);

	/* MIB clear */
	reg = gma_read16(hw, port, GM_PHY_ADDR);
	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	/* reading each counter while MIB_CLR is set resets it */
	for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
		gma_read16(hw, port, i);
	gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS  */
	gma_write16(hw, port, GM_RX_CTRL,
		    GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	gma_write16(hw, port, GM_TX_PARAM,
		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
		    TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	/* serial mode register */
	reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
		GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (hw->dev[port]->mtu > ETH_DATA_LEN)
		reg |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);

	/* ignore counter overflows */
	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO */
	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
	if (hw->chip_id == CHIP_ID_YUKON_EX ||
	    hw->chip_id == CHIP_ID_YUKON_FE_P)
		rx_reg |= GMF_RX_OVER_ON;

	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);

	if (hw->chip_id == CHIP_ID_YUKON_XL) {
		/* Hardware errata - clear flush mask */
		sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
	} else {
		/* Flush Rx MAC FIFO on any flow control or error */
		sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
	}

	/* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug  */
	reg = RX_GMF_FL_THR_DEF + 1;
	/* Another magic mystery workaround from sk98lin */
	if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
	    hw->chip_rev == CHIP_REV_YU_FE2_A0)
		reg = 0x178;
	sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);

	/* Configure Tx MAC FIFO */
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* On chips without ram buffer, pause is controled by MAC level */
	if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
		sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
		sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);

		sky2_set_tx_stfwd(hw, port);
	}

	if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
	    hw->chip_rev == CHIP_REV_YU_FE2_A0) {
		/* disable dynamic watermark */
		reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA));
		reg &= ~TX_DYN_WM_ENA;
		sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg);
	}
}

/* Assign Ram Buffer allocation to queue */
static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
{
	u32 end;

	/* convert from K bytes to qwords used for hw register */
	start *= 1024/8;
	space *= 1024/8;
	end = start + space - 1;

	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	sky2_write32(hw, RB_ADDR(q, RB_START), start);
	sky2_write32(hw, RB_ADDR(q, RB_END), end);
	sky2_write32(hw, RB_ADDR(q, RB_WP), start);
	sky2_write32(hw, RB_ADDR(q, RB_RP), start);

	if (q == Q_R1 || q == Q_R2) {
		u32 tp = space - space/4;

		/* On receive queue's set the thresholds
		 * give receiver priority when > 3/4 full
		 * send pause when down to 2K
		 */
		sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
		sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);

		tp = space - 2048/8;
		sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
		sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
	} else {
		/* Enable store & forward on Tx queue's because
		 * Tx FIFO is only 1K on Yukon
		 */
		sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
	/* read back to flush posted writes */
	sky2_read8(hw, RB_ADDR(q, RB_CTRL));
}

/* Setup Bus Memory Interface */
static void sky2_qset(struct sky2_hw *hw, u16 q)
{
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
	sky2_write32(hw, Q_ADDR(q, Q_WM),  BMU_WM_DEFAULT);
}

/* Setup prefetch unit registers.
This is the interface between
 * hardware and driver list elements
 */

/* Reset a queue's prefetch unit and program it with the DMA address and
 * last index of its list-element ring, then turn it on.  The trailing
 * read flushes the posted writes before the queue is used.
 */
static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
			       u64 addr, u32 last)
{
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
	sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);

	sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
}

/* Return next free transmit list element and advance the producer index
 * (ring wraps at TX_RING_SIZE).  Only ctrl is cleared here; caller fills
 * the rest of the element.
 */
static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
{
	struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;

	sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
	le->ctrl = 0;
	return le;
}

/* Reset transmit ring software state and queue an initial OP_ADDR64
 * element so the chip's notion of the high address bits starts at zero.
 */
static void tx_init(struct sky2_port *sky2)
{
	struct sky2_tx_le *le;

	sky2->tx_prod = sky2->tx_cons = 0;
	sky2->tx_tcpsum = 0;
	sky2->tx_last_mss = 0;

	le = get_tx_le(sky2);
	le->addr = 0;
	le->opcode = OP_ADDR64 | HW_OWNER;
}

/* Map a transmit list element back to its parallel bookkeeping entry
 * (tx_ring and tx_le are index-aligned arrays).
 */
static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
					    struct sky2_tx_le *le)
{
	return sky2->tx_ring + (le - sky2->tx_le);
}

/* Update chip's next pointer */
static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
{
	/* Make sure writes to descriptors are complete before we tell hardware */
	wmb();
	sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);

	/* Synchronize I/O on since next processor may write to tail */
	mmiowb();
}

/* Return next receive list element and advance the put index
 * (ring wraps at RX_LE_SIZE).
 */
static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
{
	struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
	sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
	le->ctrl = 0;
	return le;
}

/* Build description to hardware for one receive segment.
 * When dma_addr_t is wider than 32 bits, an OP_ADDR64 element carrying
 * the high address bits precedes the data element.
 */
static void sky2_rx_add(struct sky2_port *sky2,  u8 op,
			dma_addr_t map, unsigned len)
{
	struct sky2_rx_le *le;

	if (sizeof(dma_addr_t) > sizeof(u32)) {
		le = sky2_next_rx(sky2);
		le->addr = cpu_to_le32(upper_32_bits(map));
		le->opcode = OP_ADDR64 | HW_OWNER;
	}

	le = sky2_next_rx(sky2);
	le->addr = cpu_to_le32((u32) map);
	le->length = cpu_to_le16(len);
	le->opcode = op | HW_OWNER;
}

/* Build description to hardware for one possibly fragmented skb */
static void sky2_rx_submit(struct sky2_port *sky2,
			   const struct rx_ring_info *re)
{
	int i;

	/* header/data area first, then one OP_BUFFER element per page frag */
	sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);

	for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
		sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
}

/* DMA-map an rx skb's linear data area and all of its page fragments;
 * mapping handles are stored in the ring info for later unmap.
 * NOTE(review): pci_map_single/pci_map_page results are not checked for
 * mapping errors here — presumably acceptable on the platforms this
 * driver targets; confirm before relying on it elsewhere.
 */
static void sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
			    unsigned size)
{
	struct sk_buff *skb = re->skb;
	int i;

	re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
	pci_unmap_len_set(re, data_size, size);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		re->frag_addr[i] = pci_map_page(pdev,
						skb_shinfo(skb)->frags[i].page,
						skb_shinfo(skb)->frags[i].page_offset,
						skb_shinfo(skb)->frags[i].size,
						PCI_DMA_FROMDEVICE);
}

/* Undo the DMA mappings created by sky2_rx_map_skb */
static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
{
	struct sk_buff *skb = re->skb;
	int i;

	pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
			 PCI_DMA_FROMDEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		pci_unmap_page(pdev, re->frag_addr[i],
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_FROMDEVICE);
}

/* Tell chip where to start receive checksum.
 * Actually has two checksums, but set both same to avoid possible byte
 * order problems.
 */
static void rx_set_checksum(struct sky2_port *sky2)
{
	struct sky2_rx_le *le = sky2_next_rx(sky2);

	/* both checksum start positions set to the end of the MAC header */
	le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
	le->ctrl = 0;
	le->opcode = OP_TCPSTART | HW_OWNER;

	sky2_write32(sky2->hw,
		     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
		     sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}

/*
 * The RX Stop command will not work for Yukon-2 if the BMU does not
 * reach the end of packet and since we can't make sure that we have
 * incoming data, we must reset the BMU while it is not doing a DMA
 * transfer. Since it is possible that the RX path is still active,
 * the RX RAM buffer will be stopped first, so any possible incoming
 * data will not trigger a DMA. After the RAM buffer is stopped, the
 * BMU is polled until any DMA in progress is ended and only then it
 * will be reset.
 */
static void sky2_rx_stop(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned rxq = rxqaddr[sky2->port];
	int i;

	/* disable the RAM Buffer receive queue */
	sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);

	/* poll until read level == write level, i.e. no DMA in flight;
	 * the warning below is only reached if the poll times out */
	for (i = 0; i < 0xffff; i++)
		if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
		    == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
			goto stopped;

	printk(KERN_WARNING PFX "%s: receiver stop failed\n",
	       sky2->netdev->name);
stopped:
	sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);

	/* reset the Rx prefetch unit */
	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
	mmiowb();
}

/* Clean out receive buffer area, assumes receiver hardware stopped */
static void sky2_rx_clean(struct sky2_port *sky2)
{
	unsigned i;

	memset(sky2->rx_le, 0, RX_LE_BYTES);
	for (i = 0; i < sky2->rx_pending; i++) {
		struct rx_ring_info *re = sky2->rx_ring + i;

		if (re->skb) {
			sky2_rx_unmap_skb(sky2->hw->pdev, re);
			kfree_skb(re->skb);
			re->skb = NULL;
		}
	}
}

/* Basic MII support */
static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	int err = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -ENODEV;	/* Phy still in reset */

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR_MARV;

		/* fallthru */
	case SIOCGMIIREG: {
		u16 val = 0;

		spin_lock_bh(&sky2->phy_lock);
		err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
		spin_unlock_bh(&sky2->phy_lock);

		data->val_out = val;
		break;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&sky2->phy_lock);
		err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
				   data->val_in);
		spin_unlock_bh(&sky2->phy_lock);
		break;
	}
	return err;
}

#ifdef SKY2_VLAN_TAG_USED
/* Enable hardware VLAN tag stripping/insertion when a vlan group is
 * registered, disable it when the group is cleared.  Tx queue and NAPI
 * are quiesced around the GMAC FIFO control writes.
 */
static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	u16 port = sky2->port;

	netif_tx_lock_bh(dev);
	napi_disable(&hw->napi);

	sky2->vlgrp = grp;
	if (grp) {
		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
			     RX_VLAN_STRIP_ON);
		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
			     TX_VLAN_TAG_ON);
	} else {
		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
			     RX_VLAN_STRIP_OFF);
		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
			     TX_VLAN_TAG_OFF);
	}

	/* re-arm interrupt state before re-enabling NAPI */
	sky2_read32(hw, B0_Y2_SP_LISR);
	napi_enable(&hw->napi);
	netif_tx_unlock_bh(dev);
}
#endif

/*
 * Allocate an skb for receiving.
If the MTU is large enough
 * make the skb non-linear with a fragment list of pages.
 */
static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
{
	struct sk_buff *skb;
	int i;

	if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
		unsigned char *start;
		/*
		 * Workaround for a bug in FIFO that causes a hang
		 * if the receive buffer is not 64 byte aligned.
		 * The buffer returned from netdev_alloc_skb is
		 * aligned except if slab debugging is enabled.
		 */
		skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size + 8);
		if (!skb)
			goto nomem;
		start = PTR_ALIGN(skb->data, 8);
		skb_reserve(skb, start - skb->data);
	} else {
		skb = netdev_alloc_skb(sky2->netdev,
				       sky2->rx_data_size + NET_IP_ALIGN);
		if (!skb)
			goto nomem;
		skb_reserve(skb, NET_IP_ALIGN);
	}

	/* attach the page fragments needed to hold a full-size frame;
	 * on failure skbs-so-far (and their pages) go via kfree_skb */
	for (i = 0; i < sky2->rx_nfrags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto free_partial;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
	}

	return skb;
free_partial:
	kfree_skb(skb);
nomem:
	return NULL;
}

/* Hand all queued receive buffers to the chip by publishing rx_put */
static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
{
	sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
}

/*
 * Allocate and setup receiver buffer pool.
 * Normal case this ends up creating one list element for skb
 * in the receive ring. Worst case if using large MTU and each
 * allocation falls on a different 64 bit region, that results
 * in 6 list elements per ring entry.
 * One element is used for checksum enable/disable, and one
 * extra to avoid wrap.
 */
static int sky2_rx_start(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	struct rx_ring_info *re;
	unsigned rxq = rxqaddr[sky2->port];
	unsigned i, size, thresh;

	sky2->rx_put = sky2->rx_next = 0;
	sky2_qset(hw, rxq);

	/* On PCI express lowering the watermark gives better performance */
	if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
		sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);

	/* These chips have no ram buffer?
	 * MAC Rx RAM Read is controlled by hardware */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
	    (hw->chip_rev == CHIP_REV_YU_EC_U_A1
	     || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
		sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);

	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);

	/* older descriptor format needs explicit checksum start element */
	if (!(hw->flags & SKY2_HW_NEW_LE))
		rx_set_checksum(sky2);

	/* Space needed for frame data + headers rounded up */
	size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);

	/* Stopping point for hardware truncation */
	thresh = (size - 8) / sizeof(u32);

	sky2->rx_nfrags = size >> PAGE_SHIFT;
	BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));

	/* Compute residue after pages */
	size -= sky2->rx_nfrags << PAGE_SHIFT;

	/* Optimize to handle small packets and headers */
	if (size < copybreak)
		size = copybreak;
	if (size < ETH_HLEN)
		size = ETH_HLEN;

	sky2->rx_data_size = size;

	/* Fill Rx ring */
	for (i = 0; i < sky2->rx_pending; i++) {
		re = sky2->rx_ring + i;

		re->skb = sky2_rx_alloc(sky2);
		if (!re->skb)
			goto nomem;

		sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size);
		sky2_rx_submit(sky2, re);
	}

	/*
	 * The receiver hangs if it receives frames larger than the
	 * packet buffer. As a workaround, truncate oversize frames, but
	 * the register is limited to 9 bits, so if you do frames > 2052
	 * you better get the MTU right!
	 */
	if (thresh > 0x1ff)
		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
	else {
		sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
	}

	/* Tell chip about available buffers */
	sky2_rx_update(sky2, rxq);
	return 0;
nomem:
	sky2_rx_clean(sky2);
	return -ENOMEM;
}

/* Bring up network interface. */
static int sky2_up(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u32 imask, ramsize;
	int cap, err = -ENOMEM;
	struct net_device *otherdev = hw->dev[sky2->port^1];

	/*
	 * On dual port PCI-X card, there is a problem where status
	 * can be received out of order due to split transactions
	 */
	if (otherdev && netif_running(otherdev) &&
	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
		u16 cmd;

		cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
		cmd &= ~PCI_X_CMD_MAX_SPLIT;
		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);

	}

	if (netif_msg_ifup(sky2))
		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);

	netif_carrier_off(dev);

	/* must be power of 2 */
	sky2->tx_le = pci_alloc_consistent(hw->pdev,
					   TX_RING_SIZE *
					   sizeof(struct sky2_tx_le),
					   &sky2->tx_le_map);
	if (!sky2->tx_le)
		goto err_out;

	sky2->tx_ring = kcalloc(TX_RING_SIZE, sizeof(struct tx_ring_info),
				GFP_KERNEL);
	if (!sky2->tx_ring)
		goto err_out;

	tx_init(sky2);

	sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
					   &sky2->rx_le_map);
	if (!sky2->rx_le)
		goto err_out;
	memset(sky2->rx_le, 0, RX_LE_BYTES);

	sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
				GFP_KERNEL);
	if (!sky2->rx_ring)
		goto err_out;

	sky2_phy_power(hw, port, 1);

	sky2_mac_init(hw, port);

	/* Register is number of 4K blocks on internal RAM buffer. */
	ramsize = sky2_read8(hw, B2_E_0) * 4;
	if (ramsize > 0) {
		u32 rxspace;

		hw->flags |= SKY2_HW_RAM_BUFFER;
		pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
		/* split internal RAM between rx and tx, favoring rx */
		if (ramsize < 16)
			rxspace = ramsize / 2;
		else
			rxspace = 8 + (2*(ramsize - 16))/3;

		sky2_ramset(hw, rxqaddr[port], 0, rxspace);
		sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);

		/* Make sure SyncQ is disabled */
		sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
			    RB_RST_SET);
	}

	sky2_qset(hw, txqaddr[port]);

	/* This is copied from sk98lin 10.0.5.3; no one tells me about errata */
	if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);

	/* Set almost empty threshold */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U
	    && hw->chip_rev == CHIP_REV_YU_EC_U_A0)
		sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);

	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
			   TX_RING_SIZE - 1);

	err = sky2_rx_start(sky2);
	if (err)
		goto err_out;

	/* Enable interrupts from phy/mac for port */
	imask = sky2_read32(hw, B0_IMSK);
	imask |= portirq_msk[port];
	sky2_write32(hw, B0_IMSK, imask);

	sky2_set_multicast(dev);
	return 0;

err_out:
	if (sky2->rx_le) {
		pci_free_consistent(hw->pdev, RX_LE_BYTES,
				    sky2->rx_le, sky2->rx_le_map);
		sky2->rx_le = NULL;
	}
	if (sky2->tx_le) {
		pci_free_consistent(hw->pdev,
				    TX_RING_SIZE * sizeof(struct sky2_tx_le),
				    sky2->tx_le, sky2->tx_le_map);
		sky2->tx_le = NULL;
	}
	kfree(sky2->tx_ring);
	kfree(sky2->rx_ring);

	sky2->tx_ring = NULL;
	sky2->rx_ring = NULL;
	return err;
}

/* Modular subtraction in ring */
static inline int tx_dist(unsigned tail, unsigned head)
{
	return (head - tail) & (TX_RING_SIZE - 1);
}

/* Number of list elements available for next tx */
static inline int tx_avail(const struct sky2_port *sky2)
{
	return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
}

/* Estimate of number of transmit list elements required */
static unsigned tx_le_req(const struct sk_buff *skb)
{
	unsigned count;

	/* one element per buffer, doubled when 64-bit addresses are in use */
	count = sizeof(dma_addr_t) / sizeof(u32);
	count += skb_shinfo(skb)->nr_frags * count;

	if (skb_is_gso(skb))
		++count;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		++count;

	return count;
}

/*
 * Put one packet in ring for transmit.
 * A single packet can generate multiple list elements, and
 * the number of ring elements will probably be less than the number
 * of list elements used.
 */
static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	struct sky2_tx_le *le = NULL;
	struct tx_ring_info *re;
	unsigned i, len;
	dma_addr_t mapping;
	u16 mss;
	u8 ctrl;

	if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
		return NETDEV_TX_BUSY;

	if (unlikely(netif_msg_tx_queued(sky2)))
		printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
		       dev->name, sky2->tx_prod, skb->len);

	/* NOTE(review): pci_map_single/pci_map_page results below are not
	 * checked for DMA mapping failure — confirm this is acceptable on
	 * the targeted platforms. */
	len = skb_headlen(skb);
	mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* Send high bits if needed */
	if (sizeof(dma_addr_t) > sizeof(u32)) {
		le = get_tx_le(sky2);
		le->addr = cpu_to_le32(upper_32_bits(mapping));
		le->opcode = OP_ADDR64 | HW_OWNER;
	}

	/* Check for TCP Segmentation Offload */
	mss = skb_shinfo(skb)->gso_size;
	if (mss != 0) {

		/* old format expects MSS to include header length */
		if (!(hw->flags & SKY2_HW_NEW_LE))
			mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

		/* only emit an MSS element when the value changes */
		if (mss != sky2->tx_last_mss) {
			le = get_tx_le(sky2);
			le->addr = cpu_to_le32(mss);

			if (hw->flags & SKY2_HW_NEW_LE)
				le->opcode = OP_MSS | HW_OWNER;
			else
				le->opcode = OP_LRGLEN | HW_OWNER;
			sky2->tx_last_mss = mss;
		}
	}

	ctrl = 0;
#ifdef SKY2_VLAN_TAG_USED
	/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
	if (sky2->vlgrp && vlan_tx_tag_present(skb)) {
		if (!le) {
			le = get_tx_le(sky2);
			le->addr = 0;
			le->opcode = OP_VLAN|HW_OWNER;
		} else
			le->opcode |= OP_VLAN;
		le->length = cpu_to_be16(vlan_tx_tag_get(skb));
		ctrl |= INS_VLAN;
	}
#endif

	/* Handle TCP checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* On Yukon EX (some versions) encoding change. */
		if (hw->flags & SKY2_HW_AUTO_TX_SUM)
			ctrl |= CALSUM;	/* auto checksum */
		else {
			const unsigned offset = skb_transport_offset(skb);
			u32 tcpsum;

			tcpsum = offset << 16;			/* sum start */
			tcpsum |= offset + skb->csum_offset;	/* sum write */

			ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
			if (ip_hdr(skb)->protocol == IPPROTO_UDP)
				ctrl |= UDPTCP;

			/* only emit a checksum element when offsets change */
			if (tcpsum != sky2->tx_tcpsum) {
				sky2->tx_tcpsum = tcpsum;

				le = get_tx_le(sky2);
				le->addr = cpu_to_le32(tcpsum);
				le->length = 0;	/* initial checksum value */
				le->ctrl = 1;	/* one packet */
				le->opcode = OP_TCPLISW | HW_OWNER;
			}
		}
	}

	le = get_tx_le(sky2);
	le->addr = cpu_to_le32((u32) mapping);
	le->length = cpu_to_le16(len);
	le->ctrl = ctrl;
	le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);

	re = tx_le_re(sky2, le);
	re->skb = skb;
	pci_unmap_addr_set(re, mapaddr, mapping);
	pci_unmap_len_set(re, maplen, len);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		if (sizeof(dma_addr_t) > sizeof(u32)) {
			le = get_tx_le(sky2);
			le->addr = cpu_to_le32(upper_32_bits(mapping));
			le->ctrl = 0;
			le->opcode = OP_ADDR64 | HW_OWNER;
		}

		le = get_tx_le(sky2);
		le->addr = cpu_to_le32((u32) mapping);
		le->length = cpu_to_le16(frag->size);
		le->ctrl = ctrl;
		le->opcode = OP_BUFFER | HW_OWNER;

		re = tx_le_re(sky2, le);
		re->skb = skb;
		pci_unmap_addr_set(re, mapaddr, mapping);
		pci_unmap_len_set(re, maplen, frag->size);
	}

	/* mark last element of the packet */
	le->ctrl |= EOP;

	if (tx_avail(sky2) <= MAX_SKB_TX_LE)
		netif_stop_queue(dev);

	sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}

/*
 * Free ring elements from starting at tx_cons until "done"
 *
 * NB: the hardware will tell us about partial completion of multi-part
 * buffers so make sure not to free skb too early.
 */
static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
{
	struct net_device *dev = sky2->netdev;
	struct pci_dev *pdev = sky2->hw->pdev;
	unsigned idx;

	BUG_ON(done >= TX_RING_SIZE);

	for (idx = sky2->tx_cons; idx != done;
	     idx = RING_NEXT(idx, TX_RING_SIZE)) {
		struct sky2_tx_le *le = sky2->tx_le + idx;
		struct tx_ring_info *re = sky2->tx_ring + idx;

		switch(le->opcode & ~HW_OWNER) {
		case OP_LARGESEND:
		case OP_PACKET:
			pci_unmap_single(pdev,
					 pci_unmap_addr(re, mapaddr),
					 pci_unmap_len(re, maplen),
					 PCI_DMA_TODEVICE);
			break;
		case OP_BUFFER:
			pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
				       pci_unmap_len(re, maplen),
				       PCI_DMA_TODEVICE);
			break;
		}

		/* free the skb only on the element that ends the packet */
		if (le->ctrl & EOP) {
			if (unlikely(netif_msg_tx_done(sky2)))
				printk(KERN_DEBUG "%s: tx done %u\n",
				       dev->name, idx);

			dev->stats.tx_packets++;
			dev->stats.tx_bytes += re->skb->len;

			dev_kfree_skb_any(re->skb);
			sky2->tx_next = RING_NEXT(idx, TX_RING_SIZE);
		}
	}

	sky2->tx_cons = idx;
	smp_mb();

	if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
		netif_wake_queue(dev);
}

/* Cleanup all untransmitted buffers, assume transmitter not running */
static void sky2_tx_clean(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	netif_tx_lock_bh(dev);
	sky2_tx_complete(sky2, sky2->tx_prod);
	netif_tx_unlock_bh(dev);
}

/* Network shutdown */
static int sky2_down(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 ctrl;
	u32 imask;

	/* Never really got started! */
	if (!sky2->tx_le)
		return 0;

	if (netif_msg_ifdown(sky2))
		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);

	/* Stop more packets from being queued */
	netif_stop_queue(dev);

	/* Disable port IRQ */
	imask = sky2_read32(hw, B0_IMSK);
	imask &= ~portirq_msk[port];
	sky2_write32(hw, B0_IMSK, imask);

	synchronize_irq(hw->pdev->irq);

	sky2_gmac_reset(hw, port);

	/* Stop transmitter */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
	sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET | RB_DIS_OP_MD);

	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	/* Make sure no packets are pending */
	napi_synchronize(&hw->napi);

	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);

	/* Workaround shared GMAC reset */
	if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
	      && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	/* Disable Force Sync bit and Enable Alloc bit */
	sky2_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset the PCI FIFO of the async Tx queue */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
		     BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units */
	sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
		     PREF_UNIT_RST_SET);

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);

	sky2_rx_stop(sky2);

	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);

	sky2_phy_power(hw, port, 0);

	netif_carrier_off(dev);

	/* turn off LED's */
	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);

	sky2_tx_clean(dev);
	sky2_rx_clean(sky2);

	pci_free_consistent(hw->pdev, RX_LE_BYTES,
			    sky2->rx_le, sky2->rx_le_map);
	kfree(sky2->rx_ring);

	pci_free_consistent(hw->pdev,
			    TX_RING_SIZE * sizeof(struct sky2_tx_le),
			    sky2->tx_le, sky2->tx_le_map);
	kfree(sky2->tx_ring);

	sky2->tx_le = NULL;
	sky2->rx_le = NULL;

	sky2->rx_ring = NULL;
	sky2->tx_ring = NULL;

	return 0;
}

/* Decode PHY auxiliary status into an ethtool SPEED_* value */
static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
{
	if (hw->flags & SKY2_HW_FIBRE_PHY)
		return SPEED_1000;

	if (!(hw->flags & SKY2_HW_GIGABIT)) {
		if (aux & PHY_M_PS_SPEED_100)
			return SPEED_100;
		else
			return SPEED_10;
	}

	switch (aux & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return SPEED_1000;
	case PHY_M_PS_SPEED_100:
		return SPEED_100;
	default:
		return SPEED_10;
	}
}

/* Handle link-up: re-enable GMAC rx/tx, unmask PHY interrupts,
 * notify the stack, kick the watchdog and light the link LED.
 */
static void sky2_link_up(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 reg;
	static const char *fc_name[] = {
		[FC_NONE]	= "none",
		[FC_TX]		= "tx",
		[FC_RX]		= "rx",
		[FC_BOTH]	= "both",
	};

	/* enable Rx/Tx */
	reg = gma_read16(hw, port, GM_GP_CTRL);
	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
	gma_write16(hw, port, GM_GP_CTRL, reg);

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);

	netif_carrier_on(sky2->netdev);

	mod_timer(&hw->watchdog_timer, jiffies + 1);

	/* Turn on link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG),
		    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);

	if (netif_msg_link(sky2))
		printk(KERN_INFO PFX
		       "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
		       sky2->netdev->name, sky2->speed,
		       sky2->duplex == DUPLEX_FULL ? "full" : "half",
		       fc_name[sky2->flow_status]);
}

/* Handle link-down: disable GMAC rx/tx, drop carrier and restart
 * PHY autonegotiation setup.
 */
static void sky2_link_down(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 reg;

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, reg);

	netif_carrier_off(sky2->netdev);

	/* Turn off link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);

	if (netif_msg_link(sky2))
		printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);

	sky2_phy_init(hw, port);
}

/* Combine rx/tx pause booleans into the driver's flow_control enum */
static enum flow_control sky2_flow(int rx, int tx)
{
	if (rx)
		return tx ? FC_BOTH : FC_RX;
	else
		return tx ? FC_TX : FC_NONE;
}

/* Resolve the outcome of autonegotiation (speed, duplex, pause);
 * returns 0 on success, -1 on remote fault or speed/duplex mismatch.
 */
static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 advert, lpa;

	advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
	lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
	if (lpa & PHY_M_AN_RF) {
		printk(KERN_ERR PFX "%s: remote fault", sky2->netdev->name);
		return -1;
	}

	if (!(aux & PHY_M_PS_SPDUP_RES)) {
		printk(KERN_ERR PFX "%s: speed/duplex mismatch",
		       sky2->netdev->name);
		return -1;
	}

	sky2->speed = sky2_phy_speed(hw, aux);
	sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;

	/* Since the pause result bits seem to be in different positions on
	 * different chips. look at registers.
	 */
	if (hw->flags & SKY2_HW_FIBRE_PHY) {
		/* Shift for bits in fiber PHY */
		advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM);
		lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM);

		if (advert & ADVERTISE_1000XPAUSE)
			advert |= ADVERTISE_PAUSE_CAP;
		if (advert & ADVERTISE_1000XPSE_ASYM)
			advert |= ADVERTISE_PAUSE_ASYM;
		if (lpa & LPA_1000XPAUSE)
			lpa |= LPA_PAUSE_CAP;
		if (lpa & LPA_1000XPAUSE_ASYM)
			lpa |= LPA_PAUSE_ASYM;
	}

	/* standard 802.3 pause resolution from advert/lpa bits */
	sky2->flow_status = FC_NONE;
	if (advert & ADVERTISE_PAUSE_CAP) {
		if (lpa & LPA_PAUSE_CAP)
			sky2->flow_status = FC_BOTH;
		else if (advert & ADVERTISE_PAUSE_ASYM)
			sky2->flow_status = FC_RX;
	} else if (advert & ADVERTISE_PAUSE_ASYM) {
		if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM))
			sky2->flow_status = FC_TX;
	}

	if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000
	    && !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
		sky2->flow_status = FC_NONE;

	if (sky2->flow_status & FC_TX)
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
	else
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);

	return 0;
}

/* Interrupt from PHY */
static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u16 istatus, phystat;

	if (!netif_running(dev))
		return;

	spin_lock(&sky2->phy_lock);
	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);

	if (netif_msg_intr(sky2))
		printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
		       sky2->netdev->name, istatus, phystat);

	if (sky2->autoneg == AUTONEG_ENABLE && (istatus & PHY_M_IS_AN_COMPL)) {
		if (sky2_autoneg_done(sky2, phystat) == 0)
			sky2_link_up(sky2);
		goto out;
	}

	if (istatus & PHY_M_IS_LSP_CHANGE)
		sky2->speed = sky2_phy_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		sky2->duplex =
		    (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;

	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			sky2_link_up(sky2);
		else
			sky2_link_down(sky2);
	}
out:
	spin_unlock(&sky2->phy_lock);
}

/* Transmit timeout is only called if we are running, carrier is up
 * and tx queue is full (stopped).
 */
static void sky2_tx_timeout(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	if (netif_msg_timer(sky2))
		printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);

	printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
	       dev->name, sky2->tx_cons, sky2->tx_prod,
	       sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
	       sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));

	/* can't restart safely under softirq */
	schedule_work(&hw->restart_work);
}

/* Change MTU: validates the new size, then (when running) quiesces the
 * port, rebuilds the receive ring with buffers for the new size, and
 * restarts the receiver.  On restart failure the device is closed.
 */
static int sky2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	int err;
	u16 ctl, mode;
	u32 imask;

	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	/* these chips do not support jumbo frames */
	if (new_mtu > ETH_DATA_LEN &&
	    (hw->chip_id == CHIP_ID_YUKON_FE ||
	     hw->chip_id == CHIP_ID_YUKON_FE_P))
		return -EINVAL;

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		return 0;
	}

	imask = sky2_read32(hw, B0_IMSK);
	sky2_write32(hw, B0_IMSK, 0);

	dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_stop_queue(dev);
	napi_disable(&hw->napi);

	synchronize_irq(hw->pdev->irq);

	if (!(hw->flags & SKY2_HW_RAM_BUFFER))
		sky2_set_tx_stfwd(hw, port);

	ctl = gma_read16(hw, port, GM_GP_CTRL);
	gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
	sky2_rx_stop(sky2);
	sky2_rx_clean(sky2);

	dev->mtu = new_mtu;

	mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
		GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (dev->mtu > ETH_DATA_LEN)
		mode |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, port, GM_SERIAL_MODE, mode);

	sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);

	err = sky2_rx_start(sky2);
	sky2_write32(hw, B0_IMSK, imask);

	sky2_read32(hw, B0_Y2_SP_LISR);
	napi_enable(&hw->napi);

	if (err)
		dev_close(dev);
	else {
		gma_write16(hw, port, GM_GP_CTRL, ctl);

		netif_wake_queue(dev);
	}

	return err;
}

/* For small just reuse existing skb for next receive */
static struct sk_buff *receive_copy(struct sky2_port *sky2,
				    const struct rx_ring_info *re,
				    unsigned length)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(sky2->netdev, length + 2);
	if (likely(skb)) {
		skb_reserve(skb, 2);
		pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
					    length, PCI_DMA_FROMDEVICE);
		skb_copy_from_linear_data(re->skb, skb->data, length);
		skb->ip_summed = re->skb->ip_summed;
		skb->csum = re->skb->csum;
		pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
					       length, PCI_DMA_FROMDEVICE);
		/* original buffer stays in the ring; clear its stale csum */
		re->skb->ip_summed = CHECKSUM_NONE;
		skb_put(skb, length);
	}
	return skb;
}

/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;

	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			__free_page(frag->page);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}

/* Normal packet - take skb from ring element and put in a new one */
static struct sk_buff *receive_new(struct sky2_port *sky2,
				   struct rx_ring_info *re,
				   unsigned int length)
{
	struct sk_buff *skb, *nskb;
	unsigned hdr_space = sky2->rx_data_size;

	/* Don't be tricky about reusing pages (yet) */
	nskb = sky2_rx_alloc(sky2);
	if (unlikely(!nskb))
		return NULL;

	skb = re->skb;
	sky2_rx_unmap_skb(sky2->hw->pdev, re);

	prefetch(skb->data);
	re->skb = nskb;
	sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space);

	if (skb_shinfo(skb)->nr_frags)
		skb_put_frags(skb, hdr_space, length);
	else
		skb_put(skb, length);
	return skb;
}

/*
 * Receive one packet.
 * For larger packets, get new buffer.
2153 */ 2154static struct sk_buff *sky2_receive(struct net_device *dev, 2155 u16 length, u32 status) 2156{ 2157 struct sky2_port *sky2 = netdev_priv(dev); 2158 struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next; 2159 struct sk_buff *skb = NULL; 2160 u16 count = (status & GMR_FS_LEN) >> 16; 2161 2162#ifdef SKY2_VLAN_TAG_USED 2163 /* Account for vlan tag */ 2164 if (sky2->vlgrp && (status & GMR_FS_VLAN)) 2165 count -= VLAN_HLEN; 2166#endif 2167 2168 if (unlikely(netif_msg_rx_status(sky2))) 2169 printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n", 2170 dev->name, sky2->rx_next, status, length); 2171 2172 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; 2173 prefetch(sky2->rx_ring + sky2->rx_next); 2174 2175 /* This chip has hardware problems that generates bogus status. 2176 * So do only marginal checking and expect higher level protocols 2177 * to handle crap frames. 2178 */ 2179 if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && 2180 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 && 2181 length != count) 2182 goto okay; 2183 2184 if (status & GMR_FS_ANY_ERR) 2185 goto error; 2186 2187 if (!(status & GMR_FS_RX_OK)) 2188 goto resubmit; 2189 2190 /* if length reported by DMA does not match PHY, packet was truncated */ 2191 if (length != count) 2192 goto len_error; 2193 2194okay: 2195 if (length < copybreak) 2196 skb = receive_copy(sky2, re, length); 2197 else 2198 skb = receive_new(sky2, re, length); 2199resubmit: 2200 sky2_rx_submit(sky2, re); 2201 2202 return skb; 2203 2204len_error: 2205 /* Truncation of overlength packets 2206 causes PHY length to not match MAC length */ 2207 ++dev->stats.rx_length_errors; 2208 if (netif_msg_rx_err(sky2) && net_ratelimit()) 2209 pr_info(PFX "%s: rx length error: status %#x length %d\n", 2210 dev->name, status, length); 2211 goto resubmit; 2212 2213error: 2214 ++dev->stats.rx_errors; 2215 if (status & GMR_FS_RX_FF_OV) { 2216 dev->stats.rx_over_errors++; 2217 goto resubmit; 2218 } 2219 2220 if (netif_msg_rx_err(sky2) && 
net_ratelimit()) 2221 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n", 2222 dev->name, status, length); 2223 2224 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE)) 2225 dev->stats.rx_length_errors++; 2226 if (status & GMR_FS_FRAGMENT) 2227 dev->stats.rx_frame_errors++; 2228 if (status & GMR_FS_CRC_ERR) 2229 dev->stats.rx_crc_errors++; 2230 2231 goto resubmit; 2232} 2233 2234/* Transmit complete */ 2235static inline void sky2_tx_done(struct net_device *dev, u16 last) 2236{ 2237 struct sky2_port *sky2 = netdev_priv(dev); 2238 2239 if (netif_running(dev)) { 2240 netif_tx_lock(dev); 2241 sky2_tx_complete(sky2, last); 2242 netif_tx_unlock(dev); 2243 } 2244} 2245 2246/* Process status response ring */ 2247static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) 2248{ 2249 int work_done = 0; 2250 unsigned rx[2] = { 0, 0 }; 2251 2252 rmb(); 2253 do { 2254 struct sky2_port *sky2; 2255 struct sky2_status_le *le = hw->st_le + hw->st_idx; 2256 unsigned port; 2257 struct net_device *dev; 2258 struct sk_buff *skb; 2259 u32 status; 2260 u16 length; 2261 u8 opcode = le->opcode; 2262 2263 if (!(opcode & HW_OWNER)) 2264 break; 2265 2266 hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE); 2267 2268 port = le->css & CSS_LINK_BIT; 2269 dev = hw->dev[port]; 2270 sky2 = netdev_priv(dev); 2271 length = le16_to_cpu(le->length); 2272 status = le32_to_cpu(le->status); 2273 2274 le->opcode = 0; 2275 switch (opcode & ~HW_OWNER) { 2276 case OP_RXSTAT: 2277 ++rx[port]; 2278 skb = sky2_receive(dev, length, status); 2279 if (unlikely(!skb)) { 2280 dev->stats.rx_dropped++; 2281 break; 2282 } 2283 2284 /* This chip reports checksum status differently */ 2285 if (hw->flags & SKY2_HW_NEW_LE) { 2286 if (sky2->rx_csum && 2287 (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) && 2288 (le->css & CSS_TCPUDPCSOK)) 2289 skb->ip_summed = CHECKSUM_UNNECESSARY; 2290 else 2291 skb->ip_summed = CHECKSUM_NONE; 2292 } 2293 2294 skb->protocol = eth_type_trans(skb, dev); 2295 dev->stats.rx_packets++; 
			dev->stats.rx_bytes += skb->len;
			dev->last_rx = jiffies;

#ifdef SKY2_VLAN_TAG_USED
			if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
				vlan_hwaccel_receive_skb(skb,
							 sky2->vlgrp,
							 be16_to_cpu(sky2->rx_tag));
			} else
#endif
				netif_receive_skb(skb);

			/* Stop after net poll weight */
			if (++work_done >= to_do)
				goto exit_loop;
			break;

#ifdef SKY2_VLAN_TAG_USED
		case OP_RXVLAN:
			/* VLAN tag arrives in a separate list element ahead
			 * of the matching OP_RXSTAT; stash it for that frame */
			sky2->rx_tag = length;
			break;

		case OP_RXCHKSVLAN:
			sky2->rx_tag = length;
			/* fall through */
#endif
		case OP_RXCHKS:
			if (!sky2->rx_csum)
				break;

			/* If this happens then driver assuming wrong format */
			if (unlikely(hw->flags & SKY2_HW_NEW_LE)) {
				if (net_ratelimit())
					printk(KERN_NOTICE "%s: unexpected"
					       " checksum status\n",
					       dev->name);
				break;
			}

			/* Both checksum counters are programmed to start at
			 * the same offset, so unless there is a problem they
			 * should match. This failure is an early indication that
			 * hardware receive checksumming won't work.
			 */
			if (likely(status >> 16 == (status & 0xffff))) {
				skb = sky2->rx_ring[sky2->rx_next].skb;
				skb->ip_summed = CHECKSUM_COMPLETE;
				skb->csum = status & 0xffff;
			} else {
				/* mismatch: give up on hw rx checksumming
				 * for this port from now on */
				printk(KERN_NOTICE PFX "%s: hardware receive "
				       "checksum problem (status = %#x)\n",
				       dev->name, status);
				sky2->rx_csum = 0;
				sky2_write32(sky2->hw,
					     Q_ADDR(rxqaddr[port], Q_CSR),
					     BMU_DIS_RX_CHKSUM);
			}
			break;

		case OP_TXINDEXLE:
			/* TX index reports status for both ports */
			BUILD_BUG_ON(TX_RING_SIZE > 0x1000);
			sky2_tx_done(hw->dev[0], status & 0xfff);
			if (hw->dev[1])
				sky2_tx_done(hw->dev[1],
				     ((status >> 24) & 0xff)
					     | (u16)(length & 0xf) << 8);
			break;

		default:
			if (net_ratelimit())
				printk(KERN_WARNING PFX
				       "unknown status opcode 0x%x\n", opcode);
		}
	} while (hw->st_idx != idx);

	/* Fully processed status ring so clear irq */
	sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);

exit_loop:
	if (rx[0])
		sky2_rx_update(netdev_priv(hw->dev[0]), Q_R1);

	if (rx[1])
		sky2_rx_update(netdev_priv(hw->dev[1]), Q_R2);

	return work_done;
}

/* Decode and clear per-port hardware error bits (parity, TCP seg).
 * Caller has already shifted status so port 1 errors look like port 0's.
 */
static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
{
	struct net_device *dev = hw->dev[port];

	if (net_ratelimit())
		printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
		       dev->name, status);

	if (status & Y2_IS_PAR_RD1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: ram data read parity error\n",
			       dev->name);
		/* Clear IRQ */
		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
	}

	if (status & Y2_IS_PAR_WR1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: ram data write parity error\n",
			       dev->name);

		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
	}

	if (status & Y2_IS_PAR_MAC1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
	}

	if (status & Y2_IS_PAR_RX1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
		sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
	}

	if (status & Y2_IS_TCP_TXA1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: TCP segmentation error\n",
			       dev->name);
		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
	}
}

/* Top-level hardware error interrupt: PCI/PCIe errors plus per-port
 * error bits (port 1's bits live in the next byte of the status word).
 */
static void sky2_hw_intr(struct sky2_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u32 status = sky2_read32(hw, B0_HWE_ISRC);
	u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);

	status &= hwmsk;

	if (status & Y2_IS_TIST_OV)
		sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
		u16 pci_err;

		/* PCI config space is write-protected; open it to clear
		 * the error bits, then lock it again */
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_err = sky2_pci_read16(hw, PCI_STATUS);
		if (net_ratelimit())
			dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
				pci_err);

		sky2_pci_write16(hw, PCI_STATUS,
				 pci_err | PCI_STATUS_ERROR_BITS);
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if (status & Y2_IS_PCI_EXP) {
		/* PCI-Express uncorrectable Error occurred */
		u32 err;

		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
		sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
			     0xfffffffful);
		if (net_ratelimit())
			dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);

		sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 0, status);
	status >>= 8;
	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 1, status);
}

static
void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));

	if (netif_msg_intr(sky2))
		printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
		       dev->name, status);

	/* Counter-overflow interrupts are acknowledged by reading the
	 * corresponding IRQ source register; result is discarded */
	if (status & GM_IS_RX_CO_OV)
		gma_read16(hw, port, GM_RX_IRQ_SRC);

	if (status & GM_IS_TX_CO_OV)
		gma_read16(hw, port, GM_TX_IRQ_SRC);

	if (status & GM_IS_RX_FF_OR) {
		++dev->stats.rx_fifo_errors;
		sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
	}

	if (status & GM_IS_TX_FF_UR) {
		++dev->stats.tx_fifo_errors;
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
	}
}

/* This should never happen it is a bug. */
static void sky2_le_error(struct sky2_hw *hw, unsigned port,
			  u16 q, unsigned ring_size)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	unsigned idx;
	/* dump descriptors as raw 64-bit words; pick rx or tx ring by queue */
	const u64 *le = (q == Q_R1 || q == Q_R2)
		? (u64 *) sky2->rx_le : (u64 *) sky2->tx_le;

	idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
	printk(KERN_ERR PFX "%s: descriptor error q=%#x get=%u [%llx] put=%u\n",
	       dev->name, (unsigned) q, idx, (unsigned long long) le[idx],
	       (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));

	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
}

/* Heuristic detection of a stuck receiver: compares current MAC/FIFO
 * read pointers and fill levels against the snapshot taken on the
 * previous watchdog tick. Returns 1 if the receive path looks hung.
 */
static int sky2_rx_hung(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	unsigned rxq = rxqaddr[port];
	u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP));
	u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV));
	u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
	u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));

	/* If idle and MAC or PCI is stuck */
	if (sky2->check.last == dev->last_rx &&
	    ((mac_rp == sky2->check.mac_rp &&
	      mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
	     /* Check if the PCI RX hang */
	     (fifo_rp == sky2->check.fifo_rp &&
	      fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
		printk(KERN_DEBUG PFX "%s: hung mac %d:%d fifo %d (%d:%d)\n",
		       dev->name, mac_lev, mac_rp, fifo_lev, fifo_rp,
		       sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
		return 1;
	} else {
		/* record snapshot for next tick's comparison */
		sky2->check.last = dev->last_rx;
		sky2->check.mac_rp = mac_rp;
		sky2->check.mac_lev = mac_lev;
		sky2->check.fifo_rp = fifo_rp;
		sky2->check.fifo_lev = fifo_lev;
		return 0;
	}
}

/* Timer callback (arg is struct sky2_hw *): once a second, catch lost
 * interrupts and detect hung receivers; rearms itself while any port
 * is running.
 */
static void sky2_watchdog(unsigned long arg)
{
	struct sky2_hw *hw = (struct sky2_hw *) arg;

	/* Check for lost IRQ once a second */
	if (sky2_read32(hw, B0_ISRC)) {
		napi_schedule(&hw->napi);
	} else {
		int i, active = 0;

		for (i = 0; i < hw->ports; i++) {
			struct net_device *dev = hw->dev[i];
			if (!netif_running(dev))
				continue;
			++active;

			/* For chips with Rx FIFO, check if stuck */
			if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
			    sky2_rx_hung(dev)) {
				pr_info(PFX "%s: receiver hang detected\n",
					dev->name);
				schedule_work(&hw->restart_work);
				return;
			}
		}

		if (active == 0)
			return;
	}

	mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
}

/* Hardware/software error handling */
static void sky2_err_intr(struct sky2_hw *hw, u32 status)
{
	if (net_ratelimit())
		dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);

	if (status & Y2_IS_HW_ERR)
		sky2_hw_intr(hw);

	if (status & Y2_IS_IRQ_MAC1)
		sky2_mac_intr(hw, 0);

	if (status & Y2_IS_IRQ_MAC2)
		sky2_mac_intr(hw, 1);

	if (status & Y2_IS_CHK_RX1)
		sky2_le_error(hw, 0, Q_R1, RX_LE_SIZE);

	if (status & Y2_IS_CHK_RX2)
		sky2_le_error(hw, 1, Q_R2, RX_LE_SIZE);

	if (status & Y2_IS_CHK_TXA1)
		sky2_le_error(hw, 0, Q_XA1, TX_RING_SIZE);

	if (status & Y2_IS_CHK_TXA2)
		sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE);
}

/* NAPI poll: handle error/PHY interrupts, then drain the status ring
 * up to the budget.
 */
static int sky2_poll(struct napi_struct *napi, int work_limit)
{
	struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi);
	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
	int work_done = 0;
	u16 idx;

	if (unlikely(status & Y2_IS_ERROR))
		sky2_err_intr(hw, status);

	if (status & Y2_IS_IRQ_PHY1)
		sky2_phy_intr(hw, 0);

	if (status & Y2_IS_IRQ_PHY2)
		sky2_phy_intr(hw, 1);

	while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
		work_done += sky2_status_intr(hw, work_limit - work_done, idx);

		if (work_done >= work_limit)
			goto done;
	}

	/* Bug/Errata workaround?
	 * Need to kick the TX irq moderation timer.
	 */
	if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
	}
	napi_complete(napi);
	sky2_read32(hw, B0_Y2_SP_LISR);	/* re-enable interrupts (read has side effect) */
done:

	return work_done;
}

static irqreturn_t sky2_intr(int irq, void *dev_id)
{
	struct sky2_hw *hw = dev_id;
	u32 status;

	/* Reading this mask interrupts as side effect */
	status = sky2_read32(hw, B0_Y2_SP_ISRC2);
	if (status == 0 || status == ~0)
		return IRQ_NONE;

	prefetch(&hw->st_le[hw->st_idx]);

	napi_schedule(&hw->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sky2_netpoll(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	napi_schedule(&sky2->hw->napi);
}
#endif

/* Chip internal frequency for clock calculations */
static u32 sky2_mhz(const struct sky2_hw *hw)
{
	switch (hw->chip_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
	case CHIP_ID_YUKON_EX:
	case CHIP_ID_YUKON_SUPR:
		return 125;

	case CHIP_ID_YUKON_FE:
		return 100;

	case CHIP_ID_YUKON_FE_P:
		return 50;

	case CHIP_ID_YUKON_XL:
		return 156;

	default:
		BUG();	/* sky2_init() rejects unknown chip ids before this runs */
	}
}

/* Convert microseconds to chip clock ticks */
static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
{
	return sky2_mhz(hw) * us;
}

/* Convert chip clock ticks to microseconds */
static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
{
	return clk / sky2_mhz(hw);
}


/* Identify the chip, set per-variant feature flags and count ports.
 * Returns 0 on success or -EOPNOTSUPP for unsupported silicon.
 */
static int __devinit sky2_init(struct sky2_hw *hw)
{
	u8 t8;

	/* Enable all clocks and check for bad PCI access */
	sky2_pci_write32(hw, PCI_DEV_REG3, 0);

	sky2_write8(hw, B0_CTST, CS_RST_CLR);

	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;

	switch(hw->chip_id) {
	case CHIP_ID_YUKON_XL:
		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
		break;

	case CHIP_ID_YUKON_EC_U:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEWER_PHY
			| SKY2_HW_ADV_POWER_CTL;
		break;

	case CHIP_ID_YUKON_EX:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEWER_PHY
			| SKY2_HW_NEW_LE
			| SKY2_HW_ADV_POWER_CTL;

		/* New transmit checksum */
		if (hw->chip_rev != CHIP_REV_YU_EX_B0)
			hw->flags |= SKY2_HW_AUTO_TX_SUM;
		break;

	case CHIP_ID_YUKON_EC:
		/* This rev is really old, and requires untested workarounds */
		if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
			dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
			return -EOPNOTSUPP;
		}
		hw->flags = SKY2_HW_GIGABIT;
		break;

	case CHIP_ID_YUKON_FE:
		break;

	case CHIP_ID_YUKON_FE_P:
		hw->flags = SKY2_HW_NEWER_PHY
			| SKY2_HW_NEW_LE
			| SKY2_HW_AUTO_TX_SUM
			| SKY2_HW_ADV_POWER_CTL;
		break;

	case CHIP_ID_YUKON_SUPR:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEWER_PHY
			| SKY2_HW_NEW_LE
			| SKY2_HW_AUTO_TX_SUM
			| SKY2_HW_ADV_POWER_CTL;
		break;

	default:
		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
			hw->chip_id);
		return -EOPNOTSUPP;
	}

	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
	if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
		hw->flags |= SKY2_HW_FIBRE_PHY;


	hw->ports = 1;
	t8 = sky2_read8(hw, B2_Y2_HW_RES);
	if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
		if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			++hw->ports;
	}

	return 0;
}

/* Full chip reset: disable ASF firmware, soft-reset, clear PCI error
 * state, and reinitialize RAM interface, GMACs and the status unit.
 */
static void sky2_reset(struct sky2_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u16 status;
	int i, cap;
	u32 hwe_mask = Y2_HWE_ALL_MASK;

	/* disable ASF */
	if (hw->chip_id == CHIP_ID_YUKON_EX) {
		status = sky2_read16(hw, HCU_CCSR);
		status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
			    HCU_CCSR_UC_STATE_MSK);
		sky2_write16(hw, HCU_CCSR, status);
	} else
		sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
	sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);

	/* do a SW reset */
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	sky2_write8(hw, B0_CTST, CS_RST_CLR);

	/* allow writes to PCI config */
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	/* clear PCI errors, if any */
	status = sky2_pci_read16(hw, PCI_STATUS);
	status |= PCI_STATUS_ERROR_BITS;
	sky2_pci_write16(hw, PCI_STATUS, status);

	sky2_write8(hw, B0_CTST, CS_MRST_CLR);

	cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (cap) {
		/* clear any stale PCIe AER uncorrectable error state */
		sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
			     0xfffffffful);

		/* If error bit is stuck on ignore it */
		if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
			dev_info(&pdev->dev, "ignoring stuck error report bit\n");
		else
			hwe_mask |= Y2_IS_PCI_EXP;
	}

	sky2_power_on(hw);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	for (i = 0; i < hw->ports; i++) {
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);

		if (hw->chip_id == CHIP_ID_YUKON_EX ||
		    hw->chip_id == CHIP_ID_YUKON_SUPR)
			sky2_write16(hw, SK_REG(i, GMAC_CTRL),
				     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
				     | GMC_BYP_RETR_ON);
	}

	/* Clear I2C IRQ noise */
	sky2_write32(hw, B2_I2C_IRQ, 1);

	/* turn off hardware timer (unused) */
	sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
	sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);

	sky2_write8(hw, B0_Y2LED, LED_STAT_ON);

	/* Turn off descriptor polling */
	sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);

	/* Turn off receive timestamp */
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	for (i = 0; i < hw->ports; i++) {
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);

		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
	}

	sky2_write32(hw, B0_HWE_IMSK, hwe_mask);

	for (i = 0; i < hw->ports; i++)
		sky2_gmac_reset(hw, i);

	memset(hw->st_le, 0, STATUS_LE_BYTES);
	hw->st_idx = 0;

	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);

	sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
	sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);

	/* Set the list last index */
	sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);

	sky2_write16(hw, STAT_TX_IDX_TH, 10);
	sky2_write8(hw, STAT_FIFO_WM, 16);

	/* set Status-FIFO ISR watermark */
	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
		sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
	else
		sky2_write8(hw, STAT_FIFO_ISR_WM, 16);

	/* interrupt moderation timers (values in microseconds) */
	sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
	sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
	sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));

	/* enable status unit */
	sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);

	sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
}

/* Workqueue handler: take all running ports down, reset the chip and
 * bring the ports back up. Runs under RTNL so device state is stable.
 */
static void sky2_restart(struct work_struct *work)
{
	struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
	struct net_device *dev;
	int i, err;

	rtnl_lock();
	for (i = 0; i < hw->ports; i++) {
		dev = hw->dev[i];
		if (netif_running(dev))
			sky2_down(dev);
	}

	napi_disable(&hw->napi);
	sky2_write32(hw, B0_IMSK, 0);
	sky2_reset(hw);
	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
	napi_enable(&hw->napi);

	for (i = 0; i < hw->ports; i++) {
		dev = hw->dev[i];
		if (netif_running(dev)) {
			err = sky2_up(dev);
			if (err) {
				printk(KERN_INFO PFX "%s: could not restart %d\n",
				       dev->name, err);
				dev_close(dev);
			}
		}
	}

	rtnl_unlock();
}

/* Wake-on-LAN is only supported on copper PHYs */
static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
{
	return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
}

static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	const struct sky2_port *sky2 = netdev_priv(dev);

	wol->supported = sky2_wol_supported(sky2->hw);
	wol->wolopts = sky2->wol;
}

static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	if (wol->wolopts & ~sky2_wol_supported(sky2->hw))
		return -EOPNOTSUPP;

	sky2->wol = wol->wolopts;

	if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
	    hw->chip_id == CHIP_ID_YUKON_EX ||
	    hw->chip_id == CHIP_ID_YUKON_FE_P)
		sky2_write32(hw, B0_CTST, sky2->wol
			     ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);

	if (!netif_running(dev))
		sky2_wol_init(sky2);
	return 0;
}

/* Build the ethtool SUPPORTED_* mask from the PHY type and chip flags */
static u32 sky2_supported_modes(const struct sky2_hw *hw)
{
	if (sky2_is_copper(hw)) {
		u32 modes = SUPPORTED_10baseT_Half
			| SUPPORTED_10baseT_Full
			| SUPPORTED_100baseT_Half
			| SUPPORTED_100baseT_Full
			| SUPPORTED_Autoneg | SUPPORTED_TP;

		if (hw->flags & SKY2_HW_GIGABIT)
			modes |= SUPPORTED_1000baseT_Half
				| SUPPORTED_1000baseT_Full;
		return modes;
	} else
		return SUPPORTED_1000baseT_Half
			| SUPPORTED_1000baseT_Full
			| SUPPORTED_Autoneg
			| SUPPORTED_FIBRE;
}

static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = sky2_supported_modes(hw);
	ecmd->phy_address = PHY_ADDR_MARV;
	if (sky2_is_copper(hw)) {
		ecmd->port = PORT_TP;
		ecmd->speed = sky2->speed;
	} else {
		ecmd->speed = SPEED_1000;	/* fibre is always gigabit */
		ecmd->port = PORT_FIBRE;
	}

	ecmd->advertising = sky2->advertising;
	ecmd->autoneg = sky2->autoneg;
	ecmd->duplex = sky2->duplex;
	return 0;
}

static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	const struct sky2_hw *hw = sky2->hw;
	u32 supported = sky2_supported_modes(hw);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		ecmd->advertising = supported;
		/* -1 means "not forced" until autoneg resolves */
		sky2->duplex = -1;
		sky2->speed = -1;
	} else {
		u32 setting;

		switch (ecmd->speed) {
		case SPEED_1000:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_1000baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_1000baseT_Half;
			else
				return -EINVAL;
			break;
		case SPEED_100:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_100baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_100baseT_Half;
			else
				return -EINVAL;
			break;

		case SPEED_10:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_10baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_10baseT_Half;
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		if ((setting & supported) == 0)
			return -EINVAL;

		sky2->speed = ecmd->speed;
		sky2->duplex = ecmd->duplex;
	}

	sky2->autoneg = ecmd->autoneg;
	sky2->advertising = ecmd->advertising;

	if (netif_running(dev)) {
		sky2_phy_reinit(sky2);
		sky2_set_multicast(dev);
	}

	return 0;
}

static void sky2_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(sky2->hw->pdev));
}

/* Table mapping ethtool statistic names to GMAC MIB register offsets */
static const struct sky2_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} sky2_stats[] = {
	{
"tx_bytes", GM_TXO_OK_HI }, 3114 { "rx_bytes", GM_RXO_OK_HI }, 3115 { "tx_broadcast", GM_TXF_BC_OK }, 3116 { "rx_broadcast", GM_RXF_BC_OK }, 3117 { "tx_multicast", GM_TXF_MC_OK }, 3118 { "rx_multicast", GM_RXF_MC_OK }, 3119 { "tx_unicast", GM_TXF_UC_OK }, 3120 { "rx_unicast", GM_RXF_UC_OK }, 3121 { "tx_mac_pause", GM_TXF_MPAUSE }, 3122 { "rx_mac_pause", GM_RXF_MPAUSE }, 3123 { "collisions", GM_TXF_COL }, 3124 { "late_collision",GM_TXF_LAT_COL }, 3125 { "aborted", GM_TXF_ABO_COL }, 3126 { "single_collisions", GM_TXF_SNG_COL }, 3127 { "multi_collisions", GM_TXF_MUL_COL }, 3128 3129 { "rx_short", GM_RXF_SHT }, 3130 { "rx_runt", GM_RXE_FRAG }, 3131 { "rx_64_byte_packets", GM_RXF_64B }, 3132 { "rx_65_to_127_byte_packets", GM_RXF_127B }, 3133 { "rx_128_to_255_byte_packets", GM_RXF_255B }, 3134 { "rx_256_to_511_byte_packets", GM_RXF_511B }, 3135 { "rx_512_to_1023_byte_packets", GM_RXF_1023B }, 3136 { "rx_1024_to_1518_byte_packets", GM_RXF_1518B }, 3137 { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ }, 3138 { "rx_too_long", GM_RXF_LNG_ERR }, 3139 { "rx_fifo_overflow", GM_RXE_FIFO_OV }, 3140 { "rx_jabber", GM_RXF_JAB_PKT }, 3141 { "rx_fcs_error", GM_RXF_FCS_ERR }, 3142 3143 { "tx_64_byte_packets", GM_TXF_64B }, 3144 { "tx_65_to_127_byte_packets", GM_TXF_127B }, 3145 { "tx_128_to_255_byte_packets", GM_TXF_255B }, 3146 { "tx_256_to_511_byte_packets", GM_TXF_511B }, 3147 { "tx_512_to_1023_byte_packets", GM_TXF_1023B }, 3148 { "tx_1024_to_1518_byte_packets", GM_TXF_1518B }, 3149 { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ }, 3150 { "tx_fifo_underrun", GM_TXE_FIFO_UR }, 3151}; 3152 3153static u32 sky2_get_rx_csum(struct net_device *dev) 3154{ 3155 struct sky2_port *sky2 = netdev_priv(dev); 3156 3157 return sky2->rx_csum; 3158} 3159 3160static int sky2_set_rx_csum(struct net_device *dev, u32 data) 3161{ 3162 struct sky2_port *sky2 = netdev_priv(dev); 3163 3164 sky2->rx_csum = data; 3165 3166 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), 3167 data ? 
BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 3168 3169 return 0; 3170} 3171 3172static u32 sky2_get_msglevel(struct net_device *netdev) 3173{ 3174 struct sky2_port *sky2 = netdev_priv(netdev); 3175 return sky2->msg_enable; 3176} 3177 3178static int sky2_nway_reset(struct net_device *dev) 3179{ 3180 struct sky2_port *sky2 = netdev_priv(dev); 3181 3182 if (!netif_running(dev) || sky2->autoneg != AUTONEG_ENABLE) 3183 return -EINVAL; 3184 3185 sky2_phy_reinit(sky2); 3186 sky2_set_multicast(dev); 3187 3188 return 0; 3189} 3190 3191static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count) 3192{ 3193 struct sky2_hw *hw = sky2->hw; 3194 unsigned port = sky2->port; 3195 int i; 3196 3197 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 3198 | (u64) gma_read32(hw, port, GM_TXO_OK_LO); 3199 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32 3200 | (u64) gma_read32(hw, port, GM_RXO_OK_LO); 3201 3202 for (i = 2; i < count; i++) 3203 data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset); 3204} 3205 3206static void sky2_set_msglevel(struct net_device *netdev, u32 value) 3207{ 3208 struct sky2_port *sky2 = netdev_priv(netdev); 3209 sky2->msg_enable = value; 3210} 3211 3212static int sky2_get_sset_count(struct net_device *dev, int sset) 3213{ 3214 switch (sset) { 3215 case ETH_SS_STATS: 3216 return ARRAY_SIZE(sky2_stats); 3217 default: 3218 return -EOPNOTSUPP; 3219 } 3220} 3221 3222static void sky2_get_ethtool_stats(struct net_device *dev, 3223 struct ethtool_stats *stats, u64 * data) 3224{ 3225 struct sky2_port *sky2 = netdev_priv(dev); 3226 3227 sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats)); 3228} 3229 3230static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data) 3231{ 3232 int i; 3233 3234 switch (stringset) { 3235 case ETH_SS_STATS: 3236 for (i = 0; i < ARRAY_SIZE(sky2_stats); i++) 3237 memcpy(data + i * ETH_GSTRING_LEN, 3238 sky2_stats[i].name, ETH_GSTRING_LEN); 3239 break; 3240 } 3241} 3242 3243static int 
sky2_set_mac_address(struct net_device *dev, void *p)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	const struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	/* mirror the new address into both per-port MAC address
	 * register blocks
	 */
	memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
		    dev->dev_addr, ETH_ALEN);
	memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
		    dev->dev_addr, ETH_ALEN);

	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);

	return 0;
}

/* Set the bit corresponding to addr's CRC in the 64-bit hash filter */
static void inline sky2_add_filter(u8 filter[8], const u8 *addr)
{
	u32 bit;

	bit = ether_crc(ETH_ALEN, addr) & 63;
	filter[bit >> 3] |= 1 << (bit & 7);
}

/* Program receive filtering from the device flags and multicast list.
 * When receive flow control is active the pause multicast group
 * address must also pass the hash filter.
 */
static void sky2_set_multicast(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	struct dev_mc_list *list = dev->mc_list;
	u16 reg;
	u8 filter[8];
	int rx_pause;
	static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };

	rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
	memset(filter, 0, sizeof(filter));

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA;

	if (dev->flags & IFF_PROMISC)	/* promiscuous */
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if (dev->flags & IFF_ALLMULTI)
		memset(filter, 0xff, sizeof(filter));
	else if (dev->mc_count == 0 && !rx_pause)
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		int i;
		reg |= GM_RXCR_MCF_ENA;

		if (rx_pause)
			sky2_add_filter(filter, pause_mc_addr);

		for (i = 0; list && i < dev->mc_count; i++, list = list->next)
			sky2_add_filter(filter, list->dmi_addr);
	}

	gma_write16(hw, port, GM_MC_ADDR_H1,
		    (u16) filter[0] | ((u16) filter[1] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H2,
		    (u16) filter[2] | ((u16) filter[3] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H3,
		    (u16) filter[4] | ((u16) filter[5] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H4,
		    (u16) filter[6] | ((u16) filter[7] << 8));

	gma_write16(hw, port, GM_RX_CTRL, reg);
}

/* Can have one global because blinking is controlled by
 * ethtool and that is always under RTNL mutex
 */
static void sky2_led(struct sky2_hw *hw, unsigned port, int on)
{
	u16 pg;

	switch (hw->chip_id) {
	case CHIP_ID_YUKON_XL:
		/* on XL the LED control register lives on PHY page 3;
		 * save and restore the page select around the access
		 */
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
			     on ? (PHY_M_LEDC_LOS_CTRL(1) |
				   PHY_M_LEDC_INIT_CTRL(7) |
				   PHY_M_LEDC_STA1_CTRL(7) |
				   PHY_M_LEDC_STA0_CTRL(7))
			     : 0);

		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
		break;

	default:
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
			     on ?
PHY_M_LED_ALL : 0);
	}
}

/* blink LED's for finding board */
static int sky2_phys_id(struct net_device *dev, u32 data)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 ledctrl, ledover = 0;
	long ms;
	int interrupted;
	int onoff = 1;

	/* data is seconds to blink; 0 means "until interrupted" */
	if (!data || data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))
		ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT);
	else
		ms = data * 1000;

	/* save initial values */
	spin_lock_bh(&sky2->phy_lock);
	if (hw->chip_id == CHIP_ID_YUKON_XL) {
		u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
		ledctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	} else {
		ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL);
		ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER);
	}

	interrupted = 0;
	while (!interrupted && ms > 0) {
		sky2_led(hw, port, onoff);
		onoff = !onoff;

		/* drop the PHY lock across the sleep */
		spin_unlock_bh(&sky2->phy_lock);
		interrupted = msleep_interruptible(250);
		spin_lock_bh(&sky2->phy_lock);

		ms -= 250;
	}

	/* resume regularly scheduled programming */
	if (hw->chip_id == CHIP_ID_YUKON_XL) {
		u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ledctrl);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	} else {
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
		gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
	}
	spin_unlock_bh(&sky2->phy_lock);

	return 0;
}

/* ethtool: report current flow control mode */
static void sky2_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	switch (sky2->flow_mode) {
	case FC_NONE:
		ecmd->tx_pause = ecmd->rx_pause = 0;
		break;
	case FC_TX:
		ecmd->tx_pause = 1, ecmd->rx_pause = 0;
		break;
	case FC_RX:
		ecmd->tx_pause = 0, ecmd->rx_pause = 1;
		break;
	case FC_BOTH:
		ecmd->tx_pause = ecmd->rx_pause = 1;
	}

	ecmd->autoneg = sky2->autoneg;
}

/* ethtool: set flow control mode; takes effect via PHY reinit */
static int sky2_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	sky2->autoneg = ecmd->autoneg;
	sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause);

	if (netif_running(dev))
		sky2_phy_reinit(sky2);

	return 0;
}

/* ethtool: report interrupt moderation settings.
 * A stopped timer is reported as 0 usecs.
 */
static int sky2_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
		ecmd->tx_coalesce_usecs = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
		ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
	}
	ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);

	if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
		ecmd->rx_coalesce_usecs = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
		ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
	}
	ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);

	if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
		ecmd->rx_coalesce_usecs_irq = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
		ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
	}

	ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);

	return 0;
}

/* Note: this affects both ports (the status unit timers are shared) */
static int sky2_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	const u32 tmax = sky2_clk2us(hw,
0x0ffffff);

	if (ecmd->tx_coalesce_usecs > tmax ||
	    ecmd->rx_coalesce_usecs > tmax ||
	    ecmd->rx_coalesce_usecs_irq > tmax)
		return -EINVAL;

	if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
		return -EINVAL;
	if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
		return -EINVAL;
	if (ecmd->rx_max_coalesced_frames_irq >RX_MAX_PENDING)
		return -EINVAL;

	/* usecs == 0 stops the timer, otherwise program and start it */
	if (ecmd->tx_coalesce_usecs == 0)
		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
	else {
		sky2_write32(hw, STAT_TX_TIMER_INI,
			     sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
	}
	sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);

	if (ecmd->rx_coalesce_usecs == 0)
		sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
	else {
		sky2_write32(hw, STAT_LEV_TIMER_INI,
			     sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
		sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
	}
	sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);

	if (ecmd->rx_coalesce_usecs_irq == 0)
		sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
	else {
		sky2_write32(hw, STAT_ISR_TIMER_INI,
			     sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
		sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
	}
	sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
	return 0;
}

/* ethtool: report ring sizes (no mini/jumbo rings on this hardware) */
static void sky2_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	ering->rx_max_pending = RX_MAX_PENDING;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_RING_SIZE - 1;

	ering->rx_pending = sky2->rx_pending;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = sky2->tx_pending;
}

/* ethtool: resize rings; requires bringing the port down and up */
static int sky2_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	int err = 0;

	if (ering->rx_pending > RX_MAX_PENDING ||
	    ering->rx_pending < 8 ||
	    ering->tx_pending < MAX_SKB_TX_LE ||
	    ering->tx_pending > TX_RING_SIZE - 1)
		return -EINVAL;

	if (netif_running(dev))
		sky2_down(dev);

	sky2->rx_pending = ering->rx_pending;
	sky2->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		err = sky2_up(dev);
		if (err)
			dev_close(dev);
	}

	return err;
}

static int sky2_get_regs_len(struct net_device *dev)
{
	return 0x4000;
}

/*
 * Returns copy of control register region
 * Note: ethtool_get_regs always provides full size (16k) buffer
 */
static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	const struct sky2_port *sky2 = netdev_priv(dev);
	const void __iomem *io = sky2->hw->regs;
	unsigned int b;

	regs->version = 1;

	/* walk the 16k register space in 128-byte blocks */
	for (b = 0; b < 128; b++) {
		/* This complicated switch statement is to make sure and
		 * only access regions that are unreserved.
		 * Some blocks are only valid on dual port cards.
		 * and block 3 has some special diagnostic registers that
		 * are poison.
		 */
		switch (b) {
		case 3:
			/* skip diagnostic ram region */
			memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
			break;

		/* dual port cards only */
		case 5:		/* Tx Arbiter 2 */
		case 9:		/* RX2 */
		case 14 ... 15:	/* TX2 */
		case 17: case 19: /* Ram Buffer 2 */
		case 22 ... 23: /* Tx Ram Buffer 2 */
		case 25:	/* Rx MAC Fifo 2 */
		case 27:	/* Tx MAC Fifo 2 */
		case 31:	/* GPHY 2 */
		case 40 ... 47: /* Pattern Ram 2 */
		case 52: case 54: /* TCP Segmentation 2 */
		case 112 ...
116: /* GMAC 2 */ 3610 if (sky2->hw->ports == 1) 3611 goto reserved; 3612 /* fall through */ 3613 case 0: /* Control */ 3614 case 2: /* Mac address */ 3615 case 4: /* Tx Arbiter 1 */ 3616 case 7: /* PCI express reg */ 3617 case 8: /* RX1 */ 3618 case 12 ... 13: /* TX1 */ 3619 case 16: case 18:/* Rx Ram Buffer 1 */ 3620 case 20 ... 21: /* Tx Ram Buffer 1 */ 3621 case 24: /* Rx MAC Fifo 1 */ 3622 case 26: /* Tx MAC Fifo 1 */ 3623 case 28 ... 29: /* Descriptor and status unit */ 3624 case 30: /* GPHY 1*/ 3625 case 32 ... 39: /* Pattern Ram 1 */ 3626 case 48: case 50: /* TCP Segmentation 1 */ 3627 case 56 ... 60: /* PCI space */ 3628 case 80 ... 84: /* GMAC 1 */ 3629 memcpy_fromio(p, io, 128); 3630 break; 3631 default: 3632reserved: 3633 memset(p, 0, 128); 3634 } 3635 3636 p += 128; 3637 io += 128; 3638 } 3639} 3640 3641/* In order to do Jumbo packets on these chips, need to turn off the 3642 * transmit store/forward. Therefore checksum offload won't work. 3643 */ 3644static int no_tx_offload(struct net_device *dev) 3645{ 3646 const struct sky2_port *sky2 = netdev_priv(dev); 3647 const struct sky2_hw *hw = sky2->hw; 3648 3649 return dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U; 3650} 3651 3652static int sky2_set_tx_csum(struct net_device *dev, u32 data) 3653{ 3654 if (data && no_tx_offload(dev)) 3655 return -EINVAL; 3656 3657 return ethtool_op_set_tx_csum(dev, data); 3658} 3659 3660 3661static int sky2_set_tso(struct net_device *dev, u32 data) 3662{ 3663 if (data && no_tx_offload(dev)) 3664 return -EINVAL; 3665 3666 return ethtool_op_set_tso(dev, data); 3667} 3668 3669static int sky2_get_eeprom_len(struct net_device *dev) 3670{ 3671 struct sky2_port *sky2 = netdev_priv(dev); 3672 struct sky2_hw *hw = sky2->hw; 3673 u16 reg2; 3674 3675 reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); 3676 return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); 3677} 3678 3679static u32 sky2_vpd_read(struct sky2_hw *hw, int cap, u16 offset) 3680{ 3681 u32 val; 3682 3683 
sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); 3684 3685 do { 3686 offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR); 3687 } while (!(offset & PCI_VPD_ADDR_F)); 3688 3689 val = sky2_pci_read32(hw, cap + PCI_VPD_DATA); 3690 return val; 3691} 3692 3693static void sky2_vpd_write(struct sky2_hw *hw, int cap, u16 offset, u32 val) 3694{ 3695 sky2_pci_write16(hw, cap + PCI_VPD_DATA, val); 3696 sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); 3697 do { 3698 offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR); 3699 } while (offset & PCI_VPD_ADDR_F); 3700} 3701 3702static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 3703 u8 *data) 3704{ 3705 struct sky2_port *sky2 = netdev_priv(dev); 3706 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); 3707 int length = eeprom->len; 3708 u16 offset = eeprom->offset; 3709 3710 if (!cap) 3711 return -EINVAL; 3712 3713 eeprom->magic = SKY2_EEPROM_MAGIC; 3714 3715 while (length > 0) { 3716 u32 val = sky2_vpd_read(sky2->hw, cap, offset); 3717 int n = min_t(int, length, sizeof(val)); 3718 3719 memcpy(data, &val, n); 3720 length -= n; 3721 data += n; 3722 offset += n; 3723 } 3724 return 0; 3725} 3726 3727static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 3728 u8 *data) 3729{ 3730 struct sky2_port *sky2 = netdev_priv(dev); 3731 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); 3732 int length = eeprom->len; 3733 u16 offset = eeprom->offset; 3734 3735 if (!cap) 3736 return -EINVAL; 3737 3738 if (eeprom->magic != SKY2_EEPROM_MAGIC) 3739 return -EINVAL; 3740 3741 while (length > 0) { 3742 u32 val; 3743 int n = min_t(int, length, sizeof(val)); 3744 3745 if (n < sizeof(val)) 3746 val = sky2_vpd_read(sky2->hw, cap, offset); 3747 memcpy(&val, data, n); 3748 3749 sky2_vpd_write(sky2->hw, cap, offset, val); 3750 3751 length -= n; 3752 data += n; 3753 offset += n; 3754 } 3755 return 0; 3756} 3757 3758 3759static const struct ethtool_ops sky2_ethtool_ops = 
{
	.get_settings = sky2_get_settings,
	.set_settings = sky2_set_settings,
	.get_drvinfo = sky2_get_drvinfo,
	.get_wol = sky2_get_wol,
	.set_wol = sky2_set_wol,
	.get_msglevel = sky2_get_msglevel,
	.set_msglevel = sky2_set_msglevel,
	.nway_reset = sky2_nway_reset,
	.get_regs_len = sky2_get_regs_len,
	.get_regs = sky2_get_regs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = sky2_get_eeprom_len,
	.get_eeprom = sky2_get_eeprom,
	.set_eeprom = sky2_set_eeprom,
	.set_sg = ethtool_op_set_sg,
	.set_tx_csum = sky2_set_tx_csum,
	.set_tso = sky2_set_tso,
	.get_rx_csum = sky2_get_rx_csum,
	.set_rx_csum = sky2_set_rx_csum,
	.get_strings = sky2_get_strings,
	.get_coalesce = sky2_get_coalesce,
	.set_coalesce = sky2_set_coalesce,
	.get_ringparam = sky2_get_ringparam,
	.set_ringparam = sky2_set_ringparam,
	.get_pauseparam = sky2_get_pauseparam,
	.set_pauseparam = sky2_set_pauseparam,
	.phys_id = sky2_phys_id,
	.get_sset_count = sky2_get_sset_count,
	.get_ethtool_stats = sky2_get_ethtool_stats,
};

#ifdef CONFIG_SKY2_DEBUG

static struct dentry *sky2_debug;

/* debugfs: dump interrupt state, the status ring, the pending part
 * of the tx ring, and the hardware rx ring indexes.
 */
static int sky2_debug_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	const struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	unsigned idx, last;
	int sop;

	if (!netif_running(dev))
		return -ENETDOWN;

	seq_printf(seq, "IRQ src=%x mask=%x control=%x\n",
		   sky2_read32(hw, B0_ISRC),
		   sky2_read32(hw, B0_IMSK),
		   sky2_read32(hw, B0_Y2_SP_ICR));

	/* keep NAPI from consuming the rings while we walk them */
	napi_disable(&hw->napi);
	last = sky2_read16(hw, STAT_PUT_IDX);

	if (hw->st_idx == last)
		seq_puts(seq, "Status ring (empty)\n");
	else {
		seq_puts(seq, "Status ring\n");
		for (idx = hw->st_idx; idx != last && idx < STATUS_RING_SIZE;
		     idx = RING_NEXT(idx, STATUS_RING_SIZE)) {
			const struct sky2_status_le *le = hw->st_le + idx;
			seq_printf(seq, "[%d] %#x %d %#x\n",
				   idx, le->opcode, le->length, le->status);
		}
		seq_puts(seq, "\n");
	}

	seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n",
		   sky2->tx_cons, sky2->tx_prod,
		   sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
		   sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE)));

	/* Dump contents of tx ring */
	sop = 1;
	for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < TX_RING_SIZE;
	     idx = RING_NEXT(idx, TX_RING_SIZE)) {
		const struct sky2_tx_le *le = sky2->tx_le + idx;
		u32 a = le32_to_cpu(le->addr);

		/* print the ring index once per packet (start-of-packet) */
		if (sop)
			seq_printf(seq, "%u:", idx);
		sop = 0;

		switch(le->opcode & ~HW_OWNER) {
		case OP_ADDR64:
			seq_printf(seq, " %#x:", a);
			break;
		case OP_LRGLEN:
			seq_printf(seq, " mtu=%d", a);
			break;
		case OP_VLAN:
			seq_printf(seq, " vlan=%d", be16_to_cpu(le->length));
			break;
		case OP_TCPLISW:
			seq_printf(seq, " csum=%#x", a);
			break;
		case OP_LARGESEND:
			seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length));
			break;
		case OP_PACKET:
			seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length));
			break;
		case OP_BUFFER:
			seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length));
			break;
		default:
			seq_printf(seq, " op=%#x,%#x(%d)", le->opcode,
				   a, le16_to_cpu(le->length));
		}

		if (le->ctrl & EOP) {
			seq_putc(seq, '\n');
			sop = 1;
		}
	}

	seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n",
		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)),
		   last = sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)),
		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX)));

	sky2_read32(hw, B0_Y2_SP_LISR);
	napi_enable(&hw->napi);
	return 0;
}

static int
sky2_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, sky2_debug_show, inode->i_private);
}

static const struct file_operations sky2_debug_fops = {
	.owner = THIS_MODULE,
	.open = sky2_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * Use network device events to create/remove/rename
 * debugfs file entries
 */
static int sky2_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct sky2_port *sky2 = netdev_priv(dev);

	/* only react to sky2 devices (recognized by their open method) */
	if (dev->open != sky2_up || !sky2_debug)
		return NOTIFY_DONE;

	switch(event) {
	case NETDEV_CHANGENAME:
		if (sky2->debugfs) {
			sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
						       sky2_debug, dev->name);
		}
		break;

	case NETDEV_GOING_DOWN:
		if (sky2->debugfs) {
			printk(KERN_DEBUG PFX "%s: remove debugfs\n",
			       dev->name);
			debugfs_remove(sky2->debugfs);
			sky2->debugfs = NULL;
		}
		break;

	case NETDEV_UP:
		sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO,
						    sky2_debug, dev,
						    &sky2_debug_fops);
		if (IS_ERR(sky2->debugfs))
			sky2->debugfs = NULL;
	}

	return NOTIFY_DONE;
}

static struct notifier_block sky2_notifier = {
	.notifier_call = sky2_device_event,
};


/* create the debugfs "sky2" directory and hook netdev events */
static __init void sky2_debug_init(void)
{
	struct dentry *ent;

	ent = debugfs_create_dir("sky2", NULL);
	if (!ent || IS_ERR(ent))
		return;

	sky2_debug = ent;
	register_netdevice_notifier(&sky2_notifier);
}

static __exit void sky2_debug_cleanup(void)
{
	if (sky2_debug) {
		unregister_netdevice_notifier(&sky2_notifier);
		debugfs_remove(sky2_debug);
		sky2_debug = NULL;
	}
}

#else
#define sky2_debug_init()
#define sky2_debug_cleanup()
#endif


/* Initialize network device */
static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
						     unsigned port,
						     int highmem, int wol)
{
	struct sky2_port *sky2;
	struct net_device *dev = alloc_etherdev(sizeof(*sky2));

	if (!dev) {
		dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
		return NULL;
	}

	SET_NETDEV_DEV(dev, &hw->pdev->dev);
	dev->irq = hw->pdev->irq;
	dev->open = sky2_up;
	dev->stop = sky2_down;
	dev->do_ioctl = sky2_ioctl;
	dev->hard_start_xmit = sky2_xmit_frame;
	dev->set_multicast_list = sky2_set_multicast;
	dev->set_mac_address = sky2_set_mac_address;
	dev->change_mtu = sky2_change_mtu;
	SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
	dev->tx_timeout = sky2_tx_timeout;
	dev->watchdog_timeo = TX_WATCHDOG;
#ifdef CONFIG_NET_POLL_CONTROLLER
	/* netpoll handler is registered once, on the first port */
	if (port == 0)
		dev->poll_controller = sky2_netpoll;
#endif

	sky2 = netdev_priv(dev);
	sky2->netdev = dev;
	sky2->hw = hw;
	sky2->msg_enable = netif_msg_init(debug, default_msg);

	/* Auto speed and flow control */
	sky2->autoneg = AUTONEG_ENABLE;
	sky2->flow_mode = FC_BOTH;

	sky2->duplex = -1;
	sky2->speed = -1;
	sky2->advertising = sky2_supported_modes(hw);
	/* rx checksum default: off on Yukon XL, on elsewhere */
	sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
	sky2->wol = wol;

	spin_lock_init(&sky2->phy_lock);
	sky2->tx_pending = TX_DEF_PENDING;
	sky2->rx_pending = RX_DEF_PENDING;

	hw->dev[port] = dev;

	sky2->port = port;

	dev->features |= NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_SG;
	if (highmem)
		dev->features |= NETIF_F_HIGHDMA;

#ifdef SKY2_VLAN_TAG_USED
	/* The workaround for FE+ status conflicts with VLAN tag detection.
*/
	if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
	      sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		dev->vlan_rx_register = sky2_vlan_rx_register;
	}
#endif

	/* read the mac address */
	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	return dev;
}

/* log the MAC address at probe time when probe messages are enabled */
static void __devinit sky2_show_addr(struct net_device *dev)
{
	const struct sky2_port *sky2 = netdev_priv(dev);
	DECLARE_MAC_BUF(mac);

	if (netif_msg_probe(sky2))
		printk(KERN_INFO PFX "%s: addr %s\n",
		       dev->name, print_mac(mac, dev->dev_addr));
}

/* Handle software interrupt used during MSI test */
static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
{
	struct sky2_hw *hw = dev_id;
	u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);

	if (status == 0)
		return IRQ_NONE;

	if (status & Y2_IS_IRQ_SW) {
		/* software IRQ arrived via MSI: mark success and clear it */
		hw->flags |= SKY2_HW_USE_MSI;
		wake_up(&hw->msi_wait);
		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
	}
	sky2_write32(hw, B0_Y2_SP_ICR, 2);

	return IRQ_HANDLED;
}

/* Test interrupt path by forcing a software IRQ */
static int __devinit sky2_test_msi(struct sky2_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	int err;

	init_waitqueue_head (&hw->msi_wait);

	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);

	err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
	if (err) {
		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
		return err;
	}

	/* trigger the software IRQ and wait for sky2_test_intr to see it */
	sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
	sky2_read8(hw, B0_CTST);

	wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10);

	if (!(hw->flags & SKY2_HW_USE_MSI)) {
		/* MSI test failed, go back to INTx mode */
		dev_info(&pdev->dev, "No interrupt generated using MSI, "
			 "switching to INTx mode.\n");

		err = -EOPNOTSUPP;
		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
	}

	sky2_write32(hw, B0_IMSK, 0);
	sky2_read32(hw, B0_IMSK);

	free_irq(pdev->irq, hw);

	return err;
}

/* Was PME (wake) left enabled by firmware in the PCI PM control reg? */
static int __devinit pci_wake_enabled(struct pci_dev *dev)
{
	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	u16 value;

	if (!pm)
		return 0;
	if (pci_read_config_word(dev, pm + PCI_PM_CTRL, &value))
		return 0;
	return value & PCI_PM_CTRL_PME_ENABLE;
}

static int __devinit sky2_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct sky2_hw *hw;
	int err, using_dac = 0, wol_default;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	/* prefer 64 bit DMA when dma_addr_t is wide enough */
	if (sizeof(dma_addr_t) > sizeof(u32) &&
	    !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
		using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err < 0) {
			dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
				"for consistent allocations\n");
			goto err_out_free_regions;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto err_out_free_regions;
		}
	}

	wol_default = pci_wake_enabled(pdev) ?
WAKE_MAGIC : 0;

	err = -ENOMEM;
	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw) {
		dev_err(&pdev->dev, "cannot allocate hardware struct\n");
		goto err_out_free_regions;
	}

	hw->pdev = pdev;

	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
	if (!hw->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		goto err_out_free_hw;
	}

#ifdef __BIG_ENDIAN
	/* The sk98lin vendor driver uses hardware byte swapping but
	 * this driver uses software swapping.
	 */
	{
		u32 reg;
		reg = sky2_pci_read32(hw, PCI_DEV_REG2);
		reg &= ~PCI_REV_DESC;
		sky2_pci_write32(hw, PCI_DEV_REG2, reg);
	}
#endif

	/* ring for status responses */
	hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma);
	if (!hw->st_le)
		goto err_out_iounmap;

	err = sky2_init(hw);
	if (err)
		goto err_out_iounmap;

	dev_info(&pdev->dev, "v%s addr 0x%llx irq %d Yukon-%s (0x%x) rev %d\n",
		 DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0),
		 pdev->irq, yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
		 hw->chip_id, hw->chip_rev);

	sky2_reset(hw);

	dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_pci;
	}

	/* try MSI; fall back to INTx when the MSI self-test fails */
	if (!disable_msi && pci_enable_msi(pdev) == 0) {
		err = sky2_test_msi(hw);
		if (err == -EOPNOTSUPP)
			pci_disable_msi(pdev);
		else if (err)
			goto err_out_free_netdev;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "cannot register net device\n");
		goto err_out_free_netdev;
	}

	netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);

	err = request_irq(pdev->irq, sky2_intr,
			  (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
			  dev->name, hw);
	if (err) {
		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
		goto err_out_unregister;
	}
	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
	napi_enable(&hw->napi);

	sky2_show_addr(dev);

	/* second port is optional: failure to set it up is not fatal */
	if (hw->ports > 1) {
		struct net_device *dev1;

		dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
		if (!dev1)
			dev_warn(&pdev->dev, "allocation for second device failed\n");
		else if ((err = register_netdev(dev1))) {
			dev_warn(&pdev->dev,
				 "register of second port failed (%d)\n", err);
			hw->dev[1] = NULL;
			free_netdev(dev1);
		} else
			sky2_show_addr(dev1);
	}

	setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
	INIT_WORK(&hw->restart_work, sky2_restart);

	pci_set_drvdata(pdev, hw);

	return 0;

	/* unwind in reverse acquisition order */
err_out_unregister:
	if (hw->flags & SKY2_HW_USE_MSI)
		pci_disable_msi(pdev);
	unregister_netdev(dev);
err_out_free_netdev:
	free_netdev(dev);
err_out_free_pci:
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
err_out_iounmap:
	iounmap(hw->regs);
err_out_free_hw:
	kfree(hw);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit sky2_remove(struct pci_dev *pdev)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int i;

	if (!hw)
		return;

	del_timer_sync(&hw->watchdog_timer);
	cancel_work_sync(&hw->restart_work);

	for (i = hw->ports-1; i >= 0; --i)
		unregister_netdev(hw->dev[i]);

	sky2_write32(hw, B0_IMSK, 0);

	sky2_power_aux(hw);

	/* turn the status LED off and put the chip in reset */
	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	sky2_read8(hw, B0_CTST);

	free_irq(pdev->irq, hw);
	if (hw->flags & SKY2_HW_USE_MSI)
		pci_disable_msi(pdev);
	pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	for (i = hw->ports-1; i >= 0; --i)
		free_netdev(hw->dev[i]);

	iounmap(hw->regs);
	kfree(hw);

	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_PM
/* bring both ports down, arm wake-on-lan if configured, and enter
 * the requested low power state.
 */
static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int i, wol = 0;

	if (!hw)
		return 0;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct sky2_port *sky2 = netdev_priv(dev);

		if (netif_running(dev))
			sky2_down(dev);

		if (sky2->wol)
			sky2_wol_init(sky2);

		wol |= sky2->wol;
	}

	sky2_write32(hw, B0_IMSK, 0);
	napi_disable(&hw->napi);
	sky2_power_aux(hw);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

/* restore power state, reset the chip, and restart any ports that
 * were running before suspend.
 */
static int sky2_resume(struct pci_dev *pdev)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int i, err;

	if (!hw)
		return 0;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		goto out;

	err = pci_restore_state(pdev);
	if (err)
		goto out;

	pci_enable_wake(pdev, PCI_D0, 0);

	/* Re-enable all clocks */
	if (hw->chip_id == CHIP_ID_YUKON_EX ||
	    hw->chip_id == CHIP_ID_YUKON_EC_U ||
	    hw->chip_id == CHIP_ID_YUKON_FE_P)
		sky2_pci_write32(hw, PCI_DEV_REG3, 0);

	sky2_reset(hw);
	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
	napi_enable(&hw->napi);

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		if (netif_running(dev)) {
			err = sky2_up(dev);
			if (err) {
				printk(KERN_ERR PFX "%s: could not up: %d\n",
				       dev->name, err);
				dev_close(dev);
				goto out;
			}
		}
	}

	return 0;
out:
	dev_err(&pdev->dev, "resume failed (%d)\n", err);
	pci_disable_device(pdev);
	return err;
}
#endif

/* arm wake-on-lan (if configured) and power the device down at
 * system shutdown.
 */
static void sky2_shutdown(struct pci_dev *pdev)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int i, wol = 0;

	if (!hw)
		return;

	del_timer_sync(&hw->watchdog_timer);

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct sky2_port *sky2 = netdev_priv(dev);

		if (sky2->wol) {
			wol = 1;
			sky2_wol_init(sky2);
		}
	}

	if (wol)
		sky2_power_aux(hw);

	pci_enable_wake(pdev, PCI_D3hot, wol);
	pci_enable_wake(pdev, PCI_D3cold, wol);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

}

static struct pci_driver sky2_driver = {
	.name = DRV_NAME,
	.id_table = sky2_id_table,
	.probe = sky2_probe,
	.remove = __devexit_p(sky2_remove),
#ifdef CONFIG_PM
	.suspend = sky2_suspend,
	.resume = sky2_resume,
#endif
	.shutdown = sky2_shutdown,
};

static int __init sky2_init_module(void)
{
	sky2_debug_init();
	return pci_register_driver(&sky2_driver);
}

static void __exit sky2_cleanup_module(void)
{
	pci_unregister_driver(&sky2_driver);
	sky2_debug_cleanup();
}

module_init(sky2_init_module);
module_exit(sky2_cleanup_module);

MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);