Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.28-rc3 4182 lines 109 kB view raw
1/* 2 * New driver for Marvell Yukon chipset and SysKonnect Gigabit 3 * Ethernet adapters. Based on earlier sk98lin, e100 and 4 * FreeBSD if_sk drivers. 5 * 6 * This driver intentionally does not support all the features 7 * of the original driver such as link fail-over and link management because 8 * those should be done at higher levels. 9 * 10 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org> 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2 of the License. 15 * 16 * This program is distributed in the hope that it will be useful, 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * GNU General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, write to the Free Software 23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 24 */ 25 26#include <linux/in.h> 27#include <linux/kernel.h> 28#include <linux/module.h> 29#include <linux/moduleparam.h> 30#include <linux/netdevice.h> 31#include <linux/etherdevice.h> 32#include <linux/ethtool.h> 33#include <linux/pci.h> 34#include <linux/if_vlan.h> 35#include <linux/ip.h> 36#include <linux/delay.h> 37#include <linux/crc32.h> 38#include <linux/dma-mapping.h> 39#include <linux/debugfs.h> 40#include <linux/seq_file.h> 41#include <linux/mii.h> 42#include <asm/irq.h> 43 44#include "skge.h" 45 46#define DRV_NAME "skge" 47#define DRV_VERSION "1.13" 48#define PFX DRV_NAME " " 49 50#define DEFAULT_TX_RING_SIZE 128 51#define DEFAULT_RX_RING_SIZE 512 52#define MAX_TX_RING_SIZE 1024 53#define TX_LOW_WATER (MAX_SKB_FRAGS + 1) 54#define MAX_RX_RING_SIZE 4096 55#define RX_COPY_THRESHOLD 128 56#define RX_BUF_SIZE 1536 57#define PHY_RETRIES 1000 58#define ETH_JUMBO_MTU 9000 59#define TX_WATCHDOG (5 * HZ) 60#define NAPI_WEIGHT 64 61#define BLINK_MS 250 62#define LINK_HZ HZ 63 64#define SKGE_EEPROM_MAGIC 0x9933aabb 65 66 67MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); 68MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); 69MODULE_LICENSE("GPL"); 70MODULE_VERSION(DRV_VERSION); 71 72static const u32 default_msg 73 = NETIF_MSG_DRV| NETIF_MSG_PROBE| NETIF_MSG_LINK 74 | NETIF_MSG_IFUP| NETIF_MSG_IFDOWN; 75 76static int debug = -1; /* defaults above */ 77module_param(debug, int, 0); 78MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 79 80static const struct pci_device_id skge_id_table[] = { 81 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) }, 82 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) }, 83 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) }, 84 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) }, 85 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T) }, 86 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */ 87 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, 88 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ 89 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) }, 90 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) }, 91 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, 92 { 0 } 93}; 94MODULE_DEVICE_TABLE(pci, skge_id_table); 95 96static int skge_up(struct net_device *dev); 97static int skge_down(struct net_device *dev); 98static void skge_phy_reset(struct skge_port *skge); 99static void skge_tx_clean(struct net_device *dev); 100static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); 101static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); 102static void genesis_get_stats(struct skge_port *skge, u64 *data); 103static void yukon_get_stats(struct skge_port *skge, u64 *data); 104static void yukon_init(struct skge_hw *hw, int port); 105static void genesis_mac_init(struct skge_hw *hw, int port); 106static void genesis_link_up(struct skge_port *skge); 107 108/* Avoid conditionals by using array */ 109static const int txqaddr[] = { Q_XA1, Q_XA2 }; 110static const int rxqaddr[] = { Q_R1, Q_R2 }; 111static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; 112static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; 113static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F }; 114static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 }; 115 116static int skge_get_regs_len(struct net_device *dev) 117{ 118 return 0x4000; 119} 120 121/* 122 * Returns copy of whole control register region 123 * Note: skip RAM address register because accessing it will 124 * cause bus hangs! 125 */ 126static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, 127 void *p) 128{ 129 const struct skge_port *skge = netdev_priv(dev); 130 const void __iomem *io = skge->hw->regs; 131 132 regs->version = 1; 133 memset(p, 0, regs->len); 134 memcpy_fromio(p, io, B3_RAM_ADDR); 135 136 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, 137 regs->len - B3_RI_WTO_R1); 138} 139 140/* Wake on Lan only supported on Yukon chips with rev 1 or above */ 141static u32 wol_supported(const struct skge_hw *hw) 142{ 143 if (hw->chip_id == CHIP_ID_GENESIS) 144 return 0; 145 146 if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) 147 return 0; 148 149 return WAKE_MAGIC | WAKE_PHY; 150} 151 152static u32 pci_wake_enabled(struct pci_dev *dev) 153{ 154 int pm = pci_find_capability(dev, PCI_CAP_ID_PM); 155 u16 value; 156 157 /* If device doesn't support PM Capabilities, but request is to disable 158 * wake events, it's a nop; otherwise fail */ 159 if (!pm) 160 return 0; 161 162 pci_read_config_word(dev, pm + PCI_PM_PMC, &value); 163 164 value &= PCI_PM_CAP_PME_MASK; 165 value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */ 166 167 return value != 0; 168} 169 170static void skge_wol_init(struct skge_port *skge) 171{ 172 struct skge_hw *hw = skge->hw; 173 int port = skge->port; 174 u16 ctrl; 175 176 skge_write16(hw, B0_CTST, CS_RST_CLR); 177 skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); 178 179 /* Turn on Vaux */ 180 skge_write8(hw, B0_POWER_CTRL, 181 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); 182 183 /* WA code for COMA mode -- clear PHY reset */ 184 if (hw->chip_id == CHIP_ID_YUKON_LITE && 185 hw->chip_rev >= CHIP_REV_YU_LITE_A3) { 186 u32 reg = skge_read32(hw, B2_GP_IO); 187 reg |= GP_DIR_9; 188 reg &= ~GP_IO_9; 189 skge_write32(hw, B2_GP_IO, reg); 190 } 191 192 skge_write32(hw, SK_REG(port, GPHY_CTRL), 193 GPC_DIS_SLEEP | 194 GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | 195 GPC_ANEG_1 | GPC_RST_SET); 196 197 skge_write32(hw, SK_REG(port, GPHY_CTRL), 198 GPC_DIS_SLEEP | 199 GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | 200 GPC_ANEG_1 | GPC_RST_CLR); 201 202 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); 203 204 /* Force to 10/100 skge_reset will re-enable on resume */ 205 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, 206 PHY_AN_100FULL | PHY_AN_100HALF | 207 PHY_AN_10FULL | PHY_AN_10HALF| PHY_AN_CSMA); 208 /* no 1000 HD/FD */ 209 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0); 210 gm_phy_write(hw, port, PHY_MARV_CTRL, 211 PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE | 212 PHY_CT_RE_CFG | PHY_CT_DUP_MD); 213 214 215 /* Set GMAC to no flow control and auto update for speed/duplex */ 216 gma_write16(hw, port, GM_GP_CTRL, 217 GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| 218 GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); 219 220 /* Set WOL address */ 221 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), 222 skge->netdev->dev_addr, ETH_ALEN); 223 224 /* Turn on appropriate WOL control bits */ 225 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT); 226 ctrl = 0; 227 if (skge->wol & WAKE_PHY) 228 ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; 229 else 230 ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; 231 232 if (skge->wol & WAKE_MAGIC) 233 ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; 234 else 235 ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;; 236 237 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; 238 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); 239 240 /* block receiver */ 241 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 242} 243 244static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 245{ 246 struct skge_port *skge = netdev_priv(dev); 247 248 wol->supported = wol_supported(skge->hw); 249 wol->wolopts = skge->wol; 250} 251 252static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 253{ 254 struct skge_port *skge = netdev_priv(dev); 255 struct skge_hw *hw = skge->hw; 256 257 if (wol->wolopts & ~wol_supported(hw)) 258 return -EOPNOTSUPP; 259 260 skge->wol = wol->wolopts; 261 return 0; 262} 263 264/* Determine supported/advertised modes based on hardware. 265 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx 266 */ 267static u32 skge_supported_modes(const struct skge_hw *hw) 268{ 269 u32 supported; 270 271 if (hw->copper) { 272 supported = SUPPORTED_10baseT_Half 273 | SUPPORTED_10baseT_Full 274 | SUPPORTED_100baseT_Half 275 | SUPPORTED_100baseT_Full 276 | SUPPORTED_1000baseT_Half 277 | SUPPORTED_1000baseT_Full 278 | SUPPORTED_Autoneg| SUPPORTED_TP; 279 280 if (hw->chip_id == CHIP_ID_GENESIS) 281 supported &= ~(SUPPORTED_10baseT_Half 282 | SUPPORTED_10baseT_Full 283 | SUPPORTED_100baseT_Half 284 | SUPPORTED_100baseT_Full); 285 286 else if (hw->chip_id == CHIP_ID_YUKON) 287 supported &= ~SUPPORTED_1000baseT_Half; 288 } else 289 supported = SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half 290 | SUPPORTED_FIBRE | SUPPORTED_Autoneg; 291 292 return supported; 293} 294 295static int skge_get_settings(struct net_device *dev, 296 struct ethtool_cmd *ecmd) 297{ 298 struct skge_port *skge = netdev_priv(dev); 299 struct skge_hw *hw = skge->hw; 300 301 ecmd->transceiver = XCVR_INTERNAL; 302 ecmd->supported = skge_supported_modes(hw); 303 304 if (hw->copper) { 305 ecmd->port = PORT_TP; 306 ecmd->phy_address = hw->phy_addr; 307 } else 308 ecmd->port = PORT_FIBRE; 309 310 ecmd->advertising = skge->advertising; 311 ecmd->autoneg = skge->autoneg; 312 ecmd->speed = skge->speed; 313 ecmd->duplex = skge->duplex; 314 return 0; 315} 316 317static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 318{ 319 struct skge_port *skge = netdev_priv(dev); 320 const struct skge_hw *hw = skge->hw; 321 u32 supported = skge_supported_modes(hw); 322 int err = 0; 323 324 if (ecmd->autoneg == AUTONEG_ENABLE) { 325 ecmd->advertising = supported; 326 skge->duplex = -1; 327 skge->speed = -1; 328 } else { 329 u32 setting; 330 331 switch (ecmd->speed) { 332 case SPEED_1000: 333 if (ecmd->duplex == DUPLEX_FULL) 334 setting = SUPPORTED_1000baseT_Full; 335 else if (ecmd->duplex == DUPLEX_HALF) 336 setting = SUPPORTED_1000baseT_Half; 337 else 338 return -EINVAL; 339 break; 340 case SPEED_100: 341 if (ecmd->duplex == DUPLEX_FULL) 342 setting = SUPPORTED_100baseT_Full; 343 else if (ecmd->duplex == DUPLEX_HALF) 344 setting = SUPPORTED_100baseT_Half; 345 else 346 return -EINVAL; 347 break; 348 349 case SPEED_10: 350 if (ecmd->duplex == DUPLEX_FULL) 351 setting = SUPPORTED_10baseT_Full; 352 else if (ecmd->duplex == DUPLEX_HALF) 353 setting = SUPPORTED_10baseT_Half; 354 else 355 return -EINVAL; 356 break; 357 default: 358 return -EINVAL; 359 } 360 361 if ((setting & supported) == 0) 362 return -EINVAL; 363 364 skge->speed = ecmd->speed; 365 skge->duplex = ecmd->duplex; 366 } 367 368 skge->autoneg = ecmd->autoneg; 369 skge->advertising = ecmd->advertising; 370 371 if (netif_running(dev)) { 372 skge_down(dev); 373 err = skge_up(dev); 374 if (err) { 375 dev_close(dev); 376 return err; 377 } 378 } 379 380 return (0); 381} 382 383static void skge_get_drvinfo(struct net_device *dev, 384 struct ethtool_drvinfo *info) 385{ 386 struct skge_port *skge = netdev_priv(dev); 387 388 strcpy(info->driver, DRV_NAME); 389 strcpy(info->version, DRV_VERSION); 390 strcpy(info->fw_version, "N/A"); 391 strcpy(info->bus_info, pci_name(skge->hw->pdev)); 392} 393 394static const struct skge_stat { 395 char name[ETH_GSTRING_LEN]; 396 u16 xmac_offset; 397 u16 gma_offset; 398} skge_stats[] = { 399 { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI }, 400 { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI }, 401 402 { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK }, 403 { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK }, 404 { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK }, 405 { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK }, 406 { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK }, 407 { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK }, 408 { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE }, 409 { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE }, 410 411 { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL }, 412 { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL }, 413 { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL }, 414 { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL }, 415 { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR }, 416 { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV }, 417 418 { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, 419 { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT }, 420 { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG }, 421 { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, 422 { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR }, 423}; 424 425static int skge_get_sset_count(struct net_device *dev, int sset) 426{ 427 switch (sset) { 428 case ETH_SS_STATS: 429 return ARRAY_SIZE(skge_stats); 430 default: 431 return -EOPNOTSUPP; 432 } 433} 434 435static void skge_get_ethtool_stats(struct net_device *dev, 436 struct ethtool_stats *stats, u64 *data) 437{ 438 struct skge_port *skge = netdev_priv(dev); 439 440 if (skge->hw->chip_id == CHIP_ID_GENESIS) 441 genesis_get_stats(skge, data); 442 else 443 yukon_get_stats(skge, data); 444} 445 446/* Use hardware MIB variables for critical path statistics and 447 * transmit feedback not reported at interrupt. 448 * Other errors are accounted for in interrupt handler. 449 */ 450static struct net_device_stats *skge_get_stats(struct net_device *dev) 451{ 452 struct skge_port *skge = netdev_priv(dev); 453 u64 data[ARRAY_SIZE(skge_stats)]; 454 455 if (skge->hw->chip_id == CHIP_ID_GENESIS) 456 genesis_get_stats(skge, data); 457 else 458 yukon_get_stats(skge, data); 459 460 dev->stats.tx_bytes = data[0]; 461 dev->stats.rx_bytes = data[1]; 462 dev->stats.tx_packets = data[2] + data[4] + data[6]; 463 dev->stats.rx_packets = data[3] + data[5] + data[7]; 464 dev->stats.multicast = data[3] + data[5]; 465 dev->stats.collisions = data[10]; 466 dev->stats.tx_aborted_errors = data[12]; 467 468 return &dev->stats; 469} 470 471static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) 472{ 473 int i; 474 475 switch (stringset) { 476 case ETH_SS_STATS: 477 for (i = 0; i < ARRAY_SIZE(skge_stats); i++) 478 memcpy(data + i * ETH_GSTRING_LEN, 479 skge_stats[i].name, ETH_GSTRING_LEN); 480 break; 481 } 482} 483 484static void skge_get_ring_param(struct net_device *dev, 485 struct ethtool_ringparam *p) 486{ 487 struct skge_port *skge = netdev_priv(dev); 488 489 p->rx_max_pending = MAX_RX_RING_SIZE; 490 p->tx_max_pending = MAX_TX_RING_SIZE; 491 p->rx_mini_max_pending = 0; 492 p->rx_jumbo_max_pending = 0; 493 494 p->rx_pending = skge->rx_ring.count; 495 p->tx_pending = skge->tx_ring.count; 496 p->rx_mini_pending = 0; 497 p->rx_jumbo_pending = 0; 498} 499 500static int skge_set_ring_param(struct net_device *dev, 501 struct ethtool_ringparam *p) 502{ 503 struct skge_port *skge = netdev_priv(dev); 504 int err = 0; 505 506 if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || 507 p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE) 508 return -EINVAL; 509 510 skge->rx_ring.count = p->rx_pending; 511 skge->tx_ring.count = p->tx_pending; 512 513 if (netif_running(dev)) { 514 skge_down(dev); 515 err = skge_up(dev); 516 if (err) 517 dev_close(dev); 518 } 519 520 return err; 521} 522 523static u32 skge_get_msglevel(struct net_device *netdev) 524{ 525 struct skge_port *skge = netdev_priv(netdev); 526 return skge->msg_enable; 527} 528 529static void skge_set_msglevel(struct net_device *netdev, u32 value) 530{ 531 struct skge_port *skge = netdev_priv(netdev); 532 skge->msg_enable = value; 533} 534 535static int skge_nway_reset(struct net_device *dev) 536{ 537 struct skge_port *skge = netdev_priv(dev); 538 539 if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev)) 540 return -EINVAL; 541 542 skge_phy_reset(skge); 543 return 0; 544} 545 546static int skge_set_sg(struct net_device *dev, u32 data) 547{ 548 struct skge_port *skge = netdev_priv(dev); 549 struct skge_hw *hw = skge->hw; 550 551 if (hw->chip_id == CHIP_ID_GENESIS && data) 552 return -EOPNOTSUPP; 553 return ethtool_op_set_sg(dev, data); 554} 555 556static int skge_set_tx_csum(struct net_device *dev, u32 data) 557{ 558 struct skge_port *skge = netdev_priv(dev); 559 struct skge_hw *hw = skge->hw; 560 561 if (hw->chip_id == CHIP_ID_GENESIS && data) 562 return -EOPNOTSUPP; 563 564 return ethtool_op_set_tx_csum(dev, data); 565} 566 567static u32 skge_get_rx_csum(struct net_device *dev) 568{ 569 struct skge_port *skge = netdev_priv(dev); 570 571 return skge->rx_csum; 572} 573 574/* Only Yukon supports checksum offload. */ 575static int skge_set_rx_csum(struct net_device *dev, u32 data) 576{ 577 struct skge_port *skge = netdev_priv(dev); 578 579 if (skge->hw->chip_id == CHIP_ID_GENESIS && data) 580 return -EOPNOTSUPP; 581 582 skge->rx_csum = data; 583 return 0; 584} 585 586static void skge_get_pauseparam(struct net_device *dev, 587 struct ethtool_pauseparam *ecmd) 588{ 589 struct skge_port *skge = netdev_priv(dev); 590 591 ecmd->rx_pause = (skge->flow_control == FLOW_MODE_SYMMETRIC) 592 || (skge->flow_control == FLOW_MODE_SYM_OR_REM); 593 ecmd->tx_pause = ecmd->rx_pause || (skge->flow_control == FLOW_MODE_LOC_SEND); 594 595 ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause; 596} 597 598static int skge_set_pauseparam(struct net_device *dev, 599 struct ethtool_pauseparam *ecmd) 600{ 601 struct skge_port *skge = netdev_priv(dev); 602 struct ethtool_pauseparam old; 603 int err = 0; 604 605 skge_get_pauseparam(dev, &old); 606 607 if (ecmd->autoneg != old.autoneg) 608 skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC; 609 else { 610 if (ecmd->rx_pause && ecmd->tx_pause) 611 skge->flow_control = FLOW_MODE_SYMMETRIC; 612 else if (ecmd->rx_pause && !ecmd->tx_pause) 613 skge->flow_control = FLOW_MODE_SYM_OR_REM; 614 else if (!ecmd->rx_pause && ecmd->tx_pause) 615 skge->flow_control = FLOW_MODE_LOC_SEND; 616 else 617 skge->flow_control = FLOW_MODE_NONE; 618 } 619 620 if (netif_running(dev)) { 621 skge_down(dev); 622 err = skge_up(dev); 623 if (err) { 624 dev_close(dev); 625 return err; 626 } 627 } 628 629 return 0; 630} 631 632/* Chip internal frequency for clock calculations */ 633static inline u32 hwkhz(const struct skge_hw *hw) 634{ 635 return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125; 636} 637 638/* Chip HZ to microseconds */ 639static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks) 640{ 641 return (ticks * 1000) / hwkhz(hw); 642} 643 644/* Microseconds to chip HZ */ 645static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec) 646{ 647 return hwkhz(hw) * usec / 1000; 648} 649 650static int skge_get_coalesce(struct net_device *dev, 651 struct ethtool_coalesce *ecmd) 652{ 653 struct skge_port *skge = netdev_priv(dev); 654 struct skge_hw *hw = skge->hw; 655 int port = skge->port; 656 657 ecmd->rx_coalesce_usecs = 0; 658 ecmd->tx_coalesce_usecs = 0; 659 660 if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) { 661 u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI)); 662 u32 msk = skge_read32(hw, B2_IRQM_MSK); 663 664 if (msk & rxirqmask[port]) 665 ecmd->rx_coalesce_usecs = delay; 666 if (msk & txirqmask[port]) 667 ecmd->tx_coalesce_usecs = delay; 668 } 669 670 return 0; 671} 672 673/* Note: interrupt timer is per board, but can turn on/off per port */ 674static int skge_set_coalesce(struct net_device *dev, 675 struct ethtool_coalesce *ecmd) 676{ 677 struct skge_port *skge = netdev_priv(dev); 678 struct skge_hw *hw = skge->hw; 679 int port = skge->port; 680 u32 msk = skge_read32(hw, B2_IRQM_MSK); 681 u32 delay = 25; 682 683 if (ecmd->rx_coalesce_usecs == 0) 684 msk &= ~rxirqmask[port]; 685 else if (ecmd->rx_coalesce_usecs < 25 || 686 ecmd->rx_coalesce_usecs > 33333) 687 return -EINVAL; 688 else { 689 msk |= rxirqmask[port]; 690 delay = ecmd->rx_coalesce_usecs; 691 } 692 693 if (ecmd->tx_coalesce_usecs == 0) 694 msk &= ~txirqmask[port]; 695 else if (ecmd->tx_coalesce_usecs < 25 || 696 ecmd->tx_coalesce_usecs > 33333) 697 return -EINVAL; 698 else { 699 msk |= txirqmask[port]; 700 delay = min(delay, ecmd->rx_coalesce_usecs); 701 } 702 703 skge_write32(hw, B2_IRQM_MSK, msk); 704 if (msk == 0) 705 skge_write32(hw, B2_IRQM_CTRL, TIM_STOP); 706 else { 707 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay)); 708 skge_write32(hw, B2_IRQM_CTRL, TIM_START); 709 } 710 return 0; 711} 712 713enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST }; 714static void skge_led(struct skge_port *skge, enum led_mode mode) 715{ 716 struct skge_hw *hw = skge->hw; 717 int port = skge->port; 718 719 spin_lock_bh(&hw->phy_lock); 720 if (hw->chip_id == CHIP_ID_GENESIS) { 721 switch (mode) { 722 case LED_MODE_OFF: 723 if (hw->phy_type == SK_PHY_BCOM) 724 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF); 725 else { 726 skge_write32(hw, SK_REG(port, TX_LED_VAL), 0); 727 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF); 728 } 729 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); 730 skge_write32(hw, SK_REG(port, RX_LED_VAL), 0); 731 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF); 732 break; 733 734 case LED_MODE_ON: 735 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON); 736 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON); 737 738 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); 739 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); 740 741 break; 742 743 case LED_MODE_TST: 744 skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON); 745 skge_write32(hw, SK_REG(port, RX_LED_VAL), 100); 746 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); 747 748 if (hw->phy_type == SK_PHY_BCOM) 749 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON); 750 else { 751 skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON); 752 skge_write32(hw, SK_REG(port, TX_LED_VAL), 100); 753 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); 754 } 755 756 } 757 } else { 758 switch (mode) { 759 case LED_MODE_OFF: 760 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); 761 gm_phy_write(hw, port, PHY_MARV_LED_OVER, 762 PHY_M_LED_MO_DUP(MO_LED_OFF) | 763 PHY_M_LED_MO_10(MO_LED_OFF) | 764 PHY_M_LED_MO_100(MO_LED_OFF) | 765 PHY_M_LED_MO_1000(MO_LED_OFF) | 766 PHY_M_LED_MO_RX(MO_LED_OFF)); 767 break; 768 case LED_MODE_ON: 769 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 770 PHY_M_LED_PULS_DUR(PULS_170MS) | 771 PHY_M_LED_BLINK_RT(BLINK_84MS) | 772 PHY_M_LEDC_TX_CTRL | 773 PHY_M_LEDC_DP_CTRL); 774 775 gm_phy_write(hw, port, PHY_MARV_LED_OVER, 776 PHY_M_LED_MO_RX(MO_LED_OFF) | 777 (skge->speed == SPEED_100 ? 778 PHY_M_LED_MO_100(MO_LED_ON) : 0)); 779 break; 780 case LED_MODE_TST: 781 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); 782 gm_phy_write(hw, port, PHY_MARV_LED_OVER, 783 PHY_M_LED_MO_DUP(MO_LED_ON) | 784 PHY_M_LED_MO_10(MO_LED_ON) | 785 PHY_M_LED_MO_100(MO_LED_ON) | 786 PHY_M_LED_MO_1000(MO_LED_ON) | 787 PHY_M_LED_MO_RX(MO_LED_ON)); 788 } 789 } 790 spin_unlock_bh(&hw->phy_lock); 791} 792 793/* blink LED's for finding board */ 794static int skge_phys_id(struct net_device *dev, u32 data) 795{ 796 struct skge_port *skge = netdev_priv(dev); 797 unsigned long ms; 798 enum led_mode mode = LED_MODE_TST; 799 800 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) 801 ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT / HZ) * 1000; 802 else 803 ms = data * 1000; 804 805 while (ms > 0) { 806 skge_led(skge, mode); 807 mode ^= LED_MODE_TST; 808 809 if (msleep_interruptible(BLINK_MS)) 810 break; 811 ms -= BLINK_MS; 812 } 813 814 /* back to regular LED state */ 815 skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF); 816 817 return 0; 818} 819 820static int skge_get_eeprom_len(struct net_device *dev) 821{ 822 struct skge_port *skge = netdev_priv(dev); 823 u32 reg2; 824 825 pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2); 826 return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); 827} 828 829static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset) 830{ 831 u32 val; 832 833 pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); 834 835 do { 836 pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); 837 } while (!(offset & PCI_VPD_ADDR_F)); 838 839 pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); 840 return val; 841} 842 843static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) 844{ 845 pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val); 846 pci_write_config_word(pdev, cap + PCI_VPD_ADDR, 847 offset | PCI_VPD_ADDR_F); 848 849 do { 850 pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); 851 } while (offset & PCI_VPD_ADDR_F); 852} 853 854static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 855 u8 *data) 856{ 857 struct skge_port *skge = netdev_priv(dev); 858 struct pci_dev *pdev = skge->hw->pdev; 859 int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); 860 int length = eeprom->len; 861 u16 offset = eeprom->offset; 862 863 if (!cap) 864 return -EINVAL; 865 866 eeprom->magic = SKGE_EEPROM_MAGIC; 867 868 while (length > 0) { 869 u32 val = skge_vpd_read(pdev, cap, offset); 870 int n = min_t(int, length, sizeof(val)); 871 872 memcpy(data, &val, n); 873 length -= n; 874 data += n; 875 offset += n; 876 } 877 return 0; 878} 879 880static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 881 u8 *data) 882{ 883 struct skge_port *skge = netdev_priv(dev); 884 struct pci_dev *pdev = skge->hw->pdev; 885 int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); 886 int length = eeprom->len; 887 u16 offset = eeprom->offset; 888 889 if (!cap) 890 return -EINVAL; 891 892 if (eeprom->magic != SKGE_EEPROM_MAGIC) 893 return -EINVAL; 894 895 while (length > 0) { 896 u32 val; 897 int n = min_t(int, length, sizeof(val)); 898 899 if (n < sizeof(val)) 900 val = skge_vpd_read(pdev, cap, offset); 901 memcpy(&val, data, n); 902 903 skge_vpd_write(pdev, cap, offset, val); 904 905 length -= n; 906 data += n; 907 offset += n; 908 } 909 return 0; 910} 911 912static const struct ethtool_ops skge_ethtool_ops = { 913 .get_settings = skge_get_settings, 914 .set_settings = skge_set_settings, 915 .get_drvinfo = skge_get_drvinfo, 916 .get_regs_len = skge_get_regs_len, 917 .get_regs = skge_get_regs, 918 .get_wol = skge_get_wol, 919 .set_wol = skge_set_wol, 920 .get_msglevel = skge_get_msglevel, 921 .set_msglevel = skge_set_msglevel, 922 .nway_reset = skge_nway_reset, 923 .get_link = ethtool_op_get_link, 924 .get_eeprom_len = skge_get_eeprom_len, 925 .get_eeprom = skge_get_eeprom, 926 .set_eeprom = skge_set_eeprom, 927 .get_ringparam = skge_get_ring_param, 928 .set_ringparam = skge_set_ring_param, 929 .get_pauseparam = skge_get_pauseparam, 930 .set_pauseparam = skge_set_pauseparam, 931 .get_coalesce = skge_get_coalesce, 932 .set_coalesce = skge_set_coalesce, 933 .set_sg = skge_set_sg, 934 .set_tx_csum = skge_set_tx_csum, 935 .get_rx_csum = skge_get_rx_csum, 936 .set_rx_csum = skge_set_rx_csum, 937 .get_strings = skge_get_strings, 938 .phys_id = skge_phys_id, 939 .get_sset_count = skge_get_sset_count, 940 .get_ethtool_stats = skge_get_ethtool_stats, 941}; 942 943/* 944 * Allocate ring elements and chain them together 945 * One-to-one association of board descriptors with ring elements 946 */ 947static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) 948{ 949 struct skge_tx_desc *d; 950 struct skge_element *e; 951 int i; 952 953 ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL); 954 if (!ring->start) 955 return -ENOMEM; 956 957 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { 958 e->desc = d; 959 if (i == ring->count - 1) { 960 e->next = ring->start; 961 d->next_offset = base; 962 } else { 963 e->next = e + 1; 964 d->next_offset = base + (i+1) * sizeof(*d); 965 } 966 } 967 ring->to_use = ring->to_clean = ring->start; 968 969 return 0; 970} 971 972/* Allocate and setup a new buffer for receiving */ 973static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, 974 struct sk_buff *skb, unsigned int bufsize) 975{ 976 struct skge_rx_desc *rd = e->desc; 977 u64 map; 978 979 map = pci_map_single(skge->hw->pdev, skb->data, bufsize, 980 PCI_DMA_FROMDEVICE); 981 982 rd->dma_lo = map; 983 rd->dma_hi = map >> 32; 984 e->skb = skb; 985 rd->csum1_start = ETH_HLEN; 986 rd->csum2_start = ETH_HLEN; 987 rd->csum1 = 0; 988 rd->csum2 = 0; 989 990 wmb(); 991 992 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; 993 pci_unmap_addr_set(e, mapaddr, map); 994 pci_unmap_len_set(e, maplen, bufsize); 995} 996 997/* Resume receiving using existing skb, 998 * Note: DMA address is not changed by chip. 999 * MTU not changed while receiver active. 1000 */ 1001static inline void skge_rx_reuse(struct skge_element *e, unsigned int size) 1002{ 1003 struct skge_rx_desc *rd = e->desc; 1004 1005 rd->csum2 = 0; 1006 rd->csum2_start = ETH_HLEN; 1007 1008 wmb(); 1009 1010 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size; 1011} 1012 1013 1014/* Free all buffers in receive ring, assumes receiver stopped */ 1015static void skge_rx_clean(struct skge_port *skge) 1016{ 1017 struct skge_hw *hw = skge->hw; 1018 struct skge_ring *ring = &skge->rx_ring; 1019 struct skge_element *e; 1020 1021 e = ring->start; 1022 do { 1023 struct skge_rx_desc *rd = e->desc; 1024 rd->control = 0; 1025 if (e->skb) { 1026 pci_unmap_single(hw->pdev, 1027 pci_unmap_addr(e, mapaddr), 1028 pci_unmap_len(e, maplen), 1029 PCI_DMA_FROMDEVICE); 1030 dev_kfree_skb(e->skb); 1031 e->skb = NULL; 1032 } 1033 } while ((e = e->next) != ring->start); 1034} 1035 1036 1037/* Allocate buffers for receive ring 1038 * For receive: to_clean is next received frame. 1039 */ 1040static int skge_rx_fill(struct net_device *dev) 1041{ 1042 struct skge_port *skge = netdev_priv(dev); 1043 struct skge_ring *ring = &skge->rx_ring; 1044 struct skge_element *e; 1045 1046 e = ring->start; 1047 do { 1048 struct sk_buff *skb; 1049 1050 skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN, 1051 GFP_KERNEL); 1052 if (!skb) 1053 return -ENOMEM; 1054 1055 skb_reserve(skb, NET_IP_ALIGN); 1056 skge_rx_setup(skge, e, skb, skge->rx_buf_size); 1057 } while ( (e = e->next) != ring->start); 1058 1059 ring->to_clean = ring->start; 1060 return 0; 1061} 1062 1063static const char *skge_pause(enum pause_status status) 1064{ 1065 switch(status) { 1066 case FLOW_STAT_NONE: 1067 return "none"; 1068 case FLOW_STAT_REM_SEND: 1069 return "rx only"; 1070 case FLOW_STAT_LOC_SEND: 1071 return "tx_only"; 1072 case FLOW_STAT_SYMMETRIC: /* Both station may send PAUSE */ 1073 return "both"; 1074 default: 1075 return "indeterminated"; 1076 } 1077} 1078 1079 1080static void skge_link_up(struct skge_port *skge) 1081{ 1082 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), 1083 LED_BLK_OFF|LED_SYNC_OFF|LED_ON); 1084 1085 netif_carrier_on(skge->netdev); 1086 netif_wake_queue(skge->netdev); 1087 1088 if (netif_msg_link(skge)) { 1089 printk(KERN_INFO PFX 1090 "%s: Link is up at %d Mbps, %s duplex, flow control %s\n", 1091 skge->netdev->name, skge->speed, 1092 skge->duplex == DUPLEX_FULL ? "full" : "half", 1093 skge_pause(skge->flow_status)); 1094 } 1095} 1096 1097static void skge_link_down(struct skge_port *skge) 1098{ 1099 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF); 1100 netif_carrier_off(skge->netdev); 1101 netif_stop_queue(skge->netdev); 1102 1103 if (netif_msg_link(skge)) 1104 printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name); 1105} 1106 1107 1108static void xm_link_down(struct skge_hw *hw, int port) 1109{ 1110 struct net_device *dev = hw->dev[port]; 1111 struct skge_port *skge = netdev_priv(dev); 1112 1113 xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); 1114 1115 if (netif_carrier_ok(dev)) 1116 skge_link_down(skge); 1117} 1118 1119static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) 1120{ 1121 int i; 1122 1123 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); 1124 *val = xm_read16(hw, port, XM_PHY_DATA); 1125 1126 if (hw->phy_type == SK_PHY_XMAC) 1127 goto ready; 1128 1129 for (i = 0; i < PHY_RETRIES; i++) { 1130 if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) 1131 goto ready; 1132 udelay(1); 1133 } 1134 1135 return -ETIMEDOUT; 1136 ready: 1137 *val = xm_read16(hw, port, XM_PHY_DATA); 1138 1139 return 0; 1140} 1141 1142static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) 1143{ 1144 u16 v = 0; 1145 if (__xm_phy_read(hw, port, reg, &v)) 1146 printk(KERN_WARNING PFX "%s: phy read timed out\n", 1147 hw->dev[port]->name); 1148 return v; 1149} 1150 1151static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) 1152{ 1153 int i; 1154 1155 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); 1156 for (i = 0; i < PHY_RETRIES; i++) { 1157 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) 1158 goto ready; 1159 udelay(1); 1160 } 1161 return -EIO; 1162 1163 ready: 1164 xm_write16(hw, port, XM_PHY_DATA, val); 1165 for (i = 0; i < PHY_RETRIES; i++) { 1166 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) 1167 return 0; 1168 udelay(1); 1169 } 1170 return -ETIMEDOUT; 1171} 1172 1173static void genesis_init(struct skge_hw *hw) 1174{ 1175 /* set blink source counter */ 1176 skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100); 1177 skge_write8(hw, B2_BSC_CTRL, BSC_START); 1178 1179 /* configure mac arbiter */ 1180 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); 1181 1182 /* configure mac arbiter timeout values */ 1183 skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53); 1184 skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53); 1185 skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53); 1186 skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53); 1187 1188 skge_write8(hw, B3_MA_RCINI_RX1, 0); 1189 skge_write8(hw, B3_MA_RCINI_RX2, 0); 1190 skge_write8(hw, B3_MA_RCINI_TX1, 0); 1191 skge_write8(hw, B3_MA_RCINI_TX2, 0); 1192 1193 /* configure packet arbiter timeout */ 1194 skge_write16(hw, B3_PA_CTRL, PA_RST_CLR); 1195 skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX); 1196 skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX); 1197 skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX); 1198 skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX); 1199} 1200 1201static void genesis_reset(struct skge_hw *hw, int port) 1202{ 1203 const u8 zero[8] = { 0 }; 1204 u32 reg; 1205 1206 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); 1207 1208 /* reset the statistics module */ 1209 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); 1210 xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); 1211 xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ 1212 xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ 1213 xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ 1214 1215 /* disable Broadcom PHY IRQ */ 1216 if (hw->phy_type == SK_PHY_BCOM) 1217 xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff); 1218 1219 xm_outhash(hw, port, XM_HSM, zero); 1220 1221 /* Flush TX and RX fifo */ 1222 reg = xm_read32(hw, port, XM_MODE); 1223 xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF); 1224 xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF); 1225} 1226 1227 1228/* Convert mode to MII values */ 1229static const u16 phy_pause_map[] = { 1230 [FLOW_MODE_NONE] = 0, 1231 [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM, 1232 [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP, 1233 [FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM, 1234}; 1235 1236/* special defines for FIBER (88E1011S only) */ 1237static const u16 fiber_pause_map[] = { 1238 [FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE, 1239 [FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD, 1240 [FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD, 1241 [FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD, 1242}; 1243 1244 1245/* Check status of Broadcom phy link */ 1246static void bcom_check_link(struct skge_hw *hw, int port) 1247{ 1248 struct net_device *dev = hw->dev[port]; 1249 struct skge_port *skge = netdev_priv(dev); 1250 u16 status; 1251 1252 /* read twice because of latch */ 1253 xm_phy_read(hw, port, PHY_BCOM_STAT); 1254 status = xm_phy_read(hw, port, PHY_BCOM_STAT); 1255 1256 if ((status & PHY_ST_LSYNC) == 0) { 1257 xm_link_down(hw, port); 1258 return; 1259 } 1260 1261 if (skge->autoneg == AUTONEG_ENABLE) { 1262 u16 lpa, aux; 1263 1264 if (!(status & PHY_ST_AN_OVER)) 1265 return; 1266 1267 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); 1268 if (lpa & PHY_B_AN_RF) { 1269 printk(KERN_NOTICE PFX "%s: remote fault\n", 1270 dev->name); 1271 return; 1272 } 1273 1274 aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT); 1275 1276 /* Check Duplex mismatch */ 1277 switch (aux & PHY_B_AS_AN_RES_MSK) { 1278 case PHY_B_RES_1000FD: 1279 skge->duplex = DUPLEX_FULL; 1280 break; 1281 case PHY_B_RES_1000HD: 1282 skge->duplex = DUPLEX_HALF; 1283 break; 1284 default: 1285 printk(KERN_NOTICE PFX "%s: duplex mismatch\n", 1286 dev->name); 1287 return; 1288 } 1289 1290 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 1291 switch (aux & PHY_B_AS_PAUSE_MSK) { 1292 case PHY_B_AS_PAUSE_MSK: 1293 skge->flow_status = FLOW_STAT_SYMMETRIC; 1294 break; 1295 case PHY_B_AS_PRR: 1296 skge->flow_status = FLOW_STAT_REM_SEND; 1297 break; 1298 case PHY_B_AS_PRT: 1299 skge->flow_status = FLOW_STAT_LOC_SEND; 1300 break; 1301 default: 1302 skge->flow_status = FLOW_STAT_NONE; 1303 } 1304 skge->speed = SPEED_1000; 1305 } 1306 1307 if (!netif_carrier_ok(dev)) 1308 genesis_link_up(skge); 1309} 1310 1311/* Broadcom 5400 only supports giagabit! SysKonnect did not put an additional 1312 * Phy on for 100 or 10Mbit operation 1313 */ 1314static void bcom_phy_init(struct skge_port *skge) 1315{ 1316 struct skge_hw *hw = skge->hw; 1317 int port = skge->port; 1318 int i; 1319 u16 id1, r, ext, ctl; 1320 1321 /* magic workaround patterns for Broadcom */ 1322 static const struct { 1323 u16 reg; 1324 u16 val; 1325 } A1hack[] = { 1326 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, 1327 { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, 1328 { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 }, 1329 { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, 1330 }, C0hack[] = { 1331 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, 1332 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, 1333 }; 1334 1335 /* read Id from external PHY (all have the same address) */ 1336 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1); 1337 1338 /* Optimize MDIO transfer by suppressing preamble. */ 1339 r = xm_read16(hw, port, XM_MMU_CMD); 1340 r |= XM_MMU_NO_PRE; 1341 xm_write16(hw, port, XM_MMU_CMD,r); 1342 1343 switch (id1) { 1344 case PHY_BCOM_ID1_C0: 1345 /* 1346 * Workaround BCOM Errata for the C0 type. 1347 * Write magic patterns to reserved registers. 1348 */ 1349 for (i = 0; i < ARRAY_SIZE(C0hack); i++) 1350 xm_phy_write(hw, port, 1351 C0hack[i].reg, C0hack[i].val); 1352 1353 break; 1354 case PHY_BCOM_ID1_A1: 1355 /* 1356 * Workaround BCOM Errata for the A1 type. 1357 * Write magic patterns to reserved registers. 1358 */ 1359 for (i = 0; i < ARRAY_SIZE(A1hack); i++) 1360 xm_phy_write(hw, port, 1361 A1hack[i].reg, A1hack[i].val); 1362 break; 1363 } 1364 1365 /* 1366 * Workaround BCOM Errata (#10523) for all BCom PHYs. 1367 * Disable Power Management after reset. 1368 */ 1369 r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL); 1370 r |= PHY_B_AC_DIS_PM; 1371 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r); 1372 1373 /* Dummy read */ 1374 xm_read16(hw, port, XM_ISRC); 1375 1376 ext = PHY_B_PEC_EN_LTR; /* enable tx led */ 1377 ctl = PHY_CT_SP1000; /* always 1000mbit */ 1378 1379 if (skge->autoneg == AUTONEG_ENABLE) { 1380 /* 1381 * Workaround BCOM Errata #1 for the C5 type. 1382 * 1000Base-T Link Acquisition Failure in Slave Mode 1383 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register 1384 */ 1385 u16 adv = PHY_B_1000C_RD; 1386 if (skge->advertising & ADVERTISED_1000baseT_Half) 1387 adv |= PHY_B_1000C_AHD; 1388 if (skge->advertising & ADVERTISED_1000baseT_Full) 1389 adv |= PHY_B_1000C_AFD; 1390 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv); 1391 1392 ctl |= PHY_CT_ANE | PHY_CT_RE_CFG; 1393 } else { 1394 if (skge->duplex == DUPLEX_FULL) 1395 ctl |= PHY_CT_DUP_MD; 1396 /* Force to slave */ 1397 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE); 1398 } 1399 1400 /* Set autonegotiation pause parameters */ 1401 xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, 1402 phy_pause_map[skge->flow_control] | PHY_AN_CSMA); 1403 1404 /* Handle Jumbo frames */ 1405 if (hw->dev[port]->mtu > ETH_DATA_LEN) { 1406 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, 1407 PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK); 1408 1409 ext |= PHY_B_PEC_HIGH_LA; 1410 1411 } 1412 1413 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext); 1414 xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl); 1415 1416 /* Use link status change interrupt */ 1417 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); 1418} 1419 1420static void xm_phy_init(struct skge_port *skge) 1421{ 1422 struct skge_hw *hw = skge->hw; 1423 int port = skge->port; 1424 u16 ctrl = 0; 1425 1426 if (skge->autoneg == AUTONEG_ENABLE) { 1427 if (skge->advertising & ADVERTISED_1000baseT_Half) 1428 ctrl |= PHY_X_AN_HD; 1429 if (skge->advertising & ADVERTISED_1000baseT_Full) 1430 ctrl |= PHY_X_AN_FD; 1431 1432 ctrl |= fiber_pause_map[skge->flow_control]; 1433 1434 xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl); 1435 1436 /* Restart Auto-negotiation */ 1437 ctrl = PHY_CT_ANE | PHY_CT_RE_CFG; 1438 } else { 1439 /* Set DuplexMode in Config register */ 1440 if (skge->duplex == DUPLEX_FULL) 1441 ctrl |= PHY_CT_DUP_MD; 1442 /* 1443 * Do NOT enable Auto-negotiation here. This would hold 1444 * the link down because no IDLEs are transmitted 1445 */ 1446 } 1447 1448 xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl); 1449 1450 /* Poll PHY for status changes */ 1451 mod_timer(&skge->link_timer, jiffies + LINK_HZ); 1452} 1453 1454static int xm_check_link(struct net_device *dev) 1455{ 1456 struct skge_port *skge = netdev_priv(dev); 1457 struct skge_hw *hw = skge->hw; 1458 int port = skge->port; 1459 u16 status; 1460 1461 /* read twice because of latch */ 1462 xm_phy_read(hw, port, PHY_XMAC_STAT); 1463 status = xm_phy_read(hw, port, PHY_XMAC_STAT); 1464 1465 if ((status & PHY_ST_LSYNC) == 0) { 1466 xm_link_down(hw, port); 1467 return 0; 1468 } 1469 1470 if (skge->autoneg == AUTONEG_ENABLE) { 1471 u16 lpa, res; 1472 1473 if (!(status & PHY_ST_AN_OVER)) 1474 return 0; 1475 1476 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); 1477 if (lpa & PHY_B_AN_RF) { 1478 printk(KERN_NOTICE PFX "%s: remote fault\n", 1479 dev->name); 1480 return 0; 1481 } 1482 1483 res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI); 1484 1485 /* Check Duplex mismatch */ 1486 switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) { 1487 case PHY_X_RS_FD: 1488 skge->duplex = DUPLEX_FULL; 1489 break; 1490 case PHY_X_RS_HD: 1491 skge->duplex = DUPLEX_HALF; 1492 break; 1493 default: 1494 printk(KERN_NOTICE PFX "%s: duplex mismatch\n", 1495 dev->name); 1496 return 0; 1497 } 1498 1499 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 1500 if ((skge->flow_control == FLOW_MODE_SYMMETRIC || 1501 skge->flow_control == FLOW_MODE_SYM_OR_REM) && 1502 (lpa & PHY_X_P_SYM_MD)) 1503 skge->flow_status = FLOW_STAT_SYMMETRIC; 1504 else if (skge->flow_control == FLOW_MODE_SYM_OR_REM && 1505 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) 1506 /* Enable PAUSE receive, disable PAUSE transmit */ 1507 skge->flow_status = FLOW_STAT_REM_SEND; 1508 else if (skge->flow_control == FLOW_MODE_LOC_SEND && 1509 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) 1510 /* Disable PAUSE receive, enable PAUSE transmit */ 1511 skge->flow_status = FLOW_STAT_LOC_SEND; 1512 else 1513 skge->flow_status = FLOW_STAT_NONE; 1514 1515 skge->speed = SPEED_1000; 1516 } 1517 1518 if (!netif_carrier_ok(dev)) 1519 genesis_link_up(skge); 1520 return 1; 1521} 1522 1523/* Poll to check for link coming up. 1524 * 1525 * Since internal PHY is wired to a level triggered pin, can't 1526 * get an interrupt when carrier is detected, need to poll for 1527 * link coming up. 1528 */ 1529static void xm_link_timer(unsigned long arg) 1530{ 1531 struct skge_port *skge = (struct skge_port *) arg; 1532 struct net_device *dev = skge->netdev; 1533 struct skge_hw *hw = skge->hw; 1534 int port = skge->port; 1535 int i; 1536 unsigned long flags; 1537 1538 if (!netif_running(dev)) 1539 return; 1540 1541 spin_lock_irqsave(&hw->phy_lock, flags); 1542 1543 /* 1544 * Verify that the link by checking GPIO register three times. 1545 * This pin has the signal from the link_sync pin connected to it. 1546 */ 1547 for (i = 0; i < 3; i++) { 1548 if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS) 1549 goto link_down; 1550 } 1551 1552 /* Re-enable interrupt to detect link down */ 1553 if (xm_check_link(dev)) { 1554 u16 msk = xm_read16(hw, port, XM_IMSK); 1555 msk &= ~XM_IS_INP_ASS; 1556 xm_write16(hw, port, XM_IMSK, msk); 1557 xm_read16(hw, port, XM_ISRC); 1558 } else { 1559link_down: 1560 mod_timer(&skge->link_timer, 1561 round_jiffies(jiffies + LINK_HZ)); 1562 } 1563 spin_unlock_irqrestore(&hw->phy_lock, flags); 1564} 1565 1566static void genesis_mac_init(struct skge_hw *hw, int port) 1567{ 1568 struct net_device *dev = hw->dev[port]; 1569 struct skge_port *skge = netdev_priv(dev); 1570 int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN; 1571 int i; 1572 u32 r; 1573 const u8 zero[6] = { 0 }; 1574 1575 for (i = 0; i < 10; i++) { 1576 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), 1577 MFF_SET_MAC_RST); 1578 if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST) 1579 goto reset_ok; 1580 udelay(1); 1581 } 1582 1583 printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name); 1584 1585 reset_ok: 1586 /* Unreset the XMAC. */ 1587 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); 1588 1589 /* 1590 * Perform additional initialization for external PHYs, 1591 * namely for the 1000baseTX cards that use the XMAC's 1592 * GMII mode. 1593 */ 1594 if (hw->phy_type != SK_PHY_XMAC) { 1595 /* Take external Phy out of reset */ 1596 r = skge_read32(hw, B2_GP_IO); 1597 if (port == 0) 1598 r |= GP_DIR_0|GP_IO_0; 1599 else 1600 r |= GP_DIR_2|GP_IO_2; 1601 1602 skge_write32(hw, B2_GP_IO, r); 1603 1604 /* Enable GMII interface */ 1605 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); 1606 } 1607 1608 1609 switch(hw->phy_type) { 1610 case SK_PHY_XMAC: 1611 xm_phy_init(skge); 1612 break; 1613 case SK_PHY_BCOM: 1614 bcom_phy_init(skge); 1615 bcom_check_link(hw, port); 1616 } 1617 1618 /* Set Station Address */ 1619 xm_outaddr(hw, port, XM_SA, dev->dev_addr); 1620 1621 /* We don't use match addresses so clear */ 1622 for (i = 1; i < 16; i++) 1623 xm_outaddr(hw, port, XM_EXM(i), zero); 1624 1625 /* Clear MIB counters */ 1626 xm_write16(hw, port, XM_STAT_CMD, 1627 XM_SC_CLR_RXC | XM_SC_CLR_TXC); 1628 /* Clear two times according to Errata #3 */ 1629 xm_write16(hw, port, XM_STAT_CMD, 1630 XM_SC_CLR_RXC | XM_SC_CLR_TXC); 1631 1632 /* configure Rx High Water Mark (XM_RX_HI_WM) */ 1633 xm_write16(hw, port, XM_RX_HI_WM, 1450); 1634 1635 /* We don't need the FCS appended to the packet. */ 1636 r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS; 1637 if (jumbo) 1638 r |= XM_RX_BIG_PK_OK; 1639 1640 if (skge->duplex == DUPLEX_HALF) { 1641 /* 1642 * If in manual half duplex mode the other side might be in 1643 * full duplex mode, so ignore if a carrier extension is not seen 1644 * on frames received 1645 */ 1646 r |= XM_RX_DIS_CEXT; 1647 } 1648 xm_write16(hw, port, XM_RX_CMD, r); 1649 1650 /* We want short frames padded to 60 bytes. */ 1651 xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD); 1652 1653 /* Increase threshold for jumbo frames on dual port */ 1654 if (hw->ports > 1 && jumbo) 1655 xm_write16(hw, port, XM_TX_THR, 1020); 1656 else 1657 xm_write16(hw, port, XM_TX_THR, 512); 1658 1659 /* 1660 * Enable the reception of all error frames. This is is 1661 * a necessary evil due to the design of the XMAC. The 1662 * XMAC's receive FIFO is only 8K in size, however jumbo 1663 * frames can be up to 9000 bytes in length. When bad 1664 * frame filtering is enabled, the XMAC's RX FIFO operates 1665 * in 'store and forward' mode. For this to work, the 1666 * entire frame has to fit into the FIFO, but that means 1667 * that jumbo frames larger than 8192 bytes will be 1668 * truncated. Disabling all bad frame filtering causes 1669 * the RX FIFO to operate in streaming mode, in which 1670 * case the XMAC will start transferring frames out of the 1671 * RX FIFO as soon as the FIFO threshold is reached. 1672 */ 1673 xm_write32(hw, port, XM_MODE, XM_DEF_MODE); 1674 1675 1676 /* 1677 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK) 1678 * - Enable all bits excepting 'Octets Rx OK Low CntOv' 1679 * and 'Octets Rx OK Hi Cnt Ov'. 1680 */ 1681 xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK); 1682 1683 /* 1684 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK) 1685 * - Enable all bits excepting 'Octets Tx OK Low CntOv' 1686 * and 'Octets Tx OK Hi Cnt Ov'. 1687 */ 1688 xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK); 1689 1690 /* Configure MAC arbiter */ 1691 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); 1692 1693 /* configure timeout values */ 1694 skge_write8(hw, B3_MA_TOINI_RX1, 72); 1695 skge_write8(hw, B3_MA_TOINI_RX2, 72); 1696 skge_write8(hw, B3_MA_TOINI_TX1, 72); 1697 skge_write8(hw, B3_MA_TOINI_TX2, 72); 1698 1699 skge_write8(hw, B3_MA_RCINI_RX1, 0); 1700 skge_write8(hw, B3_MA_RCINI_RX2, 0); 1701 skge_write8(hw, B3_MA_RCINI_TX1, 0); 1702 skge_write8(hw, B3_MA_RCINI_TX2, 0); 1703 1704 /* Configure Rx MAC FIFO */ 1705 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR); 1706 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT); 1707 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD); 1708 1709 /* Configure Tx MAC FIFO */ 1710 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR); 1711 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); 1712 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD); 1713 1714 if (jumbo) { 1715 /* Enable frame flushing if jumbo frames used */ 1716 skge_write16(hw, SK_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH); 1717 } else { 1718 /* enable timeout timers if normal frames */ 1719 skge_write16(hw, B3_PA_CTRL, 1720 (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2); 1721 } 1722} 1723 1724static void genesis_stop(struct skge_port *skge) 1725{ 1726 struct skge_hw *hw = skge->hw; 1727 int port = skge->port; 1728 unsigned retries = 1000; 1729 u16 cmd; 1730 1731 /* Disable Tx and Rx */ 1732 cmd = xm_read16(hw, port, XM_MMU_CMD); 1733 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1734 xm_write16(hw, port, XM_MMU_CMD, cmd); 1735 1736 genesis_reset(hw, port); 1737 1738 /* Clear Tx packet arbiter timeout IRQ */ 1739 skge_write16(hw, B3_PA_CTRL, 1740 port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2); 1741 1742 /* Reset the MAC */ 1743 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); 1744 do { 1745 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); 1746 if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)) 1747 break; 1748 } while (--retries > 0); 1749 1750 /* For external PHYs there must be special handling */ 1751 if (hw->phy_type != SK_PHY_XMAC) { 1752 u32 reg = skge_read32(hw, B2_GP_IO); 1753 if (port == 0) { 1754 reg |= GP_DIR_0; 1755 reg &= ~GP_IO_0; 1756 } else { 1757 reg |= GP_DIR_2; 1758 reg &= ~GP_IO_2; 1759 } 1760 skge_write32(hw, B2_GP_IO, reg); 1761 skge_read32(hw, B2_GP_IO); 1762 } 1763 1764 xm_write16(hw, port, XM_MMU_CMD, 1765 xm_read16(hw, port, XM_MMU_CMD) 1766 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); 1767 1768 xm_read16(hw, port, XM_MMU_CMD); 1769} 1770 1771 1772static void genesis_get_stats(struct skge_port *skge, u64 *data) 1773{ 1774 struct skge_hw *hw = skge->hw; 1775 int port = skge->port; 1776 int i; 1777 unsigned long timeout = jiffies + HZ; 1778 1779 xm_write16(hw, port, 1780 XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); 1781 1782 /* wait for update to complete */ 1783 while (xm_read16(hw, port, XM_STAT_CMD) 1784 & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) { 1785 if (time_after(jiffies, timeout)) 1786 break; 1787 udelay(10); 1788 } 1789 1790 /* special case for 64 bit octet counter */ 1791 data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32 1792 | xm_read32(hw, port, XM_TXO_OK_LO); 1793 data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32 1794 | xm_read32(hw, port, XM_RXO_OK_LO); 1795 1796 for (i = 2; i < ARRAY_SIZE(skge_stats); i++) 1797 data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset); 1798} 1799 1800static void genesis_mac_intr(struct skge_hw *hw, int port) 1801{ 1802 struct net_device *dev = hw->dev[port]; 1803 struct skge_port *skge = netdev_priv(dev); 1804 u16 status = xm_read16(hw, port, XM_ISRC); 1805 1806 if (netif_msg_intr(skge)) 1807 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n", 1808 dev->name, status); 1809 1810 if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) { 1811 xm_link_down(hw, port); 1812 mod_timer(&skge->link_timer, jiffies + 1); 1813 } 1814 1815 if (status & XM_IS_TXF_UR) { 1816 xm_write32(hw, port, XM_MODE, XM_MD_FTF); 1817 ++dev->stats.tx_fifo_errors; 1818 } 1819} 1820 1821static void genesis_link_up(struct skge_port *skge) 1822{ 1823 struct skge_hw *hw = skge->hw; 1824 int port = skge->port; 1825 u16 cmd, msk; 1826 u32 mode; 1827 1828 cmd = xm_read16(hw, port, XM_MMU_CMD); 1829 1830 /* 1831 * enabling pause frame reception is required for 1000BT 1832 * because the XMAC is not reset if the link is going down 1833 */ 1834 if (skge->flow_status == FLOW_STAT_NONE || 1835 skge->flow_status == FLOW_STAT_LOC_SEND) 1836 /* Disable Pause Frame Reception */ 1837 cmd |= XM_MMU_IGN_PF; 1838 else 1839 /* Enable Pause Frame Reception */ 1840 cmd &= ~XM_MMU_IGN_PF; 1841 1842 xm_write16(hw, port, XM_MMU_CMD, cmd); 1843 1844 mode = xm_read32(hw, port, XM_MODE); 1845 if (skge->flow_status== FLOW_STAT_SYMMETRIC || 1846 skge->flow_status == FLOW_STAT_LOC_SEND) { 1847 /* 1848 * Configure Pause Frame Generation 1849 * Use internal and external Pause Frame Generation. 1850 * Sending pause frames is edge triggered. 1851 * Send a Pause frame with the maximum pause time if 1852 * internal oder external FIFO full condition occurs. 1853 * Send a zero pause time frame to re-start transmission. 1854 */ 1855 /* XM_PAUSE_DA = '010000C28001' (default) */ 1856 /* XM_MAC_PTIME = 0xffff (maximum) */ 1857 /* remember this value is defined in big endian (!) */ 1858 xm_write16(hw, port, XM_MAC_PTIME, 0xffff); 1859 1860 mode |= XM_PAUSE_MODE; 1861 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); 1862 } else { 1863 /* 1864 * disable pause frame generation is required for 1000BT 1865 * because the XMAC is not reset if the link is going down 1866 */ 1867 /* Disable Pause Mode in Mode Register */ 1868 mode &= ~XM_PAUSE_MODE; 1869 1870 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); 1871 } 1872 1873 xm_write32(hw, port, XM_MODE, mode); 1874 1875 /* Turn on detection of Tx underrun */ 1876 msk = xm_read16(hw, port, XM_IMSK); 1877 msk &= ~XM_IS_TXF_UR; 1878 xm_write16(hw, port, XM_IMSK, msk); 1879 1880 xm_read16(hw, port, XM_ISRC); 1881 1882 /* get MMU Command Reg. */ 1883 cmd = xm_read16(hw, port, XM_MMU_CMD); 1884 if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL) 1885 cmd |= XM_MMU_GMII_FD; 1886 1887 /* 1888 * Workaround BCOM Errata (#10523) for all BCom Phys 1889 * Enable Power Management after link up 1890 */ 1891 if (hw->phy_type == SK_PHY_BCOM) { 1892 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, 1893 xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) 1894 & ~PHY_B_AC_DIS_PM); 1895 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); 1896 } 1897 1898 /* enable Rx/Tx */ 1899 xm_write16(hw, port, XM_MMU_CMD, 1900 cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1901 skge_link_up(skge); 1902} 1903 1904 1905static inline void bcom_phy_intr(struct skge_port *skge) 1906{ 1907 struct skge_hw *hw = skge->hw; 1908 int port = skge->port; 1909 u16 isrc; 1910 1911 isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT); 1912 if (netif_msg_intr(skge)) 1913 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n", 1914 skge->netdev->name, isrc); 1915 1916 if (isrc & PHY_B_IS_PSE) 1917 printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n", 1918 hw->dev[port]->name); 1919 1920 /* Workaround BCom Errata: 1921 * enable and disable loopback mode if "NO HCD" occurs. 1922 */ 1923 if (isrc & PHY_B_IS_NO_HDCL) { 1924 u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL); 1925 xm_phy_write(hw, port, PHY_BCOM_CTRL, 1926 ctrl | PHY_CT_LOOP); 1927 xm_phy_write(hw, port, PHY_BCOM_CTRL, 1928 ctrl & ~PHY_CT_LOOP); 1929 } 1930 1931 if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) 1932 bcom_check_link(hw, port); 1933 1934} 1935 1936static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) 1937{ 1938 int i; 1939 1940 gma_write16(hw, port, GM_SMI_DATA, val); 1941 gma_write16(hw, port, GM_SMI_CTRL, 1942 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); 1943 for (i = 0; i < PHY_RETRIES; i++) { 1944 udelay(1); 1945 1946 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) 1947 return 0; 1948 } 1949 1950 printk(KERN_WARNING PFX "%s: phy write timeout\n", 1951 hw->dev[port]->name); 1952 return -EIO; 1953} 1954 1955static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) 1956{ 1957 int i; 1958 1959 gma_write16(hw, port, GM_SMI_CTRL, 1960 GM_SMI_CT_PHY_AD(hw->phy_addr) 1961 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 1962 1963 for (i = 0; i < PHY_RETRIES; i++) { 1964 udelay(1); 1965 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) 1966 goto ready; 1967 } 1968 1969 return -ETIMEDOUT; 1970 ready: 1971 *val = gma_read16(hw, port, GM_SMI_DATA); 1972 return 0; 1973} 1974 1975static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) 1976{ 1977 u16 v = 0; 1978 if (__gm_phy_read(hw, port, reg, &v)) 1979 printk(KERN_WARNING PFX "%s: phy read timeout\n", 1980 hw->dev[port]->name); 1981 return v; 1982} 1983 1984/* Marvell Phy Initialization */ 1985static void yukon_init(struct skge_hw *hw, int port) 1986{ 1987 struct skge_port *skge = netdev_priv(hw->dev[port]); 1988 u16 ctrl, ct1000, adv; 1989 1990 if (skge->autoneg == AUTONEG_ENABLE) { 1991 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 1992 1993 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | 1994 PHY_M_EC_MAC_S_MSK); 1995 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); 1996 1997 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); 1998 1999 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); 2000 } 2001 2002 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); 2003 if (skge->autoneg == AUTONEG_DISABLE) 2004 ctrl &= ~PHY_CT_ANE; 2005 2006 ctrl |= PHY_CT_RESET; 2007 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 2008 2009 ctrl = 0; 2010 ct1000 = 0; 2011 adv = PHY_AN_CSMA; 2012 2013 if (skge->autoneg == AUTONEG_ENABLE) { 2014 if (hw->copper) { 2015 if (skge->advertising & ADVERTISED_1000baseT_Full) 2016 ct1000 |= PHY_M_1000C_AFD; 2017 if (skge->advertising & ADVERTISED_1000baseT_Half) 2018 ct1000 |= PHY_M_1000C_AHD; 2019 if (skge->advertising & ADVERTISED_100baseT_Full) 2020 adv |= PHY_M_AN_100_FD; 2021 if (skge->advertising & ADVERTISED_100baseT_Half) 2022 adv |= PHY_M_AN_100_HD; 2023 if (skge->advertising & ADVERTISED_10baseT_Full) 2024 adv |= PHY_M_AN_10_FD; 2025 if (skge->advertising & ADVERTISED_10baseT_Half) 2026 adv |= PHY_M_AN_10_HD; 2027 2028 /* Set Flow-control capabilities */ 2029 adv |= phy_pause_map[skge->flow_control]; 2030 } else { 2031 if (skge->advertising & ADVERTISED_1000baseT_Full) 2032 adv |= PHY_M_AN_1000X_AFD; 2033 if (skge->advertising & ADVERTISED_1000baseT_Half) 2034 adv |= PHY_M_AN_1000X_AHD; 2035 2036 adv |= fiber_pause_map[skge->flow_control]; 2037 } 2038 2039 /* Restart Auto-negotiation */ 2040 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; 2041 } else { 2042 /* forced speed/duplex settings */ 2043 ct1000 = PHY_M_1000C_MSE; 2044 2045 if (skge->duplex == DUPLEX_FULL) 2046 ctrl |= PHY_CT_DUP_MD; 2047 2048 switch (skge->speed) { 2049 case SPEED_1000: 2050 ctrl |= PHY_CT_SP1000; 2051 break; 2052 case SPEED_100: 2053 ctrl |= PHY_CT_SP100; 2054 break; 2055 } 2056 2057 ctrl |= PHY_CT_RESET; 2058 } 2059 2060 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); 2061 2062 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); 2063 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 2064 2065 /* Enable phy interrupt on autonegotiation complete (or link up) */ 2066 if (skge->autoneg == AUTONEG_ENABLE) 2067 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK); 2068 else 2069 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); 2070} 2071 2072static void yukon_reset(struct skge_hw *hw, int port) 2073{ 2074 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */ 2075 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ 2076 gma_write16(hw, port, GM_MC_ADDR_H2, 0); 2077 gma_write16(hw, port, GM_MC_ADDR_H3, 0); 2078 gma_write16(hw, port, GM_MC_ADDR_H4, 0); 2079 2080 gma_write16(hw, port, GM_RX_CTRL, 2081 gma_read16(hw, port, GM_RX_CTRL) 2082 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 2083} 2084 2085/* Apparently, early versions of Yukon-Lite had wrong chip_id? */ 2086static int is_yukon_lite_a0(struct skge_hw *hw) 2087{ 2088 u32 reg; 2089 int ret; 2090 2091 if (hw->chip_id != CHIP_ID_YUKON) 2092 return 0; 2093 2094 reg = skge_read32(hw, B2_FAR); 2095 skge_write8(hw, B2_FAR + 3, 0xff); 2096 ret = (skge_read8(hw, B2_FAR + 3) != 0); 2097 skge_write32(hw, B2_FAR, reg); 2098 return ret; 2099} 2100 2101static void yukon_mac_init(struct skge_hw *hw, int port) 2102{ 2103 struct skge_port *skge = netdev_priv(hw->dev[port]); 2104 int i; 2105 u32 reg; 2106 const u8 *addr = hw->dev[port]->dev_addr; 2107 2108 /* WA code for COMA mode -- set PHY reset */ 2109 if (hw->chip_id == CHIP_ID_YUKON_LITE && 2110 hw->chip_rev >= CHIP_REV_YU_LITE_A3) { 2111 reg = skge_read32(hw, B2_GP_IO); 2112 reg |= GP_DIR_9 | GP_IO_9; 2113 skge_write32(hw, B2_GP_IO, reg); 2114 } 2115 2116 /* hard reset */ 2117 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); 2118 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); 2119 2120 /* WA code for COMA mode -- clear PHY reset */ 2121 if (hw->chip_id == CHIP_ID_YUKON_LITE && 2122 hw->chip_rev >= CHIP_REV_YU_LITE_A3) { 2123 reg = skge_read32(hw, B2_GP_IO); 2124 reg |= GP_DIR_9; 2125 reg &= ~GP_IO_9; 2126 skge_write32(hw, B2_GP_IO, reg); 2127 } 2128 2129 /* Set hardware config mode */ 2130 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | 2131 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE; 2132 reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; 2133 2134 /* Clear GMC reset */ 2135 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); 2136 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); 2137 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); 2138 2139 if (skge->autoneg == AUTONEG_DISABLE) { 2140 reg = GM_GPCR_AU_ALL_DIS; 2141 gma_write16(hw, port, GM_GP_CTRL, 2142 gma_read16(hw, port, GM_GP_CTRL) | reg); 2143 2144 switch (skge->speed) { 2145 case SPEED_1000: 2146 reg &= ~GM_GPCR_SPEED_100; 2147 reg |= GM_GPCR_SPEED_1000; 2148 break; 2149 case SPEED_100: 2150 reg &= ~GM_GPCR_SPEED_1000; 2151 reg |= GM_GPCR_SPEED_100; 2152 break; 2153 case SPEED_10: 2154 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); 2155 break; 2156 } 2157 2158 if (skge->duplex == DUPLEX_FULL) 2159 reg |= GM_GPCR_DUP_FULL; 2160 } else 2161 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; 2162 2163 switch (skge->flow_control) { 2164 case FLOW_MODE_NONE: 2165 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 2166 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 2167 break; 2168 case FLOW_MODE_LOC_SEND: 2169 /* disable Rx flow-control */ 2170 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 2171 break; 2172 case FLOW_MODE_SYMMETRIC: 2173 case FLOW_MODE_SYM_OR_REM: 2174 /* enable Tx & Rx flow-control */ 2175 break; 2176 } 2177 2178 gma_write16(hw, port, GM_GP_CTRL, reg); 2179 skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); 2180 2181 yukon_init(hw, port); 2182 2183 /* MIB clear */ 2184 reg = gma_read16(hw, port, GM_PHY_ADDR); 2185 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); 2186 2187 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 2188 gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); 2189 gma_write16(hw, port, GM_PHY_ADDR, reg); 2190 2191 /* transmit control */ 2192 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 2193 2194 /* receive control reg: unicast + multicast + no FCS */ 2195 gma_write16(hw, port, GM_RX_CTRL, 2196 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); 2197 2198 /* transmit flow control */ 2199 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); 2200 2201 /* transmit parameter */ 2202 gma_write16(hw, port, GM_TX_PARAM, 2203 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | 2204 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 2205 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); 2206 2207 /* configure the Serial Mode Register */ 2208 reg = DATA_BLIND_VAL(DATA_BLIND_DEF) 2209 | GM_SMOD_VLAN_ENA 2210 | IPG_DATA_VAL(IPG_DATA_DEF); 2211 2212 if (hw->dev[port]->mtu > ETH_DATA_LEN) 2213 reg |= GM_SMOD_JUMBO_ENA; 2214 2215 gma_write16(hw, port, GM_SERIAL_MODE, reg); 2216 2217 /* physical address: used for pause frames */ 2218 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); 2219 /* virtual address for data */ 2220 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); 2221 2222 /* enable interrupt mask for counter overflows */ 2223 gma_write16(hw, port, GM_TX_IRQ_MSK, 0); 2224 gma_write16(hw, port, GM_RX_IRQ_MSK, 0); 2225 gma_write16(hw, port, GM_TR_IRQ_MSK, 0); 2226 2227 /* Initialize Mac Fifo */ 2228 2229 /* Configure Rx MAC FIFO */ 2230 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); 2231 reg = GMF_OPER_ON | GMF_RX_F_FL_ON; 2232 2233 /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */ 2234 if (is_yukon_lite_a0(hw)) 2235 reg &= ~GMF_RX_F_FL_ON; 2236 2237 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 2238 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); 2239 /* 2240 * because Pause Packet Truncation in GMAC is not working 2241 * we have to increase the Flush Threshold to 64 bytes 2242 * in order to flush pause packets in Rx FIFO on Yukon-1 2243 */ 2244 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1); 2245 2246 /* Configure Tx MAC FIFO */ 2247 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 2248 skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 2249} 2250 2251/* Go into power down mode */ 2252static void yukon_suspend(struct skge_hw *hw, int port) 2253{ 2254 u16 ctrl; 2255 2256 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); 2257 ctrl |= PHY_M_PC_POL_R_DIS; 2258 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); 2259 2260 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); 2261 ctrl |= PHY_CT_RESET; 2262 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 2263 2264 /* switch IEEE compatible power down mode on */ 2265 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); 2266 ctrl |= PHY_CT_PDOWN; 2267 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 2268} 2269 2270static void yukon_stop(struct skge_port *skge) 2271{ 2272 struct skge_hw *hw = skge->hw; 2273 int port = skge->port; 2274 2275 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); 2276 yukon_reset(hw, port); 2277 2278 gma_write16(hw, port, GM_GP_CTRL, 2279 gma_read16(hw, port, GM_GP_CTRL) 2280 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); 2281 gma_read16(hw, port, GM_GP_CTRL); 2282 2283 yukon_suspend(hw, port); 2284 2285 /* set GPHY Control reset */ 2286 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); 2287 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); 2288} 2289 2290static void yukon_get_stats(struct skge_port *skge, u64 *data) 2291{ 2292 struct skge_hw *hw = skge->hw; 2293 int port = skge->port; 2294 int i; 2295 2296 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 2297 | gma_read32(hw, port, GM_TXO_OK_LO); 2298 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32 2299 | gma_read32(hw, port, GM_RXO_OK_LO); 2300 2301 for (i = 2; i < ARRAY_SIZE(skge_stats); i++) 2302 data[i] = gma_read32(hw, port, 2303 skge_stats[i].gma_offset); 2304} 2305 2306static void yukon_mac_intr(struct skge_hw *hw, int port) 2307{ 2308 struct net_device *dev = hw->dev[port]; 2309 struct skge_port *skge = netdev_priv(dev); 2310 u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); 2311 2312 if (netif_msg_intr(skge)) 2313 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n", 2314 dev->name, status); 2315 2316 if (status & GM_IS_RX_FF_OR) { 2317 ++dev->stats.rx_fifo_errors; 2318 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); 2319 } 2320 2321 if (status & GM_IS_TX_FF_UR) { 2322 ++dev->stats.tx_fifo_errors; 2323 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); 2324 } 2325 2326} 2327 2328static u16 yukon_speed(const struct skge_hw *hw, u16 aux) 2329{ 2330 switch (aux & PHY_M_PS_SPEED_MSK) { 2331 case PHY_M_PS_SPEED_1000: 2332 return SPEED_1000; 2333 case PHY_M_PS_SPEED_100: 2334 return SPEED_100; 2335 default: 2336 return SPEED_10; 2337 } 2338} 2339 2340static void yukon_link_up(struct skge_port *skge) 2341{ 2342 struct skge_hw *hw = skge->hw; 2343 int port = skge->port; 2344 u16 reg; 2345 2346 /* Enable Transmit FIFO Underrun */ 2347 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); 2348 2349 reg = gma_read16(hw, port, GM_GP_CTRL); 2350 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) 2351 reg |= GM_GPCR_DUP_FULL; 2352 2353 /* enable Rx/Tx */ 2354 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 2355 gma_write16(hw, port, GM_GP_CTRL, reg); 2356 2357 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); 2358 skge_link_up(skge); 2359} 2360 2361static void yukon_link_down(struct skge_port *skge) 2362{ 2363 struct skge_hw *hw = skge->hw; 2364 int port = skge->port; 2365 u16 ctrl; 2366 2367 ctrl = gma_read16(hw, port, GM_GP_CTRL); 2368 ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 2369 gma_write16(hw, port, GM_GP_CTRL, ctrl); 2370 2371 if (skge->flow_status == FLOW_STAT_REM_SEND) { 2372 ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); 2373 ctrl |= PHY_M_AN_ASP; 2374 /* restore Asymmetric Pause bit */ 2375 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl); 2376 } 2377 2378 skge_link_down(skge); 2379 2380 yukon_init(hw, port); 2381} 2382 2383static void yukon_phy_intr(struct skge_port *skge) 2384{ 2385 struct skge_hw *hw = skge->hw; 2386 int port = skge->port; 2387 const char *reason = NULL; 2388 u16 istatus, phystat; 2389 2390 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); 2391 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); 2392 2393 if (netif_msg_intr(skge)) 2394 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n", 2395 skge->netdev->name, istatus, phystat); 2396 2397 if (istatus & PHY_M_IS_AN_COMPL) { 2398 if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP) 2399 & PHY_M_AN_RF) { 2400 reason = "remote fault"; 2401 goto failed; 2402 } 2403 2404 if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) { 2405 reason = "master/slave fault"; 2406 goto failed; 2407 } 2408 2409 if (!(phystat & PHY_M_PS_SPDUP_RES)) { 2410 reason = "speed/duplex"; 2411 goto failed; 2412 } 2413 2414 skge->duplex = (phystat & PHY_M_PS_FULL_DUP) 2415 ? DUPLEX_FULL : DUPLEX_HALF; 2416 skge->speed = yukon_speed(hw, phystat); 2417 2418 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 2419 switch (phystat & PHY_M_PS_PAUSE_MSK) { 2420 case PHY_M_PS_PAUSE_MSK: 2421 skge->flow_status = FLOW_STAT_SYMMETRIC; 2422 break; 2423 case PHY_M_PS_RX_P_EN: 2424 skge->flow_status = FLOW_STAT_REM_SEND; 2425 break; 2426 case PHY_M_PS_TX_P_EN: 2427 skge->flow_status = FLOW_STAT_LOC_SEND; 2428 break; 2429 default: 2430 skge->flow_status = FLOW_STAT_NONE; 2431 } 2432 2433 if (skge->flow_status == FLOW_STAT_NONE || 2434 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) 2435 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 2436 else 2437 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); 2438 yukon_link_up(skge); 2439 return; 2440 } 2441 2442 if (istatus & PHY_M_IS_LSP_CHANGE) 2443 skge->speed = yukon_speed(hw, phystat); 2444 2445 if (istatus & PHY_M_IS_DUP_CHANGE) 2446 skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; 2447 if (istatus & PHY_M_IS_LST_CHANGE) { 2448 if (phystat & PHY_M_PS_LINK_UP) 2449 yukon_link_up(skge); 2450 else 2451 yukon_link_down(skge); 2452 } 2453 return; 2454 failed: 2455 printk(KERN_ERR PFX "%s: autonegotiation failed (%s)\n", 2456 skge->netdev->name, reason); 2457 2458 /* XXX restart autonegotiation? */ 2459} 2460 2461static void skge_phy_reset(struct skge_port *skge) 2462{ 2463 struct skge_hw *hw = skge->hw; 2464 int port = skge->port; 2465 struct net_device *dev = hw->dev[port]; 2466 2467 netif_stop_queue(skge->netdev); 2468 netif_carrier_off(skge->netdev); 2469 2470 spin_lock_bh(&hw->phy_lock); 2471 if (hw->chip_id == CHIP_ID_GENESIS) { 2472 genesis_reset(hw, port); 2473 genesis_mac_init(hw, port); 2474 } else { 2475 yukon_reset(hw, port); 2476 yukon_init(hw, port); 2477 } 2478 spin_unlock_bh(&hw->phy_lock); 2479 2480 dev->set_multicast_list(dev); 2481} 2482 2483/* Basic MII support */ 2484static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2485{ 2486 struct mii_ioctl_data *data = if_mii(ifr); 2487 struct skge_port *skge = netdev_priv(dev); 2488 struct skge_hw *hw = skge->hw; 2489 int err = -EOPNOTSUPP; 2490 2491 if (!netif_running(dev)) 2492 return -ENODEV; /* Phy still in reset */ 2493 2494 switch(cmd) { 2495 case SIOCGMIIPHY: 2496 data->phy_id = hw->phy_addr; 2497 2498 /* fallthru */ 2499 case SIOCGMIIREG: { 2500 u16 val = 0; 2501 spin_lock_bh(&hw->phy_lock); 2502 if (hw->chip_id == CHIP_ID_GENESIS) 2503 err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); 2504 else 2505 err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); 2506 spin_unlock_bh(&hw->phy_lock); 2507 data->val_out = val; 2508 break; 2509 } 2510 2511 case SIOCSMIIREG: 2512 if (!capable(CAP_NET_ADMIN)) 2513 return -EPERM; 2514 2515 spin_lock_bh(&hw->phy_lock); 2516 if (hw->chip_id == CHIP_ID_GENESIS) 2517 err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f, 2518 data->val_in); 2519 else 2520 err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f, 2521 data->val_in); 2522 spin_unlock_bh(&hw->phy_lock); 2523 break; 2524 } 2525 return err; 2526} 2527 2528static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) 2529{ 2530 u32 end; 2531 2532 start /= 8; 2533 len /= 8; 2534 end = start + len - 1; 2535 2536 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); 2537 skge_write32(hw, RB_ADDR(q, RB_START), start); 2538 skge_write32(hw, RB_ADDR(q, RB_WP), start); 2539 skge_write32(hw, RB_ADDR(q, RB_RP), start); 2540 skge_write32(hw, RB_ADDR(q, RB_END), end); 2541 2542 if (q == Q_R1 || q == Q_R2) { 2543 /* Set thresholds on receive queue's */ 2544 skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), 2545 start + (2*len)/3); 2546 skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), 2547 start + (len/3)); 2548 } else { 2549 /* Enable store & forward on Tx queue's because 2550 * Tx FIFO is only 4K on Genesis and 1K on Yukon 2551 */ 2552 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); 2553 } 2554 2555 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); 2556} 2557 2558/* Setup Bus Memory Interface */ 2559static void skge_qset(struct skge_port *skge, u16 q, 2560 const struct skge_element *e) 2561{ 2562 struct skge_hw *hw = skge->hw; 2563 u32 watermark = 0x600; 2564 u64 base = skge->dma + (e->desc - skge->mem); 2565 2566 /* optimization to reduce window on 32bit/33mhz */ 2567 if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0) 2568 watermark /= 2; 2569 2570 skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET); 2571 skge_write32(hw, Q_ADDR(q, Q_F), watermark); 2572 skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32)); 2573 skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base); 2574} 2575 2576static int skge_up(struct net_device *dev) 2577{ 2578 struct skge_port *skge = netdev_priv(dev); 2579 struct skge_hw *hw = skge->hw; 2580 int port = skge->port; 2581 u32 chunk, ram_addr; 2582 size_t rx_size, tx_size; 2583 int err; 2584 2585 if (!is_valid_ether_addr(dev->dev_addr)) 2586 return -EINVAL; 2587 2588 if (netif_msg_ifup(skge)) 2589 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); 2590 2591 if (dev->mtu > RX_BUF_SIZE) 2592 skge->rx_buf_size = dev->mtu + ETH_HLEN; 2593 else 2594 skge->rx_buf_size = RX_BUF_SIZE; 2595 2596 2597 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); 2598 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); 2599 skge->mem_size = tx_size + rx_size; 2600 skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma); 2601 if (!skge->mem) 2602 return -ENOMEM; 2603 2604 BUG_ON(skge->dma & 7); 2605 2606 if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { 2607 dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); 2608 err = -EINVAL; 2609 goto free_pci_mem; 2610 } 2611 2612 memset(skge->mem, 0, skge->mem_size); 2613 2614 err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma); 2615 if (err) 2616 goto free_pci_mem; 2617 2618 err = skge_rx_fill(dev); 2619 if (err) 2620 goto free_rx_ring; 2621 2622 err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, 2623 skge->dma + rx_size); 2624 if (err) 2625 goto free_rx_ring; 2626 2627 /* Initialize MAC */ 2628 spin_lock_bh(&hw->phy_lock); 2629 if (hw->chip_id == CHIP_ID_GENESIS) 2630 genesis_mac_init(hw, port); 2631 else 2632 yukon_mac_init(hw, port); 2633 spin_unlock_bh(&hw->phy_lock); 2634 2635 /* Configure RAMbuffers - equally between ports and tx/rx */ 2636 chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2); 2637 ram_addr = hw->ram_offset + 2 * chunk * port; 2638 2639 skge_ramset(hw, rxqaddr[port], ram_addr, chunk); 2640 skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); 2641 2642 BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); 2643 skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk); 2644 skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); 2645 2646 /* Start receiver BMU */ 2647 wmb(); 2648 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); 2649 skge_led(skge, LED_MODE_ON); 2650 2651 spin_lock_irq(&hw->hw_lock); 2652 hw->intr_mask |= portmask[port]; 2653 skge_write32(hw, B0_IMSK, hw->intr_mask); 2654 spin_unlock_irq(&hw->hw_lock); 2655 2656 napi_enable(&skge->napi); 2657 return 0; 2658 2659 free_rx_ring: 2660 skge_rx_clean(skge); 2661 kfree(skge->rx_ring.start); 2662 free_pci_mem: 2663 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); 2664 skge->mem = NULL; 2665 2666 return err; 2667} 2668 2669/* stop receiver */ 2670static void skge_rx_stop(struct skge_hw *hw, int port) 2671{ 2672 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); 2673 skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL), 2674 RB_RST_SET|RB_DIS_OP_MD); 2675 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); 2676} 2677 2678static int skge_down(struct net_device *dev) 2679{ 2680 struct skge_port *skge = netdev_priv(dev); 2681 struct skge_hw *hw = skge->hw; 2682 int port = skge->port; 2683 2684 if (skge->mem == NULL) 2685 return 0; 2686 2687 if (netif_msg_ifdown(skge)) 2688 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); 2689 2690 netif_stop_queue(dev); 2691 2692 if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC) 2693 del_timer_sync(&skge->link_timer); 2694 2695 napi_disable(&skge->napi); 2696 netif_carrier_off(dev); 2697 2698 spin_lock_irq(&hw->hw_lock); 2699 hw->intr_mask &= ~portmask[port]; 2700 skge_write32(hw, B0_IMSK, hw->intr_mask); 2701 spin_unlock_irq(&hw->hw_lock); 2702 2703 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF); 2704 if (hw->chip_id == CHIP_ID_GENESIS) 2705 genesis_stop(skge); 2706 else 2707 yukon_stop(skge); 2708 2709 /* Stop transmitter */ 2710 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2711 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), 2712 RB_RST_SET|RB_DIS_OP_MD); 2713 2714 2715 /* Disable Force Sync bit and Enable Alloc bit */ 2716 skge_write8(hw, SK_REG(port, TXA_CTRL), 2717 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 2718 2719 /* Stop Interval Timer and Limit Counter of Tx Arbiter */ 2720 skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); 2721 skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); 2722 2723 /* Reset PCI FIFO */ 2724 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); 2725 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); 2726 2727 /* Reset the RAM Buffer async Tx queue */ 2728 skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); 2729 2730 skge_rx_stop(hw, port); 2731 2732 if (hw->chip_id == CHIP_ID_GENESIS) { 2733 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); 2734 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET); 2735 } else { 2736 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 2737 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 2738 } 2739 2740 skge_led(skge, LED_MODE_OFF); 2741 2742 netif_tx_lock_bh(dev); 2743 skge_tx_clean(dev); 2744 netif_tx_unlock_bh(dev); 2745 2746 skge_rx_clean(skge); 2747 2748 kfree(skge->rx_ring.start); 2749 kfree(skge->tx_ring.start); 2750 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); 2751 skge->mem = NULL; 2752 return 0; 2753} 2754 2755static inline int skge_avail(const struct skge_ring *ring) 2756{ 2757 smp_mb(); 2758 return ((ring->to_clean > ring->to_use) ? 0 : ring->count) 2759 + (ring->to_clean - ring->to_use) - 1; 2760} 2761 2762static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) 2763{ 2764 struct skge_port *skge = netdev_priv(dev); 2765 struct skge_hw *hw = skge->hw; 2766 struct skge_element *e; 2767 struct skge_tx_desc *td; 2768 int i; 2769 u32 control, len; 2770 u64 map; 2771 2772 if (skb_padto(skb, ETH_ZLEN)) 2773 return NETDEV_TX_OK; 2774 2775 if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) 2776 return NETDEV_TX_BUSY; 2777 2778 e = skge->tx_ring.to_use; 2779 td = e->desc; 2780 BUG_ON(td->control & BMU_OWN); 2781 e->skb = skb; 2782 len = skb_headlen(skb); 2783 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); 2784 pci_unmap_addr_set(e, mapaddr, map); 2785 pci_unmap_len_set(e, maplen, len); 2786 2787 td->dma_lo = map; 2788 td->dma_hi = map >> 32; 2789 2790 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2791 const int offset = skb_transport_offset(skb); 2792 2793 /* This seems backwards, but it is what the sk98lin 2794 * does. Looks like hardware is wrong? 2795 */ 2796 if (ipip_hdr(skb)->protocol == IPPROTO_UDP 2797 && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) 2798 control = BMU_TCP_CHECK; 2799 else 2800 control = BMU_UDP_CHECK; 2801 2802 td->csum_offs = 0; 2803 td->csum_start = offset; 2804 td->csum_write = offset + skb->csum_offset; 2805 } else 2806 control = BMU_CHECK; 2807 2808 if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */ 2809 control |= BMU_EOF| BMU_IRQ_EOF; 2810 else { 2811 struct skge_tx_desc *tf = td; 2812 2813 control |= BMU_STFWD; 2814 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2815 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2816 2817 map = pci_map_page(hw->pdev, frag->page, frag->page_offset, 2818 frag->size, PCI_DMA_TODEVICE); 2819 2820 e = e->next; 2821 e->skb = skb; 2822 tf = e->desc; 2823 BUG_ON(tf->control & BMU_OWN); 2824 2825 tf->dma_lo = map; 2826 tf->dma_hi = (u64) map >> 32; 2827 pci_unmap_addr_set(e, mapaddr, map); 2828 pci_unmap_len_set(e, maplen, frag->size); 2829 2830 tf->control = BMU_OWN | BMU_SW | control | frag->size; 2831 } 2832 tf->control |= BMU_EOF | BMU_IRQ_EOF; 2833 } 2834 /* Make sure all the descriptors written */ 2835 wmb(); 2836 td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; 2837 wmb(); 2838 2839 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); 2840 2841 if (unlikely(netif_msg_tx_queued(skge))) 2842 printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n", 2843 dev->name, e - skge->tx_ring.start, skb->len); 2844 2845 skge->tx_ring.to_use = e->next; 2846 smp_wmb(); 2847 2848 if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) { 2849 pr_debug("%s: transmit queue full\n", dev->name); 2850 netif_stop_queue(dev); 2851 } 2852 2853 dev->trans_start = jiffies; 2854 2855 return NETDEV_TX_OK; 2856} 2857 2858 2859/* Free resources associated with this reing element */ 2860static void skge_tx_free(struct skge_port *skge, struct skge_element *e, 2861 u32 control) 2862{ 2863 struct pci_dev *pdev = skge->hw->pdev; 2864 2865 /* skb header vs. fragment */ 2866 if (control & BMU_STF) 2867 pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr), 2868 pci_unmap_len(e, maplen), 2869 PCI_DMA_TODEVICE); 2870 else 2871 pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr), 2872 pci_unmap_len(e, maplen), 2873 PCI_DMA_TODEVICE); 2874 2875 if (control & BMU_EOF) { 2876 if (unlikely(netif_msg_tx_done(skge))) 2877 printk(KERN_DEBUG PFX "%s: tx done slot %td\n", 2878 skge->netdev->name, e - skge->tx_ring.start); 2879 2880 dev_kfree_skb(e->skb); 2881 } 2882} 2883 2884/* Free all buffers in transmit ring */ 2885static void skge_tx_clean(struct net_device *dev) 2886{ 2887 struct skge_port *skge = netdev_priv(dev); 2888 struct skge_element *e; 2889 2890 for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { 2891 struct skge_tx_desc *td = e->desc; 2892 skge_tx_free(skge, e, td->control); 2893 td->control = 0; 2894 } 2895 2896 skge->tx_ring.to_clean = e; 2897 netif_wake_queue(dev); 2898} 2899 2900static void skge_tx_timeout(struct net_device *dev) 2901{ 2902 struct skge_port *skge = netdev_priv(dev); 2903 2904 if (netif_msg_timer(skge)) 2905 printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name); 2906 2907 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); 2908 skge_tx_clean(dev); 2909} 2910 2911static int skge_change_mtu(struct net_device *dev, int new_mtu) 2912{ 2913 int err; 2914 2915 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 2916 return -EINVAL; 2917 2918 if (!netif_running(dev)) { 2919 dev->mtu = new_mtu; 2920 return 0; 2921 } 2922 2923 skge_down(dev); 2924 2925 dev->mtu = new_mtu; 2926 2927 err = skge_up(dev); 2928 if (err) 2929 dev_close(dev); 2930 2931 return err; 2932} 2933 2934static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; 2935 2936static void genesis_add_filter(u8 filter[8], const u8 *addr) 2937{ 2938 u32 crc, bit; 2939 2940 crc = ether_crc_le(ETH_ALEN, addr); 2941 bit = ~crc & 0x3f; 2942 filter[bit/8] |= 1 << (bit%8); 2943} 2944 2945static void genesis_set_multicast(struct net_device *dev) 2946{ 2947 struct skge_port *skge = netdev_priv(dev); 2948 struct skge_hw *hw = skge->hw; 2949 int port = skge->port; 2950 int i, count = dev->mc_count; 2951 struct dev_mc_list *list = dev->mc_list; 2952 u32 mode; 2953 u8 filter[8]; 2954 2955 mode = xm_read32(hw, port, XM_MODE); 2956 mode |= XM_MD_ENA_HASH; 2957 if (dev->flags & IFF_PROMISC) 2958 mode |= XM_MD_ENA_PROM; 2959 else 2960 mode &= ~XM_MD_ENA_PROM; 2961 2962 if (dev->flags & IFF_ALLMULTI) 2963 memset(filter, 0xff, sizeof(filter)); 2964 else { 2965 memset(filter, 0, sizeof(filter)); 2966 2967 if (skge->flow_status == FLOW_STAT_REM_SEND 2968 || skge->flow_status == FLOW_STAT_SYMMETRIC) 2969 genesis_add_filter(filter, pause_mc_addr); 2970 2971 for (i = 0; list && i < count; i++, list = list->next) 2972 genesis_add_filter(filter, list->dmi_addr); 2973 } 2974 2975 xm_write32(hw, port, XM_MODE, mode); 2976 xm_outhash(hw, port, XM_HSM, filter); 2977} 2978 2979static void yukon_add_filter(u8 filter[8], const u8 *addr) 2980{ 2981 u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f; 2982 filter[bit/8] |= 1 << (bit%8); 2983} 2984 2985static void yukon_set_multicast(struct net_device *dev) 2986{ 2987 struct skge_port *skge = netdev_priv(dev); 2988 struct skge_hw *hw = skge->hw; 2989 int port = skge->port; 2990 struct dev_mc_list *list = dev->mc_list; 2991 int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND 2992 || skge->flow_status == FLOW_STAT_SYMMETRIC); 2993 u16 reg; 2994 u8 filter[8]; 2995 2996 memset(filter, 0, sizeof(filter)); 2997 2998 reg = gma_read16(hw, port, GM_RX_CTRL); 2999 reg |= GM_RXCR_UCF_ENA; 3000 3001 if (dev->flags & IFF_PROMISC) /* promiscuous */ 3002 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 3003 else if (dev->flags & IFF_ALLMULTI) /* all multicast */ 3004 memset(filter, 0xff, sizeof(filter)); 3005 else if (dev->mc_count == 0 && !rx_pause)/* no multicast */ 3006 reg &= ~GM_RXCR_MCF_ENA; 3007 else { 3008 int i; 3009 reg |= GM_RXCR_MCF_ENA; 3010 3011 if (rx_pause) 3012 yukon_add_filter(filter, pause_mc_addr); 3013 3014 for (i = 0; list && i < dev->mc_count; i++, list = list->next) 3015 yukon_add_filter(filter, list->dmi_addr); 3016 } 3017 3018 3019 gma_write16(hw, port, GM_MC_ADDR_H1, 3020 (u16)filter[0] | ((u16)filter[1] << 8)); 3021 gma_write16(hw, port, GM_MC_ADDR_H2, 3022 (u16)filter[2] | ((u16)filter[3] << 8)); 3023 gma_write16(hw, port, GM_MC_ADDR_H3, 3024 (u16)filter[4] | ((u16)filter[5] << 8)); 3025 gma_write16(hw, port, GM_MC_ADDR_H4, 3026 (u16)filter[6] | ((u16)filter[7] << 8)); 3027 3028 gma_write16(hw, port, GM_RX_CTRL, reg); 3029} 3030 3031static inline u16 phy_length(const struct skge_hw *hw, u32 status) 3032{ 3033 if (hw->chip_id == CHIP_ID_GENESIS) 3034 return status >> XMR_FS_LEN_SHIFT; 3035 else 3036 return status >> GMR_FS_LEN_SHIFT; 3037} 3038 3039static inline int bad_phy_status(const struct skge_hw *hw, u32 status) 3040{ 3041 if (hw->chip_id == CHIP_ID_GENESIS) 3042 return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0; 3043 else 3044 return (status & GMR_FS_ANY_ERR) || 3045 (status & GMR_FS_RX_OK) == 0; 3046} 3047 3048 3049/* Get receive buffer from descriptor. 3050 * Handles copy of small buffers and reallocation failures 3051 */ 3052static struct sk_buff *skge_rx_get(struct net_device *dev, 3053 struct skge_element *e, 3054 u32 control, u32 status, u16 csum) 3055{ 3056 struct skge_port *skge = netdev_priv(dev); 3057 struct sk_buff *skb; 3058 u16 len = control & BMU_BBC; 3059 3060 if (unlikely(netif_msg_rx_status(skge))) 3061 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n", 3062 dev->name, e - skge->rx_ring.start, 3063 status, len); 3064 3065 if (len > skge->rx_buf_size) 3066 goto error; 3067 3068 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) 3069 goto error; 3070 3071 if (bad_phy_status(skge->hw, status)) 3072 goto error; 3073 3074 if (phy_length(skge->hw, status) != len) 3075 goto error; 3076 3077 if (len < RX_COPY_THRESHOLD) { 3078 skb = netdev_alloc_skb(dev, len + 2); 3079 if (!skb) 3080 goto resubmit; 3081 3082 skb_reserve(skb, 2); 3083 pci_dma_sync_single_for_cpu(skge->hw->pdev, 3084 pci_unmap_addr(e, mapaddr), 3085 len, PCI_DMA_FROMDEVICE); 3086 skb_copy_from_linear_data(e->skb, skb->data, len); 3087 pci_dma_sync_single_for_device(skge->hw->pdev, 3088 pci_unmap_addr(e, mapaddr), 3089 len, PCI_DMA_FROMDEVICE); 3090 skge_rx_reuse(e, skge->rx_buf_size); 3091 } else { 3092 struct sk_buff *nskb; 3093 nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN); 3094 if (!nskb) 3095 goto resubmit; 3096 3097 skb_reserve(nskb, NET_IP_ALIGN); 3098 pci_unmap_single(skge->hw->pdev, 3099 pci_unmap_addr(e, mapaddr), 3100 pci_unmap_len(e, maplen), 3101 PCI_DMA_FROMDEVICE); 3102 skb = e->skb; 3103 prefetch(skb->data); 3104 skge_rx_setup(skge, e, nskb, skge->rx_buf_size); 3105 } 3106 3107 skb_put(skb, len); 3108 if (skge->rx_csum) { 3109 skb->csum = csum; 3110 skb->ip_summed = CHECKSUM_COMPLETE; 3111 } 3112 3113 skb->protocol = eth_type_trans(skb, dev); 3114 3115 return skb; 3116error: 3117 3118 if (netif_msg_rx_err(skge)) 3119 printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n", 3120 dev->name, e - skge->rx_ring.start, 3121 control, status); 3122 3123 if (skge->hw->chip_id == CHIP_ID_GENESIS) { 3124 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) 3125 dev->stats.rx_length_errors++; 3126 if (status & XMR_FS_FRA_ERR) 3127 dev->stats.rx_frame_errors++; 3128 if (status & XMR_FS_FCS_ERR) 3129 dev->stats.rx_crc_errors++; 3130 } else { 3131 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) 3132 dev->stats.rx_length_errors++; 3133 if (status & GMR_FS_FRAGMENT) 3134 dev->stats.rx_frame_errors++; 3135 if (status & GMR_FS_CRC_ERR) 3136 dev->stats.rx_crc_errors++; 3137 } 3138 3139resubmit: 3140 skge_rx_reuse(e, skge->rx_buf_size); 3141 return NULL; 3142} 3143 3144/* Free all buffers in Tx ring which are no longer owned by device */ 3145static void skge_tx_done(struct net_device *dev) 3146{ 3147 struct skge_port *skge = netdev_priv(dev); 3148 struct skge_ring *ring = &skge->tx_ring; 3149 struct skge_element *e; 3150 3151 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); 3152 3153 for (e = ring->to_clean; e != ring->to_use; e = e->next) { 3154 u32 control = ((const struct skge_tx_desc *) e->desc)->control; 3155 3156 if (control & BMU_OWN) 3157 break; 3158 3159 skge_tx_free(skge, e, control); 3160 } 3161 skge->tx_ring.to_clean = e; 3162 3163 /* Can run lockless until we need to synchronize to restart queue. */ 3164 smp_mb(); 3165 3166 if (unlikely(netif_queue_stopped(dev) && 3167 skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { 3168 netif_tx_lock(dev); 3169 if (unlikely(netif_queue_stopped(dev) && 3170 skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { 3171 netif_wake_queue(dev); 3172 3173 } 3174 netif_tx_unlock(dev); 3175 } 3176} 3177 3178static int skge_poll(struct napi_struct *napi, int to_do) 3179{ 3180 struct skge_port *skge = container_of(napi, struct skge_port, napi); 3181 struct net_device *dev = skge->netdev; 3182 struct skge_hw *hw = skge->hw; 3183 struct skge_ring *ring = &skge->rx_ring; 3184 struct skge_element *e; 3185 int work_done = 0; 3186 3187 skge_tx_done(dev); 3188 3189 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); 3190 3191 for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { 3192 struct skge_rx_desc *rd = e->desc; 3193 struct sk_buff *skb; 3194 u32 control; 3195 3196 rmb(); 3197 control = rd->control; 3198 if (control & BMU_OWN) 3199 break; 3200 3201 skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); 3202 if (likely(skb)) { 3203 dev->last_rx = jiffies; 3204 netif_receive_skb(skb); 3205 3206 ++work_done; 3207 } 3208 } 3209 ring->to_clean = e; 3210 3211 /* restart receiver */ 3212 wmb(); 3213 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); 3214 3215 if (work_done < to_do) { 3216 unsigned long flags; 3217 3218 spin_lock_irqsave(&hw->hw_lock, flags); 3219 __netif_rx_complete(dev, napi); 3220 hw->intr_mask |= napimask[skge->port]; 3221 skge_write32(hw, B0_IMSK, hw->intr_mask); 3222 skge_read32(hw, B0_IMSK); 3223 spin_unlock_irqrestore(&hw->hw_lock, flags); 3224 } 3225 3226 return work_done; 3227} 3228 3229/* Parity errors seem to happen when Genesis is connected to a switch 3230 * with no other ports present. Heartbeat error?? 3231 */ 3232static void skge_mac_parity(struct skge_hw *hw, int port) 3233{ 3234 struct net_device *dev = hw->dev[port]; 3235 3236 ++dev->stats.tx_heartbeat_errors; 3237 3238 if (hw->chip_id == CHIP_ID_GENESIS) 3239 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), 3240 MFF_CLR_PERR); 3241 else 3242 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ 3243 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), 3244 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) 3245 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE); 3246} 3247 3248static void skge_mac_intr(struct skge_hw *hw, int port) 3249{ 3250 if (hw->chip_id == CHIP_ID_GENESIS) 3251 genesis_mac_intr(hw, port); 3252 else 3253 yukon_mac_intr(hw, port); 3254} 3255 3256/* Handle device specific framing and timeout interrupts */ 3257static void skge_error_irq(struct skge_hw *hw) 3258{ 3259 struct pci_dev *pdev = hw->pdev; 3260 u32 hwstatus = skge_read32(hw, B0_HWE_ISRC); 3261 3262 if (hw->chip_id == CHIP_ID_GENESIS) { 3263 /* clear xmac errors */ 3264 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) 3265 skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT); 3266 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) 3267 skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT); 3268 } else { 3269 /* Timestamp (unused) overflow */ 3270 if (hwstatus & IS_IRQ_TIST_OV) 3271 skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 3272 } 3273 3274 if (hwstatus & IS_RAM_RD_PAR) { 3275 dev_err(&pdev->dev, "Ram read data parity error\n"); 3276 skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR); 3277 } 3278 3279 if (hwstatus & IS_RAM_WR_PAR) { 3280 dev_err(&pdev->dev, "Ram write data parity error\n"); 3281 skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR); 3282 } 3283 3284 if (hwstatus & IS_M1_PAR_ERR) 3285 skge_mac_parity(hw, 0); 3286 3287 if (hwstatus & IS_M2_PAR_ERR) 3288 skge_mac_parity(hw, 1); 3289 3290 if (hwstatus & IS_R1_PAR_ERR) { 3291 dev_err(&pdev->dev, "%s: receive queue parity error\n", 3292 hw->dev[0]->name); 3293 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); 3294 } 3295 3296 if (hwstatus & IS_R2_PAR_ERR) { 3297 dev_err(&pdev->dev, "%s: receive queue parity error\n", 3298 hw->dev[1]->name); 3299 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); 3300 } 3301 3302 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { 3303 u16 pci_status, pci_cmd; 3304 3305 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 3306 pci_read_config_word(pdev, PCI_STATUS, &pci_status); 3307 3308 dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n", 3309 pci_cmd, pci_status); 3310 3311 /* Write the error bits back to clear them. */ 3312 pci_status &= PCI_STATUS_ERROR_BITS; 3313 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3314 pci_write_config_word(pdev, PCI_COMMAND, 3315 pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); 3316 pci_write_config_word(pdev, PCI_STATUS, pci_status); 3317 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3318 3319 /* if error still set then just ignore it */ 3320 hwstatus = skge_read32(hw, B0_HWE_ISRC); 3321 if (hwstatus & IS_IRQ_STAT) { 3322 dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n"); 3323 hw->intr_mask &= ~IS_HW_ERR; 3324 } 3325 } 3326} 3327 3328/* 3329 * Interrupt from PHY are handled in tasklet (softirq) 3330 * because accessing phy registers requires spin wait which might 3331 * cause excess interrupt latency. 3332 */ 3333static void skge_extirq(unsigned long arg) 3334{ 3335 struct skge_hw *hw = (struct skge_hw *) arg; 3336 int port; 3337 3338 for (port = 0; port < hw->ports; port++) { 3339 struct net_device *dev = hw->dev[port]; 3340 3341 if (netif_running(dev)) { 3342 struct skge_port *skge = netdev_priv(dev); 3343 3344 spin_lock(&hw->phy_lock); 3345 if (hw->chip_id != CHIP_ID_GENESIS) 3346 yukon_phy_intr(skge); 3347 else if (hw->phy_type == SK_PHY_BCOM) 3348 bcom_phy_intr(skge); 3349 spin_unlock(&hw->phy_lock); 3350 } 3351 } 3352 3353 spin_lock_irq(&hw->hw_lock); 3354 hw->intr_mask |= IS_EXT_REG; 3355 skge_write32(hw, B0_IMSK, hw->intr_mask); 3356 skge_read32(hw, B0_IMSK); 3357 spin_unlock_irq(&hw->hw_lock); 3358} 3359 3360static irqreturn_t skge_intr(int irq, void *dev_id) 3361{ 3362 struct skge_hw *hw = dev_id; 3363 u32 status; 3364 int handled = 0; 3365 3366 spin_lock(&hw->hw_lock); 3367 /* Reading this register masks IRQ */ 3368 status = skge_read32(hw, B0_SP_ISRC); 3369 if (status == 0 || status == ~0) 3370 goto out; 3371 3372 handled = 1; 3373 status &= hw->intr_mask; 3374 if (status & IS_EXT_REG) { 3375 hw->intr_mask &= ~IS_EXT_REG; 3376 tasklet_schedule(&hw->phy_task); 3377 } 3378 3379 if (status & (IS_XA1_F|IS_R1_F)) { 3380 struct skge_port *skge = netdev_priv(hw->dev[0]); 3381 hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); 3382 netif_rx_schedule(hw->dev[0], &skge->napi); 3383 } 3384 3385 if (status & IS_PA_TO_TX1) 3386 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); 3387 3388 if (status & IS_PA_TO_RX1) { 3389 ++hw->dev[0]->stats.rx_over_errors; 3390 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); 3391 } 3392 3393 3394 if (status & IS_MAC1) 3395 skge_mac_intr(hw, 0); 3396 3397 if (hw->dev[1]) { 3398 struct skge_port *skge = netdev_priv(hw->dev[1]); 3399 3400 if (status & (IS_XA2_F|IS_R2_F)) { 3401 hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); 3402 netif_rx_schedule(hw->dev[1], &skge->napi); 3403 } 3404 3405 if (status & IS_PA_TO_RX2) { 3406 ++hw->dev[1]->stats.rx_over_errors; 3407 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); 3408 } 3409 3410 if (status & IS_PA_TO_TX2) 3411 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2); 3412 3413 if (status & IS_MAC2) 3414 skge_mac_intr(hw, 1); 3415 } 3416 3417 if (status & IS_HW_ERR) 3418 skge_error_irq(hw); 3419 3420 skge_write32(hw, B0_IMSK, hw->intr_mask); 3421 skge_read32(hw, B0_IMSK); 3422out: 3423 spin_unlock(&hw->hw_lock); 3424 3425 return IRQ_RETVAL(handled); 3426} 3427 3428#ifdef CONFIG_NET_POLL_CONTROLLER 3429static void skge_netpoll(struct net_device *dev) 3430{ 3431 struct skge_port *skge = netdev_priv(dev); 3432 3433 disable_irq(dev->irq); 3434 skge_intr(dev->irq, skge->hw); 3435 enable_irq(dev->irq); 3436} 3437#endif 3438 3439static int skge_set_mac_address(struct net_device *dev, void *p) 3440{ 3441 struct skge_port *skge = netdev_priv(dev); 3442 struct skge_hw *hw = skge->hw; 3443 unsigned port = skge->port; 3444 const struct sockaddr *addr = p; 3445 u16 ctrl; 3446 3447 if (!is_valid_ether_addr(addr->sa_data)) 3448 return -EADDRNOTAVAIL; 3449 3450 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 3451 3452 if (!netif_running(dev)) { 3453 memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); 3454 memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); 3455 } else { 3456 /* disable Rx */ 3457 spin_lock_bh(&hw->phy_lock); 3458 ctrl = gma_read16(hw, port, GM_GP_CTRL); 3459 gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA); 3460 3461 memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); 3462 memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); 3463 3464 if (hw->chip_id == CHIP_ID_GENESIS) 3465 xm_outaddr(hw, port, XM_SA, dev->dev_addr); 3466 else { 3467 gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); 3468 gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); 3469 } 3470 3471 gma_write16(hw, port, GM_GP_CTRL, ctrl); 3472 spin_unlock_bh(&hw->phy_lock); 3473 } 3474 3475 return 0; 3476} 3477 3478static const struct { 3479 u8 id; 3480 const char *name; 3481} skge_chips[] = { 3482 { CHIP_ID_GENESIS, "Genesis" }, 3483 { CHIP_ID_YUKON, "Yukon" }, 3484 { CHIP_ID_YUKON_LITE, "Yukon-Lite"}, 3485 { CHIP_ID_YUKON_LP, "Yukon-LP"}, 3486}; 3487 3488static const char *skge_board_name(const struct skge_hw *hw) 3489{ 3490 int i; 3491 static char buf[16]; 3492 3493 for (i = 0; i < ARRAY_SIZE(skge_chips); i++) 3494 if (skge_chips[i].id == hw->chip_id) 3495 return skge_chips[i].name; 3496 3497 snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id); 3498 return buf; 3499} 3500 3501 3502/* 3503 * Setup the board data structure, but don't bring up 3504 * the port(s) 3505 */ 3506static int skge_reset(struct skge_hw *hw) 3507{ 3508 u32 reg; 3509 u16 ctst, pci_status; 3510 u8 t8, mac_cfg, pmd_type; 3511 int i; 3512 3513 ctst = skge_read16(hw, B0_CTST); 3514 3515 /* do a SW reset */ 3516 skge_write8(hw, B0_CTST, CS_RST_SET); 3517 skge_write8(hw, B0_CTST, CS_RST_CLR); 3518 3519 /* clear PCI errors, if any */ 3520 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3521 skge_write8(hw, B2_TST_CTRL2, 0); 3522 3523 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); 3524 pci_write_config_word(hw->pdev, PCI_STATUS, 3525 pci_status | PCI_STATUS_ERROR_BITS); 3526 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3527 skge_write8(hw, B0_CTST, CS_MRST_CLR); 3528 3529 /* restore CLK_RUN bits (for Yukon-Lite) */ 3530 skge_write16(hw, B0_CTST, 3531 ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA)); 3532 3533 hw->chip_id = skge_read8(hw, B2_CHIP_ID); 3534 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; 3535 pmd_type = skge_read8(hw, B2_PMD_TYP); 3536 hw->copper = (pmd_type == 'T' || pmd_type == '1'); 3537 3538 switch (hw->chip_id) { 3539 case CHIP_ID_GENESIS: 3540 switch (hw->phy_type) { 3541 case SK_PHY_XMAC: 3542 hw->phy_addr = PHY_ADDR_XMAC; 3543 break; 3544 case SK_PHY_BCOM: 3545 hw->phy_addr = PHY_ADDR_BCOM; 3546 break; 3547 default: 3548 dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n", 3549 hw->phy_type); 3550 return -EOPNOTSUPP; 3551 } 3552 break; 3553 3554 case CHIP_ID_YUKON: 3555 case CHIP_ID_YUKON_LITE: 3556 case CHIP_ID_YUKON_LP: 3557 if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S') 3558 hw->copper = 1; 3559 3560 hw->phy_addr = PHY_ADDR_MARV; 3561 break; 3562 3563 default: 3564 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", 3565 hw->chip_id); 3566 return -EOPNOTSUPP; 3567 } 3568 3569 mac_cfg = skge_read8(hw, B2_MAC_CFG); 3570 hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2; 3571 hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4; 3572 3573 /* read the adapters RAM size */ 3574 t8 = skge_read8(hw, B2_E_0); 3575 if (hw->chip_id == CHIP_ID_GENESIS) { 3576 if (t8 == 3) { 3577 /* special case: 4 x 64k x 36, offset = 0x80000 */ 3578 hw->ram_size = 0x100000; 3579 hw->ram_offset = 0x80000; 3580 } else 3581 hw->ram_size = t8 * 512; 3582 } 3583 else if (t8 == 0) 3584 hw->ram_size = 0x20000; 3585 else 3586 hw->ram_size = t8 * 4096; 3587 3588 hw->intr_mask = IS_HW_ERR; 3589 3590 /* Use PHY IRQ for all but fiber based Genesis board */ 3591 if (!(hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)) 3592 hw->intr_mask |= IS_EXT_REG; 3593 3594 if (hw->chip_id == CHIP_ID_GENESIS) 3595 genesis_init(hw); 3596 else { 3597 /* switch power to VCC (WA for VAUX problem) */ 3598 skge_write8(hw, B0_POWER_CTRL, 3599 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 3600 3601 /* avoid boards with stuck Hardware error bits */ 3602 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) && 3603 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) { 3604 dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n"); 3605 hw->intr_mask &= ~IS_HW_ERR; 3606 } 3607 3608 /* Clear PHY COMA */ 3609 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3610 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg); 3611 reg &= ~PCI_PHY_COMA; 3612 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg); 3613 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3614 3615 3616 for (i = 0; i < hw->ports; i++) { 3617 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 3618 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); 3619 } 3620 } 3621 3622 /* turn off hardware timer (unused) */ 3623 skge_write8(hw, B2_TI_CTRL, TIM_STOP); 3624 skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); 3625 skge_write8(hw, B0_LED, LED_STAT_ON); 3626 3627 /* enable the Tx Arbiters */ 3628 for (i = 0; i < hw->ports; i++) 3629 skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); 3630 3631 /* Initialize ram interface */ 3632 skge_write16(hw, B3_RI_CTRL, RI_RST_CLR); 3633 3634 skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53); 3635 skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53); 3636 skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53); 3637 skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53); 3638 skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53); 3639 skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53); 3640 skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53); 3641 skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53); 3642 skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53); 3643 skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53); 3644 skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53); 3645 skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53); 3646 3647 skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK); 3648 3649 /* Set interrupt moderation for Transmit only 3650 * Receive interrupts avoided by NAPI 3651 */ 3652 skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F); 3653 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); 3654 skge_write32(hw, B2_IRQM_CTRL, TIM_START); 3655 3656 skge_write32(hw, B0_IMSK, hw->intr_mask); 3657 3658 for (i = 0; i < hw->ports; i++) { 3659 if (hw->chip_id == CHIP_ID_GENESIS) 3660 genesis_reset(hw, i); 3661 else 3662 yukon_reset(hw, i); 3663 } 3664 3665 return 0; 3666} 3667 3668 3669#ifdef CONFIG_SKGE_DEBUG 3670 3671static struct dentry *skge_debug; 3672 3673static int skge_debug_show(struct seq_file *seq, void *v) 3674{ 3675 struct net_device *dev = seq->private; 3676 const struct skge_port *skge = netdev_priv(dev); 3677 const struct skge_hw *hw = skge->hw; 3678 const struct skge_element *e; 3679 3680 if (!netif_running(dev)) 3681 return -ENETDOWN; 3682 3683 seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC), 3684 skge_read32(hw, B0_IMSK)); 3685 3686 seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring)); 3687 for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { 3688 const struct skge_tx_desc *t = e->desc; 3689 seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n", 3690 t->control, t->dma_hi, t->dma_lo, t->status, 3691 t->csum_offs, t->csum_write, t->csum_start); 3692 } 3693 3694 seq_printf(seq, "\nRx Ring: \n"); 3695 for (e = skge->rx_ring.to_clean; ; e = e->next) { 3696 const struct skge_rx_desc *r = e->desc; 3697 3698 if (r->control & BMU_OWN) 3699 break; 3700 3701 seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n", 3702 r->control, r->dma_hi, r->dma_lo, r->status, 3703 r->timestamp, r->csum1, r->csum1_start); 3704 } 3705 3706 return 0; 3707} 3708 3709static int skge_debug_open(struct inode *inode, struct file *file) 3710{ 3711 return single_open(file, skge_debug_show, inode->i_private); 3712} 3713 3714static const struct file_operations skge_debug_fops = { 3715 .owner = THIS_MODULE, 3716 .open = skge_debug_open, 3717 .read = seq_read, 3718 .llseek = seq_lseek, 3719 .release = single_release, 3720}; 3721 3722/* 3723 * Use network device events to create/remove/rename 3724 * debugfs file entries 3725 */ 3726static int skge_device_event(struct notifier_block *unused, 3727 unsigned long event, void *ptr) 3728{ 3729 struct net_device *dev = ptr; 3730 struct skge_port *skge; 3731 struct dentry *d; 3732 3733 if (dev->open != &skge_up || !skge_debug) 3734 goto done; 3735 3736 skge = netdev_priv(dev); 3737 switch(event) { 3738 case NETDEV_CHANGENAME: 3739 if (skge->debugfs) { 3740 d = debugfs_rename(skge_debug, skge->debugfs, 3741 skge_debug, dev->name); 3742 if (d) 3743 skge->debugfs = d; 3744 else { 3745 pr_info(PFX "%s: rename failed\n", dev->name); 3746 debugfs_remove(skge->debugfs); 3747 } 3748 } 3749 break; 3750 3751 case NETDEV_GOING_DOWN: 3752 if (skge->debugfs) { 3753 debugfs_remove(skge->debugfs); 3754 skge->debugfs = NULL; 3755 } 3756 break; 3757 3758 case NETDEV_UP: 3759 d = debugfs_create_file(dev->name, S_IRUGO, 3760 skge_debug, dev, 3761 &skge_debug_fops); 3762 if (!d || IS_ERR(d)) 3763 pr_info(PFX "%s: debugfs create failed\n", 3764 dev->name); 3765 else 3766 skge->debugfs = d; 3767 break; 3768 } 3769 3770done: 3771 return NOTIFY_DONE; 3772} 3773 3774static struct notifier_block skge_notifier = { 3775 .notifier_call = skge_device_event, 3776}; 3777 3778 3779static __init void skge_debug_init(void) 3780{ 3781 struct dentry *ent; 3782 3783 ent = debugfs_create_dir("skge", NULL); 3784 if (!ent || IS_ERR(ent)) { 3785 pr_info(PFX "debugfs create directory failed\n"); 3786 return; 3787 } 3788 3789 skge_debug = ent; 3790 register_netdevice_notifier(&skge_notifier); 3791} 3792 3793static __exit void skge_debug_cleanup(void) 3794{ 3795 if (skge_debug) { 3796 unregister_netdevice_notifier(&skge_notifier); 3797 debugfs_remove(skge_debug); 3798 skge_debug = NULL; 3799 } 3800} 3801 3802#else 3803#define skge_debug_init() 3804#define skge_debug_cleanup() 3805#endif 3806 3807/* Initialize network device */ 3808static struct net_device *skge_devinit(struct skge_hw *hw, int port, 3809 int highmem) 3810{ 3811 struct skge_port *skge; 3812 struct net_device *dev = alloc_etherdev(sizeof(*skge)); 3813 3814 if (!dev) { 3815 dev_err(&hw->pdev->dev, "etherdev alloc failed\n"); 3816 return NULL; 3817 } 3818 3819 SET_NETDEV_DEV(dev, &hw->pdev->dev); 3820 dev->open = skge_up; 3821 dev->stop = skge_down; 3822 dev->do_ioctl = skge_ioctl; 3823 dev->hard_start_xmit = skge_xmit_frame; 3824 dev->get_stats = skge_get_stats; 3825 if (hw->chip_id == CHIP_ID_GENESIS) 3826 dev->set_multicast_list = genesis_set_multicast; 3827 else 3828 dev->set_multicast_list = yukon_set_multicast; 3829 3830 dev->set_mac_address = skge_set_mac_address; 3831 dev->change_mtu = skge_change_mtu; 3832 SET_ETHTOOL_OPS(dev, &skge_ethtool_ops); 3833 dev->tx_timeout = skge_tx_timeout; 3834 dev->watchdog_timeo = TX_WATCHDOG; 3835#ifdef CONFIG_NET_POLL_CONTROLLER 3836 dev->poll_controller = skge_netpoll; 3837#endif 3838 dev->irq = hw->pdev->irq; 3839 3840 if (highmem) 3841 dev->features |= NETIF_F_HIGHDMA; 3842 3843 skge = netdev_priv(dev); 3844 netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT); 3845 skge->netdev = dev; 3846 skge->hw = hw; 3847 skge->msg_enable = netif_msg_init(debug, default_msg); 3848 3849 skge->tx_ring.count = DEFAULT_TX_RING_SIZE; 3850 skge->rx_ring.count = DEFAULT_RX_RING_SIZE; 3851 3852 /* Auto speed and flow control */ 3853 skge->autoneg = AUTONEG_ENABLE; 3854 skge->flow_control = FLOW_MODE_SYM_OR_REM; 3855 skge->duplex = -1; 3856 skge->speed = -1; 3857 skge->advertising = skge_supported_modes(hw); 3858 3859 if (pci_wake_enabled(hw->pdev)) 3860 skge->wol = wol_supported(hw) & WAKE_MAGIC; 3861 3862 hw->dev[port] = dev; 3863 3864 skge->port = port; 3865 3866 /* Only used for Genesis XMAC */ 3867 setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge); 3868 3869 if (hw->chip_id != CHIP_ID_GENESIS) { 3870 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3871 skge->rx_csum = 1; 3872 } 3873 3874 /* read the mac address */ 3875 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3876 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 3877 3878 /* device is off until link detection */ 3879 netif_carrier_off(dev); 3880 netif_stop_queue(dev); 3881 3882 return dev; 3883} 3884 3885static void __devinit skge_show_addr(struct net_device *dev) 3886{ 3887 const struct skge_port *skge = netdev_priv(dev); 3888 DECLARE_MAC_BUF(mac); 3889 3890 if (netif_msg_probe(skge)) 3891 printk(KERN_INFO PFX "%s: addr %s\n", 3892 dev->name, print_mac(mac, dev->dev_addr)); 3893} 3894 3895static int __devinit skge_probe(struct pci_dev *pdev, 3896 const struct pci_device_id *ent) 3897{ 3898 struct net_device *dev, *dev1; 3899 struct skge_hw *hw; 3900 int err, using_dac = 0; 3901 3902 err = pci_enable_device(pdev); 3903 if (err) { 3904 dev_err(&pdev->dev, "cannot enable PCI device\n"); 3905 goto err_out; 3906 } 3907 3908 err = pci_request_regions(pdev, DRV_NAME); 3909 if (err) { 3910 dev_err(&pdev->dev, "cannot obtain PCI resources\n"); 3911 goto err_out_disable_pdev; 3912 } 3913 3914 pci_set_master(pdev); 3915 3916 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 3917 using_dac = 1; 3918 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3919 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 3920 using_dac = 0; 3921 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3922 } 3923 3924 if (err) { 3925 dev_err(&pdev->dev, "no usable DMA configuration\n"); 3926 goto err_out_free_regions; 3927 } 3928 3929#ifdef __BIG_ENDIAN 3930 /* byte swap descriptors in hardware */ 3931 { 3932 u32 reg; 3933 3934 pci_read_config_dword(pdev, PCI_DEV_REG2, &reg); 3935 reg |= PCI_REV_DESC; 3936 pci_write_config_dword(pdev, PCI_DEV_REG2, reg); 3937 } 3938#endif 3939 3940 err = -ENOMEM; 3941 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3942 if (!hw) { 3943 dev_err(&pdev->dev, "cannot allocate hardware struct\n"); 3944 goto err_out_free_regions; 3945 } 3946 3947 hw->pdev = pdev; 3948 spin_lock_init(&hw->hw_lock); 3949 spin_lock_init(&hw->phy_lock); 3950 tasklet_init(&hw->phy_task, &skge_extirq, (unsigned long) hw); 3951 3952 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3953 if (!hw->regs) { 3954 dev_err(&pdev->dev, "cannot map device registers\n"); 3955 goto err_out_free_hw; 3956 } 3957 3958 err = skge_reset(hw); 3959 if (err) 3960 goto err_out_iounmap; 3961 3962 printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n", 3963 (unsigned long long)pci_resource_start(pdev, 0), pdev->irq, 3964 skge_board_name(hw), hw->chip_rev); 3965 3966 dev = skge_devinit(hw, 0, using_dac); 3967 if (!dev) 3968 goto err_out_led_off; 3969 3970 /* Some motherboards are broken and has zero in ROM. */ 3971 if (!is_valid_ether_addr(dev->dev_addr)) 3972 dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n"); 3973 3974 err = register_netdev(dev); 3975 if (err) { 3976 dev_err(&pdev->dev, "cannot register net device\n"); 3977 goto err_out_free_netdev; 3978 } 3979 3980 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); 3981 if (err) { 3982 dev_err(&pdev->dev, "%s: cannot assign irq %d\n", 3983 dev->name, pdev->irq); 3984 goto err_out_unregister; 3985 } 3986 skge_show_addr(dev); 3987 3988 if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) { 3989 if (register_netdev(dev1) == 0) 3990 skge_show_addr(dev1); 3991 else { 3992 /* Failure to register second port need not be fatal */ 3993 dev_warn(&pdev->dev, "register of second port failed\n"); 3994 hw->dev[1] = NULL; 3995 free_netdev(dev1); 3996 } 3997 } 3998 pci_set_drvdata(pdev, hw); 3999 4000 return 0; 4001 4002err_out_unregister: 4003 unregister_netdev(dev); 4004err_out_free_netdev: 4005 free_netdev(dev); 4006err_out_led_off: 4007 skge_write16(hw, B0_LED, LED_STAT_OFF); 4008err_out_iounmap: 4009 iounmap(hw->regs); 4010err_out_free_hw: 4011 kfree(hw); 4012err_out_free_regions: 4013 pci_release_regions(pdev); 4014err_out_disable_pdev: 4015 pci_disable_device(pdev); 4016 pci_set_drvdata(pdev, NULL); 4017err_out: 4018 return err; 4019} 4020 4021static void __devexit skge_remove(struct pci_dev *pdev) 4022{ 4023 struct skge_hw *hw = pci_get_drvdata(pdev); 4024 struct net_device *dev0, *dev1; 4025 4026 if (!hw) 4027 return; 4028 4029 flush_scheduled_work(); 4030 4031 if ((dev1 = hw->dev[1])) 4032 unregister_netdev(dev1); 4033 dev0 = hw->dev[0]; 4034 unregister_netdev(dev0); 4035 4036 tasklet_disable(&hw->phy_task); 4037 4038 spin_lock_irq(&hw->hw_lock); 4039 hw->intr_mask = 0; 4040 skge_write32(hw, B0_IMSK, 0); 4041 skge_read32(hw, B0_IMSK); 4042 spin_unlock_irq(&hw->hw_lock); 4043 4044 skge_write16(hw, B0_LED, LED_STAT_OFF); 4045 skge_write8(hw, B0_CTST, CS_RST_SET); 4046 4047 free_irq(pdev->irq, hw); 4048 pci_release_regions(pdev); 4049 pci_disable_device(pdev); 4050 if (dev1) 4051 free_netdev(dev1); 4052 free_netdev(dev0); 4053 4054 iounmap(hw->regs); 4055 kfree(hw); 4056 pci_set_drvdata(pdev, NULL); 4057} 4058 4059#ifdef CONFIG_PM 4060static int skge_suspend(struct pci_dev *pdev, pm_message_t state) 4061{ 4062 struct skge_hw *hw = pci_get_drvdata(pdev); 4063 int i, err, wol = 0; 4064 4065 if (!hw) 4066 return 0; 4067 4068 err = pci_save_state(pdev); 4069 if (err) 4070 return err; 4071 4072 for (i = 0; i < hw->ports; i++) { 4073 struct net_device *dev = hw->dev[i]; 4074 struct skge_port *skge = netdev_priv(dev); 4075 4076 if (netif_running(dev)) 4077 skge_down(dev); 4078 if (skge->wol) 4079 skge_wol_init(skge); 4080 4081 wol |= skge->wol; 4082 } 4083 4084 skge_write32(hw, B0_IMSK, 0); 4085 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); 4086 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4087 4088 return 0; 4089} 4090 4091static int skge_resume(struct pci_dev *pdev) 4092{ 4093 struct skge_hw *hw = pci_get_drvdata(pdev); 4094 int i, err; 4095 4096 if (!hw) 4097 return 0; 4098 4099 err = pci_set_power_state(pdev, PCI_D0); 4100 if (err) 4101 goto out; 4102 4103 err = pci_restore_state(pdev); 4104 if (err) 4105 goto out; 4106 4107 pci_enable_wake(pdev, PCI_D0, 0); 4108 4109 err = skge_reset(hw); 4110 if (err) 4111 goto out; 4112 4113 for (i = 0; i < hw->ports; i++) { 4114 struct net_device *dev = hw->dev[i]; 4115 4116 if (netif_running(dev)) { 4117 err = skge_up(dev); 4118 4119 if (err) { 4120 printk(KERN_ERR PFX "%s: could not up: %d\n", 4121 dev->name, err); 4122 dev_close(dev); 4123 goto out; 4124 } 4125 } 4126 } 4127out: 4128 return err; 4129} 4130#endif 4131 4132static void skge_shutdown(struct pci_dev *pdev) 4133{ 4134 struct skge_hw *hw = pci_get_drvdata(pdev); 4135 int i, wol = 0; 4136 4137 if (!hw) 4138 return; 4139 4140 for (i = 0; i < hw->ports; i++) { 4141 struct net_device *dev = hw->dev[i]; 4142 struct skge_port *skge = netdev_priv(dev); 4143 4144 if (skge->wol) 4145 skge_wol_init(skge); 4146 wol |= skge->wol; 4147 } 4148 4149 pci_enable_wake(pdev, PCI_D3hot, wol); 4150 pci_enable_wake(pdev, PCI_D3cold, wol); 4151 4152 pci_disable_device(pdev); 4153 pci_set_power_state(pdev, PCI_D3hot); 4154 4155} 4156 4157static struct pci_driver skge_driver = { 4158 .name = DRV_NAME, 4159 .id_table = skge_id_table, 4160 .probe = skge_probe, 4161 .remove = __devexit_p(skge_remove), 4162#ifdef CONFIG_PM 4163 .suspend = skge_suspend, 4164 .resume = skge_resume, 4165#endif 4166 .shutdown = skge_shutdown, 4167}; 4168 4169static int __init skge_init_module(void) 4170{ 4171 skge_debug_init(); 4172 return pci_register_driver(&skge_driver); 4173} 4174 4175static void __exit skge_cleanup_module(void) 4176{ 4177 pci_unregister_driver(&skge_driver); 4178 skge_debug_cleanup(); 4179} 4180 4181module_init(skge_init_module); 4182module_exit(skge_cleanup_module);