Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/net/skge.c at v2.6.30 (4179 lines, 108 kB)
/*
 * New driver for Marvell Yukon chipset and SysKonnect Gigabit
 * Ethernet adapters. Based on earlier sk98lin, e100 and
 * FreeBSD if_sk drivers.
 *
 * This driver intentionally does not support all the features
 * of the original driver such as link fail-over and link management because
 * those should be done at higher levels.
 *
 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/mii.h>
#include <asm/irq.h>

#include "skge.h"

#define DRV_NAME		"skge"
#define DRV_VERSION		"1.13"
#define PFX			DRV_NAME " "

#define DEFAULT_TX_RING_SIZE	128
#define DEFAULT_RX_RING_SIZE	512
#define MAX_TX_RING_SIZE	1024
#define TX_LOW_WATER		(MAX_SKB_FRAGS + 1)
#define MAX_RX_RING_SIZE	4096
#define RX_COPY_THRESHOLD	128
#define RX_BUF_SIZE		1536
#define PHY_RETRIES		1000
#define ETH_JUMBO_MTU		9000
#define TX_WATCHDOG		(5 * HZ)
#define NAPI_WEIGHT		64
#define BLINK_MS		250
#define LINK_HZ			HZ

#define SKGE_EEPROM_MAGIC	0x9933aabb

MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
      | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
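/*
 * Editor's note (usage sketch, not part of the original source): the
 * debug parameter is a NETIF_MSG_* bitmask, so it can be set at module
 * load time, e.g.
 *
 *     modprobe skge debug=-1      # keep the default_msg bits above
 *     modprobe skge debug=0x37    # DRV|PROBE|LINK|IFUP|IFDOWN explicitly
 *
 * The same mask is reachable at runtime through skge_set_msglevel()
 * below, i.e. "ethtool -s ethX msglvl 0x37". Bit values follow
 * include/linux/netdevice.h for this kernel version.
 */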
static const struct pci_device_id skge_id_table[] = {
    { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
    { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
    { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
    { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
    { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T) },
    { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },	/* DGE-530T */
    { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
    { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) },	/* Belkin */
    { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
    { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
    { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 },
    { 0 }
};
MODULE_DEVICE_TABLE(pci, skge_id_table);

static int skge_up(struct net_device *dev);
static int skge_down(struct net_device *dev);
static void skge_phy_reset(struct skge_port *skge);
static void skge_tx_clean(struct net_device *dev);
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static void genesis_get_stats(struct skge_port *skge, u64 *data);
static void yukon_get_stats(struct skge_port *skge, u64 *data);
static void yukon_init(struct skge_hw *hw, int port);
static void genesis_mac_init(struct skge_hw *hw, int port);
static void genesis_link_up(struct skge_port *skge);
static void skge_set_multicast(struct net_device *dev);

/* Avoid conditionals by using array */
static const int txqaddr[] = { Q_XA1, Q_XA2 };
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };

static int skge_get_regs_len(struct net_device *dev)
{
    return 0x4000;
}

/*
 * Returns copy of whole control register region
 * Note: skip RAM address register because accessing it will
 *	 cause bus hangs!
 */
static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
    const struct skge_port *skge = netdev_priv(dev);
    const void __iomem *io = skge->hw->regs;

    regs->version = 1;
    memset(p, 0, regs->len);
    memcpy_fromio(p, io, B3_RAM_ADDR);

    memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
		  regs->len - B3_RI_WTO_R1);
}

/* Wake on LAN only supported on Yukon chips with rev 1 or above */
static u32 wol_supported(const struct skge_hw *hw)
{
    if (hw->chip_id == CHIP_ID_GENESIS)
	return 0;

    if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
	return 0;

    return WAKE_MAGIC | WAKE_PHY;
}

static void skge_wol_init(struct skge_port *skge)
{
    struct skge_hw *hw = skge->hw;
    int port = skge->port;
    u16 ctrl;

    skge_write16(hw, B0_CTST, CS_RST_CLR);
    skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);

    /* Turn on Vaux */
    skge_write8(hw, B0_POWER_CTRL,
		PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);

    /* WA code for COMA mode -- clear PHY reset */
    if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
	u32 reg = skge_read32(hw, B2_GP_IO);
	reg |= GP_DIR_9;
	reg &= ~GP_IO_9;
	skge_write32(hw, B2_GP_IO, reg);
    }

    skge_write32(hw, SK_REG(port, GPHY_CTRL),
		 GPC_DIS_SLEEP |
		 GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
		 GPC_ANEG_1 | GPC_RST_SET);

    skge_write32(hw, SK_REG(port, GPHY_CTRL),
		 GPC_DIS_SLEEP |
		 GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
		 GPC_ANEG_1 | GPC_RST_CLR);

    skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

    /* Force to 10/100; skge_reset will re-enable on resume */
    gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
		 PHY_AN_100FULL | PHY_AN_100HALF |
		 PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA);
    /* no 1000 HD/FD */
    gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
    gm_phy_write(hw, port, PHY_MARV_CTRL,
		 PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE |
		 PHY_CT_RE_CFG | PHY_CT_DUP_MD);

    /* Set GMAC to no flow control and auto update for speed/duplex */
    gma_write16(hw, port, GM_GP_CTRL,
		GM_GPCR_FC_TX_DIS | GM_GPCR_TX_ENA | GM_GPCR_RX_ENA |
		GM_GPCR_DUP_FULL | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS);

    /* Set WOL address */
    memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
		skge->netdev->dev_addr, ETH_ALEN);

    /* Turn on appropriate WOL control bits */
    skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
    ctrl = 0;
    if (skge->wol & WAKE_PHY)
	ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG | WOL_CTL_ENA_LINK_CHG_UNIT;
    else
	ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG | WOL_CTL_DIS_LINK_CHG_UNIT;

    if (skge->wol & WAKE_MAGIC)
	ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT | WOL_CTL_ENA_MAGIC_PKT_UNIT;
    else
	ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT | WOL_CTL_DIS_MAGIC_PKT_UNIT;

    ctrl |= WOL_CTL_DIS_PME_ON_PATTERN | WOL_CTL_DIS_PATTERN_UNIT;
    skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);

    /* block receiver */
    skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
}

static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
    struct skge_port *skge = netdev_priv(dev);

    wol->supported = wol_supported(skge->hw);
    wol->wolopts = skge->wol;
}

static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
    struct skge_port *skge = netdev_priv(dev);
    struct skge_hw *hw = skge->hw;

    if ((wol->wolopts & ~wol_supported(hw))
	|| !device_can_wakeup(&hw->pdev->dev))
	return -EOPNOTSUPP;

    skge->wol = wol->wolopts;

    device_set_wakeup_enable(&hw->pdev->dev, skge->wol);

    return 0;
}

/* Determine supported/advertised modes based on hardware.
 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
 */
static u32 skge_supported_modes(const struct skge_hw *hw)
{
    u32 supported;

    if (hw->copper) {
	supported = SUPPORTED_10baseT_Half
	    | SUPPORTED_10baseT_Full
	    | SUPPORTED_100baseT_Half
	    | SUPPORTED_100baseT_Full
	    | SUPPORTED_1000baseT_Half
	    | SUPPORTED_1000baseT_Full
	    | SUPPORTED_Autoneg | SUPPORTED_TP;

	if (hw->chip_id == CHIP_ID_GENESIS)
	    supported &= ~(SUPPORTED_10baseT_Half
			   | SUPPORTED_10baseT_Full
			   | SUPPORTED_100baseT_Half
			   | SUPPORTED_100baseT_Full);

	else if (hw->chip_id == CHIP_ID_YUKON)
	    supported &= ~SUPPORTED_1000baseT_Half;
    } else
	supported = SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half
	    | SUPPORTED_FIBRE | SUPPORTED_Autoneg;

    return supported;
}

static int skge_get_settings(struct net_device *dev,
			     struct ethtool_cmd *ecmd)
{
    struct skge_port *skge = netdev_priv(dev);
    struct skge_hw *hw = skge->hw;

    ecmd->transceiver = XCVR_INTERNAL;
    ecmd->supported = skge_supported_modes(hw);

    if (hw->copper) {
	ecmd->port = PORT_TP;
	ecmd->phy_address = hw->phy_addr;
    } else
	ecmd->port = PORT_FIBRE;

    ecmd->advertising = skge->advertising;
    ecmd->autoneg = skge->autoneg;
    ecmd->speed = skge->speed;
    ecmd->duplex = skge->duplex;
    return 0;
}

static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
    struct skge_port *skge = netdev_priv(dev);
    const struct skge_hw *hw = skge->hw;
    u32 supported = skge_supported_modes(hw);
    int err = 0;

    if (ecmd->autoneg == AUTONEG_ENABLE) {
	ecmd->advertising = supported;
	skge->duplex = -1;
	skge->speed = -1;
    } else {
	u32 setting;

	switch (ecmd->speed) {
	case SPEED_1000:
	    if (ecmd->duplex == DUPLEX_FULL)
		setting = SUPPORTED_1000baseT_Full;
	    else if (ecmd->duplex == DUPLEX_HALF)
		setting = SUPPORTED_1000baseT_Half;
	    else
		return -EINVAL;
	    break;
	case SPEED_100:
	    if (ecmd->duplex == DUPLEX_FULL)
		setting = SUPPORTED_100baseT_Full;
	    else if (ecmd->duplex == DUPLEX_HALF)
		setting = SUPPORTED_100baseT_Half;
	    else
		return -EINVAL;
	    break;

	case SPEED_10:
	    if (ecmd->duplex == DUPLEX_FULL)
		setting = SUPPORTED_10baseT_Full;
	    else if (ecmd->duplex == DUPLEX_HALF)
		setting = SUPPORTED_10baseT_Half;
	    else
		return -EINVAL;
	    break;
	default:
	    return -EINVAL;
	}

	if ((setting & supported) == 0)
	    return -EINVAL;

	skge->speed = ecmd->speed;
	skge->duplex = ecmd->duplex;
    }

    skge->autoneg = ecmd->autoneg;
    skge->advertising = ecmd->advertising;

    if (netif_running(dev)) {
	skge_down(dev);
	err = skge_up(dev);
	if (err) {
	    dev_close(dev);
	    return err;
	}
    }

    return 0;
}

static void skge_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
    struct skge_port *skge = netdev_priv(dev);

    strcpy(info->driver, DRV_NAME);
    strcpy(info->version, DRV_VERSION);
    strcpy(info->fw_version, "N/A");
    strcpy(info->bus_info, pci_name(skge->hw->pdev));
}

static const struct skge_stat {
    char name[ETH_GSTRING_LEN];
    u16 xmac_offset;
    u16 gma_offset;
} skge_stats[] = {
    { "tx_bytes",	XM_TXO_OK_HI,	GM_TXO_OK_HI },
    { "rx_bytes",	XM_RXO_OK_HI,	GM_RXO_OK_HI },

    { "tx_broadcast",	XM_TXF_BC_OK,	GM_TXF_BC_OK },
    { "rx_broadcast",	XM_RXF_BC_OK,	GM_RXF_BC_OK },
    { "tx_multicast",	XM_TXF_MC_OK,	GM_TXF_MC_OK },
    { "rx_multicast",	XM_RXF_MC_OK,	GM_RXF_MC_OK },
    { "tx_unicast",	XM_TXF_UC_OK,	GM_TXF_UC_OK },
    { "rx_unicast",	XM_RXF_UC_OK,	GM_RXF_UC_OK },
    { "tx_mac_pause",	XM_TXF_MPAUSE,	GM_TXF_MPAUSE },
    { "rx_mac_pause",	XM_RXF_MPAUSE,	GM_RXF_MPAUSE },

    { "collisions",	XM_TXF_SNG_COL,	GM_TXF_SNG_COL },
    { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL },
    { "aborted",	XM_TXF_ABO_COL,	GM_TXF_ABO_COL },
    { "late_collision",	XM_TXF_LAT_COL,	GM_TXF_LAT_COL },
    { "fifo_underrun",	XM_TXE_FIFO_UR,	GM_TXE_FIFO_UR },
    { "fifo_overflow",	XM_RXE_FIFO_OV,	GM_RXE_FIFO_OV },

    { "rx_toolong",	XM_RXF_LNG_ERR,	GM_RXF_LNG_ERR },
    { "rx_jabber",	XM_RXF_JAB_PKT,	GM_RXF_JAB_PKT },
    { "rx_runt",	XM_RXE_RUNT,	GM_RXE_FRAG },
    { "rx_too_long",	XM_RXF_LNG_ERR,	GM_RXF_LNG_ERR },
    { "rx_fcs_error",	XM_RXF_FCS_ERR,	GM_RXF_FCS_ERR },
};

static int skge_get_sset_count(struct net_device *dev, int sset)
{
    switch (sset) {
    case ETH_SS_STATS:
	return ARRAY_SIZE(skge_stats);
    default:
	return -EOPNOTSUPP;
    }
}

static void skge_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
    struct skge_port *skge = netdev_priv(dev);

    if (skge->hw->chip_id == CHIP_ID_GENESIS)
	genesis_get_stats(skge, data);
    else
	yukon_get_stats(skge, data);
}

/* Use hardware MIB variables for critical path statistics and
 * transmit feedback not reported at interrupt.
 * Other errors are accounted for in interrupt handler.
 */
static struct net_device_stats *skge_get_stats(struct net_device *dev)
{
    struct skge_port *skge = netdev_priv(dev);
    u64 data[ARRAY_SIZE(skge_stats)];

    if (skge->hw->chip_id == CHIP_ID_GENESIS)
	genesis_get_stats(skge, data);
    else
	yukon_get_stats(skge, data);

    dev->stats.tx_bytes = data[0];
    dev->stats.rx_bytes = data[1];
    dev->stats.tx_packets = data[2] + data[4] + data[6];
    dev->stats.rx_packets = data[3] + data[5] + data[7];
    dev->stats.multicast = data[3] + data[5];
    dev->stats.collisions = data[10];
    dev->stats.tx_aborted_errors = data[12];

    return &dev->stats;
}
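/*
 * Editor's note (not part of the original source): the data[] indices
 * above follow the skge_stats[] table order, so data[2]/data[4]/data[6]
 * are the tx broadcast/multicast/unicast frame counts (their sum is
 * tx_packets), data[3]/data[5]/data[7] the rx counterparts, data[10]
 * single collisions and data[12] aborted transmits.
 */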
static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
    int i;

    switch (stringset) {
    case ETH_SS_STATS:
	for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
	    memcpy(data + i * ETH_GSTRING_LEN,
		   skge_stats[i].name, ETH_GSTRING_LEN);
	break;
    }
}

static void skge_get_ring_param(struct net_device *dev,
				struct ethtool_ringparam *p)
{
    struct skge_port *skge = netdev_priv(dev);

    p->rx_max_pending = MAX_RX_RING_SIZE;
    p->tx_max_pending = MAX_TX_RING_SIZE;
    p->rx_mini_max_pending = 0;
    p->rx_jumbo_max_pending = 0;

    p->rx_pending = skge->rx_ring.count;
    p->tx_pending = skge->tx_ring.count;
    p->rx_mini_pending = 0;
    p->rx_jumbo_pending = 0;
}

static int skge_set_ring_param(struct net_device *dev,
			       struct ethtool_ringparam *p)
{
    struct skge_port *skge = netdev_priv(dev);
    int err = 0;

    if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
	p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
	return -EINVAL;

    skge->rx_ring.count = p->rx_pending;
    skge->tx_ring.count = p->tx_pending;

    if (netif_running(dev)) {
	skge_down(dev);
	err = skge_up(dev);
	if (err)
	    dev_close(dev);
    }

    return err;
}

static u32 skge_get_msglevel(struct net_device *netdev)
{
    struct skge_port *skge = netdev_priv(netdev);
    return skge->msg_enable;
}

static void skge_set_msglevel(struct net_device *netdev, u32 value)
{
    struct skge_port *skge = netdev_priv(netdev);
    skge->msg_enable = value;
}

static int skge_nway_reset(struct net_device *dev)
{
    struct skge_port *skge = netdev_priv(dev);

    if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
	return -EINVAL;

    skge_phy_reset(skge);
    return 0;
}

static int skge_set_sg(struct net_device *dev, u32 data)
{
    struct skge_port *skge = netdev_priv(dev);
    struct skge_hw *hw = skge->hw;

    if (hw->chip_id == CHIP_ID_GENESIS && data)
	return -EOPNOTSUPP;
    return ethtool_op_set_sg(dev, data);
}

static int skge_set_tx_csum(struct net_device *dev, u32 data)
{
    struct skge_port *skge = netdev_priv(dev);
    struct skge_hw *hw = skge->hw;

    if (hw->chip_id == CHIP_ID_GENESIS && data)
	return -EOPNOTSUPP;

    return ethtool_op_set_tx_csum(dev, data);
}

static u32 skge_get_rx_csum(struct net_device *dev)
{
    struct skge_port *skge = netdev_priv(dev);

    return skge->rx_csum;
}

/* Only Yukon supports checksum offload. */
static int skge_set_rx_csum(struct net_device *dev, u32 data)
{
    struct skge_port *skge = netdev_priv(dev);

    if (skge->hw->chip_id == CHIP_ID_GENESIS && data)
	return -EOPNOTSUPP;

    skge->rx_csum = data;
    return 0;
}

static void skge_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *ecmd)
{
    struct skge_port *skge = netdev_priv(dev);

    ecmd->rx_pause = (skge->flow_control == FLOW_MODE_SYMMETRIC)
	|| (skge->flow_control == FLOW_MODE_SYM_OR_REM);
    ecmd->tx_pause = ecmd->rx_pause || (skge->flow_control == FLOW_MODE_LOC_SEND);

    ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;
}

static int skge_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *ecmd)
{
    struct skge_port *skge = netdev_priv(dev);
    struct ethtool_pauseparam old;
    int err = 0;

    skge_get_pauseparam(dev, &old);

    if (ecmd->autoneg != old.autoneg)
	skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC;
    else {
	if (ecmd->rx_pause && ecmd->tx_pause)
	    skge->flow_control = FLOW_MODE_SYMMETRIC;
	else if (ecmd->rx_pause && !ecmd->tx_pause)
	    skge->flow_control = FLOW_MODE_SYM_OR_REM;
	else if (!ecmd->rx_pause && ecmd->tx_pause)
	    skge->flow_control = FLOW_MODE_LOC_SEND;
	else
	    skge->flow_control = FLOW_MODE_NONE;
    }

    if (netif_running(dev)) {
	skge_down(dev);
	err = skge_up(dev);
	if (err) {
	    dev_close(dev);
	    return err;
	}
    }

    return 0;
}

/* Chip internal frequency for clock calculations */
static inline u32 hwkhz(const struct skge_hw *hw)
{
    return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
}

/* Chip HZ to microseconds */
static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
{
    return (ticks * 1000) / hwkhz(hw);
}

/* Microseconds to chip HZ */
static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
{
    return hwkhz(hw) * usec / 1000;
}
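/*
 * Editor's note (worked example, not part of the original source):
 * hwkhz() returns the timer clock in kHz, so the conversions are just
 * ticks = kHz * usec / 1000 and usec = ticks * 1000 / kHz. On Genesis
 * (53.125 MHz) a 25 us moderation delay becomes 53125 * 25 / 1000 =
 * 1328 ticks; on Yukon (78.125 MHz) the same delay is 1953 ticks.
 */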
static int skge_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
    struct skge_port *skge = netdev_priv(dev);
    struct skge_hw *hw = skge->hw;
    int port = skge->port;

    ecmd->rx_coalesce_usecs = 0;
    ecmd->tx_coalesce_usecs = 0;

    if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
	u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
	u32 msk = skge_read32(hw, B2_IRQM_MSK);

	if (msk & rxirqmask[port])
	    ecmd->rx_coalesce_usecs = delay;
	if (msk & txirqmask[port])
	    ecmd->tx_coalesce_usecs = delay;
    }

    return 0;
}

/* Note: interrupt timer is per board, but can turn on/off per port */
static int skge_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
    struct skge_port *skge = netdev_priv(dev);
    struct skge_hw *hw = skge->hw;
    int port = skge->port;
    u32 msk = skge_read32(hw, B2_IRQM_MSK);
    u32 delay = 25;

    if (ecmd->rx_coalesce_usecs == 0)
	msk &= ~rxirqmask[port];
    else if (ecmd->rx_coalesce_usecs < 25 ||
	     ecmd->rx_coalesce_usecs > 33333)
	return -EINVAL;
    else {
	msk |= rxirqmask[port];
	delay = ecmd->rx_coalesce_usecs;
    }

    if (ecmd->tx_coalesce_usecs == 0)
	msk &= ~txirqmask[port];
    else if (ecmd->tx_coalesce_usecs < 25 ||
	     ecmd->tx_coalesce_usecs > 33333)
	return -EINVAL;
    else {
	msk |= txirqmask[port];
	delay = min(delay, ecmd->rx_coalesce_usecs);
    }

    skge_write32(hw, B2_IRQM_MSK, msk);
    if (msk == 0)
	skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
    else {
	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
	skge_write32(hw, B2_IRQM_CTRL, TIM_START);
    }
    return 0;
}
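/*
 * Editor's note (usage sketch, not part of the original source): the
 * moderation timer is shared board-wide, so the standard ethtool knobs,
 * e.g.
 *
 *     ethtool -C eth0 rx-usecs 50 tx-usecs 50
 *
 * program a single B2_IRQM_INI delay (valid range 25..33333 us) and
 * merely mask or unmask the rx/tx interrupt sources per port;
 * rx-usecs 0 or tx-usecs 0 disables moderation for that direction.
 */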
enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };
static void skge_led(struct skge_port *skge, enum led_mode mode)
{
    struct skge_hw *hw = skge->hw;
    int port = skge->port;

    spin_lock_bh(&hw->phy_lock);
    if (hw->chip_id == CHIP_ID_GENESIS) {
	switch (mode) {
	case LED_MODE_OFF:
	    if (hw->phy_type == SK_PHY_BCOM)
		xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
	    else {
		skge_write32(hw, SK_REG(port, TX_LED_VAL), 0);
		skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF);
	    }
	    skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
	    skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
	    skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
	    break;

	case LED_MODE_ON:
	    skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
	    skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);

	    skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
	    skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);

	    break;

	case LED_MODE_TST:
	    skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
	    skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
	    skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);

	    if (hw->phy_type == SK_PHY_BCOM)
		xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
	    else {
		skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON);
		skge_write32(hw, SK_REG(port, TX_LED_VAL), 100);
		skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
	    }

	}
    } else {
	switch (mode) {
	case LED_MODE_OFF:
	    gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
	    gm_phy_write(hw, port, PHY_MARV_LED_OVER,
			 PHY_M_LED_MO_DUP(MO_LED_OFF) |
			 PHY_M_LED_MO_10(MO_LED_OFF) |
			 PHY_M_LED_MO_100(MO_LED_OFF) |
			 PHY_M_LED_MO_1000(MO_LED_OFF) |
			 PHY_M_LED_MO_RX(MO_LED_OFF));
	    break;
	case LED_MODE_ON:
	    gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
			 PHY_M_LED_PULS_DUR(PULS_170MS) |
			 PHY_M_LED_BLINK_RT(BLINK_84MS) |
			 PHY_M_LEDC_TX_CTRL |
			 PHY_M_LEDC_DP_CTRL);

	    gm_phy_write(hw, port, PHY_MARV_LED_OVER,
			 PHY_M_LED_MO_RX(MO_LED_OFF) |
			 (skge->speed == SPEED_100 ?
			  PHY_M_LED_MO_100(MO_LED_ON) : 0));
	    break;
	case LED_MODE_TST:
	    gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
	    gm_phy_write(hw, port, PHY_MARV_LED_OVER,
			 PHY_M_LED_MO_DUP(MO_LED_ON) |
			 PHY_M_LED_MO_10(MO_LED_ON) |
			 PHY_M_LED_MO_100(MO_LED_ON) |
			 PHY_M_LED_MO_1000(MO_LED_ON) |
			 PHY_M_LED_MO_RX(MO_LED_ON));
	}
    }
    spin_unlock_bh(&hw->phy_lock);
}

/* blink LEDs for finding board */
static int skge_phys_id(struct net_device *dev, u32 data)
{
    struct skge_port *skge = netdev_priv(dev);
    unsigned long ms;
    enum led_mode mode = LED_MODE_TST;

    if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
	ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT / HZ) * 1000;
    else
	ms = data * 1000;

    while (ms > 0) {
	skge_led(skge, mode);
	mode ^= LED_MODE_TST;

	if (msleep_interruptible(BLINK_MS))
	    break;
	ms -= BLINK_MS;
    }

    /* back to regular LED state */
    skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);

    return 0;
}
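/*
 * Editor's note (usage sketch, not part of the original source):
 * .phys_id is the ethtool "identify" hook, so "ethtool -p eth0 5"
 * makes skge_phys_id toggle the port LEDs between test and off every
 * BLINK_MS (250 ms) for about 5 seconds (mode ^= LED_MODE_TST flips
 * between LED_MODE_TST and LED_MODE_OFF), then restores the normal
 * LED state.
 */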
static int skge_get_eeprom_len(struct net_device *dev)
{
    struct skge_port *skge = netdev_priv(dev);
    u32 reg2;

    pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
    return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
}
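/*
 * Editor's note (worked example, not part of the original source): the
 * VPD ROM size code sits at bit 14 of PCI_DEV_REG2 (masked by
 * PCI_VPD_ROM_SZ), and the reported EEPROM length is 1 << (code + 8)
 * bytes: code 0 -> 256 bytes, 1 -> 512, 2 -> 1024, 3 -> 2048.
 */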
static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
{
    u32 val;

    pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);

    do {
	pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
    } while (!(offset & PCI_VPD_ADDR_F));

    pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
    return val;
}

static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
{
    pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
    pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
			  offset | PCI_VPD_ADDR_F);

    do {
	pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
    } while (offset & PCI_VPD_ADDR_F);
}

static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
			   u8 *data)
{
    struct skge_port *skge = netdev_priv(dev);
    struct pci_dev *pdev = skge->hw->pdev;
    int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
    int length = eeprom->len;
    u16 offset = eeprom->offset;

    if (!cap)
	return -EINVAL;

    eeprom->magic = SKGE_EEPROM_MAGIC;

    while (length > 0) {
	u32 val = skge_vpd_read(pdev, cap, offset);
	int n = min_t(int, length, sizeof(val));

	memcpy(data, &val, n);
	length -= n;
	data += n;
	offset += n;
    }
    return 0;
}

static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
			   u8 *data)
{
    struct skge_port *skge = netdev_priv(dev);
    struct pci_dev *pdev = skge->hw->pdev;
    int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
    int length = eeprom->len;
    u16 offset = eeprom->offset;

    if (!cap)
	return -EINVAL;

    if (eeprom->magic != SKGE_EEPROM_MAGIC)
	return -EINVAL;

    while (length > 0) {
	u32 val;
	int n = min_t(int, length, sizeof(val));

	if (n < sizeof(val))
	    val = skge_vpd_read(pdev, cap, offset);
	memcpy(&val, data, n);

	skge_vpd_write(pdev, cap, offset, val);

	length -= n;
	data += n;
	offset += n;
    }
    return 0;
}

static const struct ethtool_ops skge_ethtool_ops = {
    .get_settings	= skge_get_settings,
    .set_settings	= skge_set_settings,
    .get_drvinfo	= skge_get_drvinfo,
    .get_regs_len	= skge_get_regs_len,
    .get_regs		= skge_get_regs,
    .get_wol		= skge_get_wol,
    .set_wol		= skge_set_wol,
    .get_msglevel	= skge_get_msglevel,
    .set_msglevel	= skge_set_msglevel,
    .nway_reset		= skge_nway_reset,
    .get_link		= ethtool_op_get_link,
    .get_eeprom_len	= skge_get_eeprom_len,
    .get_eeprom		= skge_get_eeprom,
    .set_eeprom		= skge_set_eeprom,
    .get_ringparam	= skge_get_ring_param,
    .set_ringparam	= skge_set_ring_param,
    .get_pauseparam	= skge_get_pauseparam,
    .set_pauseparam	= skge_set_pauseparam,
    .get_coalesce	= skge_get_coalesce,
    .set_coalesce	= skge_set_coalesce,
    .set_sg		= skge_set_sg,
    .set_tx_csum	= skge_set_tx_csum,
    .get_rx_csum	= skge_get_rx_csum,
    .set_rx_csum	= skge_set_rx_csum,
    .get_strings	= skge_get_strings,
    .phys_id		= skge_phys_id,
    .get_sset_count	= skge_get_sset_count,
    .get_ethtool_stats	= skge_get_ethtool_stats,
};

/*
 * Allocate ring elements and chain them together
 * One-to-one association of board descriptors with ring elements
 */
static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
{
    struct skge_tx_desc *d;
    struct skge_element *e;
    int i;

    ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
    if (!ring->start)
	return -ENOMEM;

    for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
	e->desc = d;
	if (i == ring->count - 1) {
	    e->next = ring->start;
	    d->next_offset = base;
	} else {
	    e->next = e + 1;
	    d->next_offset = base + (i+1) * sizeof(*d);
	}
    }
    ring->to_use = ring->to_clean = ring->start;

    return 0;
}
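/*
 * Editor's note (not part of the original source): skge_ring_alloc()
 * builds two parallel circular lists over one DMA block. Element i's
 * software link e->next points at element i+1, while its hardware
 * descriptor's next_offset holds the bus address base +
 * (i+1) * sizeof(desc); the last element points back to ring->start /
 * base, so both the driver and the BMU can walk the ring indefinitely
 * without a wrap-around conditional in the hot path.
 */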
/* Allocate and setup a new buffer for receiving */
static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
			  struct sk_buff *skb, unsigned int bufsize)
{
    struct skge_rx_desc *rd = e->desc;
    u64 map;

    map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
			 PCI_DMA_FROMDEVICE);

    rd->dma_lo = map;
    rd->dma_hi = map >> 32;
    e->skb = skb;
    rd->csum1_start = ETH_HLEN;
    rd->csum2_start = ETH_HLEN;
    rd->csum1 = 0;
    rd->csum2 = 0;

    wmb();

    rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
    pci_unmap_addr_set(e, mapaddr, map);
    pci_unmap_len_set(e, maplen, bufsize);
}

/* Resume receiving using existing skb,
 * Note: DMA address is not changed by chip.
 *	 MTU not changed while receiver active.
 */
static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
{
    struct skge_rx_desc *rd = e->desc;

    rd->csum2 = 0;
    rd->csum2_start = ETH_HLEN;

    wmb();

    rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
}


/* Free all buffers in receive ring, assumes receiver stopped */
static void skge_rx_clean(struct skge_port *skge)
{
    struct skge_hw *hw = skge->hw;
    struct skge_ring *ring = &skge->rx_ring;
    struct skge_element *e;

    e = ring->start;
    do {
	struct skge_rx_desc *rd = e->desc;
	rd->control = 0;
	if (e->skb) {
	    pci_unmap_single(hw->pdev,
			     pci_unmap_addr(e, mapaddr),
			     pci_unmap_len(e, maplen),
			     PCI_DMA_FROMDEVICE);
	    dev_kfree_skb(e->skb);
	    e->skb = NULL;
	}
    } while ((e = e->next) != ring->start);
}


/* Allocate buffers for receive ring
 * For receive: to_clean is next received frame.
 */
static int skge_rx_fill(struct net_device *dev)
{
    struct skge_port *skge = netdev_priv(dev);
    struct skge_ring *ring = &skge->rx_ring;
    struct skge_element *e;

    e = ring->start;
    do {
	struct sk_buff *skb;

	skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
				 GFP_KERNEL);
	if (!skb)
	    return -ENOMEM;

	skb_reserve(skb, NET_IP_ALIGN);
	skge_rx_setup(skge, e, skb, skge->rx_buf_size);
    } while ((e = e->next) != ring->start);

    ring->to_clean = ring->start;
    return 0;
}

static const char *skge_pause(enum pause_status status)
{
    switch (status) {
    case FLOW_STAT_NONE:
	return "none";
    case FLOW_STAT_REM_SEND:
	return "rx only";
    case FLOW_STAT_LOC_SEND:
	return "tx only";
    case FLOW_STAT_SYMMETRIC:	/* Both stations may send PAUSE */
	return "both";
    default:
	return "indeterminate";
    }
}


static void skge_link_up(struct skge_port *skge)
{
    skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
		LED_BLK_OFF | LED_SYNC_OFF | LED_ON);

    netif_carrier_on(skge->netdev);
    netif_wake_queue(skge->netdev);

    if (netif_msg_link(skge)) {
	printk(KERN_INFO PFX
	       "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
	       skge->netdev->name, skge->speed,
	       skge->duplex == DUPLEX_FULL ? "full" : "half",
	       skge_pause(skge->flow_status));
    }
}

static void skge_link_down(struct skge_port *skge)
{
    skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
    netif_carrier_off(skge->netdev);
    netif_stop_queue(skge->netdev);

    if (netif_msg_link(skge))
	printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
}


static void xm_link_down(struct skge_hw *hw, int port)
{
    struct net_device *dev = hw->dev[port];
    struct skge_port *skge = netdev_priv(dev);

    xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);

    if (netif_carrier_ok(dev))
	skge_link_down(skge);
}

static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
{
    int i;

    xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
    *val = xm_read16(hw, port, XM_PHY_DATA);

    if (hw->phy_type == SK_PHY_XMAC)
	goto ready;

    for (i = 0; i < PHY_RETRIES; i++) {
	if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
	    goto ready;
	udelay(1);
    }

    return -ETIMEDOUT;
 ready:
    *val = xm_read16(hw, port, XM_PHY_DATA);

    return 0;
}

static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
    u16 v = 0;
    if (__xm_phy_read(hw, port, reg, &v))
	printk(KERN_WARNING PFX "%s: phy read timed out\n",
	       hw->dev[port]->name);
    return v;
}

static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
    int i;

    xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
    for (i = 0; i < PHY_RETRIES; i++) {
	if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
	    goto ready;
	udelay(1);
    }
    return -EIO;

 ready:
    xm_write16(hw, port, XM_PHY_DATA, val);
    for (i = 0; i < PHY_RETRIES; i++) {
	if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
	    return 0;
	udelay(1);
    }
    return -ETIMEDOUT;
}

static void genesis_init(struct skge_hw *hw)
{
    /* set blink source counter */
    skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
    skge_write8(hw, B2_BSC_CTRL, BSC_START);

    /* configure mac arbiter */
    skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

    /* configure mac arbiter timeout values */
    skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
    skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
    skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
    skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);

    skge_write8(hw, B3_MA_RCINI_RX1, 0);
    skge_write8(hw, B3_MA_RCINI_RX2, 0);
    skge_write8(hw, B3_MA_RCINI_TX1, 0);
    skge_write8(hw, B3_MA_RCINI_TX2, 0);

    /* configure packet arbiter timeout */
    skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
    skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
    skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
    skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
    skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
}

static void genesis_reset(struct skge_hw *hw, int port)
{
    const u8 zero[8] = { 0 };
    u32 reg;

    skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);

    /* reset the statistics module */
    xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
    xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
    xm_write32(hw, port, XM_MODE, 0);		/* clear Mode Reg */
    xm_write16(hw, port, XM_TX_CMD, 0);		/* reset TX CMD Reg */
    xm_write16(hw, port, XM_RX_CMD, 0);		/* reset RX CMD Reg */

    /* disable Broadcom PHY IRQ */
    if (hw->phy_type == SK_PHY_BCOM)
	xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);

    xm_outhash(hw, port, XM_HSM, zero);

    /* Flush TX and RX fifo */
    reg = xm_read32(hw, port, XM_MODE);
    xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF);
    xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF);
}


/* Convert mode to MII values */
static const u16 phy_pause_map[] = {
    [FLOW_MODE_NONE]		= 0,
    [FLOW_MODE_LOC_SEND]	= PHY_AN_PAUSE_ASYM,
    [FLOW_MODE_SYMMETRIC]	= PHY_AN_PAUSE_CAP,
    [FLOW_MODE_SYM_OR_REM]	= PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
};

/* special defines for FIBER (88E1011S only) */
static const u16 fiber_pause_map[] = {
    [FLOW_MODE_NONE]		= PHY_X_P_NO_PAUSE,
    [FLOW_MODE_LOC_SEND]	= PHY_X_P_ASYM_MD,
    [FLOW_MODE_SYMMETRIC]	= PHY_X_P_SYM_MD,
    [FLOW_MODE_SYM_OR_REM]	= PHY_X_P_BOTH_MD,
};
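/*
 * Editor's note (not part of the original source): these tables map the
 * driver's flow-control mode onto the two IEEE 802.3 autonegotiation
 * pause bits: PAUSE_CAP alone advertises symmetric pause, PAUSE_ASYM
 * alone offers to send but not honour pause frames, and both together
 * mean "symmetric, or asymmetric toward the link partner", matching
 * FLOW_MODE_SYM_OR_REM above.
 */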
/* Check status of Broadcom phy link */
static void bcom_check_link(struct skge_hw *hw, int port)
{
    struct net_device *dev = hw->dev[port];
    struct skge_port *skge = netdev_priv(dev);
    u16 status;

    /* read twice because of latch */
    xm_phy_read(hw, port, PHY_BCOM_STAT);
    status = xm_phy_read(hw, port, PHY_BCOM_STAT);

    if ((status & PHY_ST_LSYNC) == 0) {
	xm_link_down(hw, port);
	return;
    }

    if (skge->autoneg == AUTONEG_ENABLE) {
	u16 lpa, aux;

	if (!(status & PHY_ST_AN_OVER))
	    return;

	lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
	if (lpa & PHY_B_AN_RF) {
	    printk(KERN_NOTICE PFX "%s: remote fault\n",
		   dev->name);
	    return;
	}

	aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);

	/* Check Duplex mismatch */
	switch (aux & PHY_B_AS_AN_RES_MSK) {
	case PHY_B_RES_1000FD:
	    skge->duplex = DUPLEX_FULL;
	    break;
	case PHY_B_RES_1000HD:
	    skge->duplex = DUPLEX_HALF;
	    break;
	default:
	    printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
		   dev->name);
	    return;
	}

	/* We are using IEEE 802.3z/D5.0 Table 37-4 */
	switch (aux & PHY_B_AS_PAUSE_MSK) {
	case PHY_B_AS_PAUSE_MSK:
	    skge->flow_status = FLOW_STAT_SYMMETRIC;
	    break;
	case PHY_B_AS_PRR:
	    skge->flow_status = FLOW_STAT_REM_SEND;
	    break;
	case PHY_B_AS_PRT:
	    skge->flow_status = FLOW_STAT_LOC_SEND;
	    break;
	default:
	    skge->flow_status = FLOW_STAT_NONE;
	}
	skge->speed = SPEED_1000;
    }

    if (!netif_carrier_ok(dev))
	genesis_link_up(skge);
}

/* Broadcom 5400 only supports gigabit! SysKonnect did not put an additional
 * PHY on for 100 or 10 Mbit operation
 */
static void bcom_phy_init(struct skge_port *skge)
{
    struct skge_hw *hw = skge->hw;
    int port = skge->port;
    int i;
    u16 id1, r, ext, ctl;

    /* magic workaround patterns for Broadcom */
    static const struct {
	u16 reg;
	u16 val;
    } A1hack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
	{ 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
	{ 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
	{ 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
    }, C0hack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
	{ 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
    };

    /* read Id from external PHY (all have the same address) */
    id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);

    /* Optimize MDIO transfer by suppressing preamble. */
    r = xm_read16(hw, port, XM_MMU_CMD);
    r |= XM_MMU_NO_PRE;
    xm_write16(hw, port, XM_MMU_CMD, r);

    switch (id1) {
    case PHY_BCOM_ID1_C0:
	/*
	 * Workaround BCOM Errata for the C0 type.
	 * Write magic patterns to reserved registers.
	 */
	for (i = 0; i < ARRAY_SIZE(C0hack); i++)
	    xm_phy_write(hw, port,
			 C0hack[i].reg, C0hack[i].val);

	break;
    case PHY_BCOM_ID1_A1:
	/*
	 * Workaround BCOM Errata for the A1 type.
	 * Write magic patterns to reserved registers.
	 */
	for (i = 0; i < ARRAY_SIZE(A1hack); i++)
	    xm_phy_write(hw, port,
			 A1hack[i].reg, A1hack[i].val);
	break;
    }

    /*
     * Workaround BCOM Errata (#10523) for all BCom PHYs.
     * Disable Power Management after reset.
     */
    r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
    r |= PHY_B_AC_DIS_PM;
    xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);

    /* Dummy read */
    xm_read16(hw, port, XM_ISRC);

    ext = PHY_B_PEC_EN_LTR;	/* enable tx led */
    ctl = PHY_CT_SP1000;	/* always 1000mbit */

    if (skge->autoneg == AUTONEG_ENABLE) {
	/*
	 * Workaround BCOM Errata #1 for the C5 type.
	 * 1000Base-T Link Acquisition Failure in Slave Mode
	 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
	 */
	u16 adv = PHY_B_1000C_RD;
	if (skge->advertising & ADVERTISED_1000baseT_Half)
	    adv |= PHY_B_1000C_AHD;
	if (skge->advertising & ADVERTISED_1000baseT_Full)
	    adv |= PHY_B_1000C_AFD;
	xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);

	ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
    } else {
	if (skge->duplex == DUPLEX_FULL)
	    ctl |= PHY_CT_DUP_MD;
	/* Force to slave */
	xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
    }

    /* Set autonegotiation pause parameters */
    xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
		 phy_pause_map[skge->flow_control] | PHY_AN_CSMA);

    /* Handle Jumbo frames */
    if (hw->dev[port]->mtu > ETH_DATA_LEN) {
	xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
		     PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);

	ext |= PHY_B_PEC_HIGH_LA;

    }

    xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
    xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);

    /* Use link status change interrupt */
    xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
}

static void xm_phy_init(struct skge_port *skge)
{
    struct skge_hw *hw = skge->hw;
    int port = skge->port;
    u16 ctrl = 0;

    if (skge->autoneg == AUTONEG_ENABLE) {
	if (skge->advertising & ADVERTISED_1000baseT_Half)
	    ctrl |= PHY_X_AN_HD;
	if (skge->advertising & ADVERTISED_1000baseT_Full)
	    ctrl |= PHY_X_AN_FD;

	ctrl |= fiber_pause_map[skge->flow_control];

	xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);

	/* Restart Auto-negotiation */
	ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;
    } else {
	/* Set DuplexMode in Config register */
	if (skge->duplex == DUPLEX_FULL)
	    ctrl |= PHY_CT_DUP_MD;
	/*
	 * Do NOT enable Auto-negotiation here. This would hold
	 * the link down because no IDLEs are transmitted
	 */
    }

    xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);

    /* Poll PHY for status changes */
    mod_timer(&skge->link_timer, jiffies + LINK_HZ);
}

static int xm_check_link(struct net_device *dev)
{
    struct skge_port *skge = netdev_priv(dev);
    struct skge_hw *hw = skge->hw;
    int port = skge->port;
    u16 status;

    /* read twice because of latch */
    xm_phy_read(hw, port, PHY_XMAC_STAT);
    status = xm_phy_read(hw, port, PHY_XMAC_STAT);

    if ((status & PHY_ST_LSYNC) == 0) {
	xm_link_down(hw, port);
	return 0;
    }

    if (skge->autoneg == AUTONEG_ENABLE) {
	u16 lpa, res;

	if (!(status & PHY_ST_AN_OVER))
	    return 0;

	lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
	if (lpa & PHY_B_AN_RF) {
	    printk(KERN_NOTICE PFX "%s: remote fault\n",
		   dev->name);
	    return 0;
	}

	res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI);

	/* Check Duplex mismatch */
	switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) {
	case PHY_X_RS_FD:
	    skge->duplex = DUPLEX_FULL;
	    break;
	case PHY_X_RS_HD:
	    skge->duplex = DUPLEX_HALF;
	    break;
	default:
	    printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
		   dev->name);
	    return 0;
	}

	/* We are using IEEE 802.3z/D5.0 Table 37-4 */
	if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
	     skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
	    (lpa & PHY_X_P_SYM_MD))
	    skge->flow_status = FLOW_STAT_SYMMETRIC;
	else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
		 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
	    /* Enable PAUSE receive, disable PAUSE transmit */
	    skge->flow_status = FLOW_STAT_REM_SEND;
	else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
		 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
	    /* Disable PAUSE receive, enable PAUSE transmit */
	    skge->flow_status = FLOW_STAT_LOC_SEND;
	else
	    skge->flow_status = FLOW_STAT_NONE;

	skge->speed = SPEED_1000;
    }

    if (!netif_carrier_ok(dev))
	genesis_link_up(skge);
    return 1;
}

/* Poll to check for link coming up.
 *
 * Since internal PHY is wired to a level triggered pin, can't
 * get an interrupt when carrier is detected, need to poll for
 * link coming up.
 */
static void xm_link_timer(unsigned long arg)
{
    struct skge_port *skge = (struct skge_port *) arg;
    struct net_device *dev = skge->netdev;
    struct skge_hw *hw = skge->hw;
    int port = skge->port;
    int i;
    unsigned long flags;

    if (!netif_running(dev))
	return;

    spin_lock_irqsave(&hw->phy_lock, flags);

    /*
     * Verify the link by checking the GPIO register three times.
     * This pin has the signal from the link_sync pin connected to it.
     */
    for (i = 0; i < 3; i++) {
	if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS)
	    goto link_down;
    }

    /* Re-enable interrupt to detect link down */
    if (xm_check_link(dev)) {
	u16 msk = xm_read16(hw, port, XM_IMSK);
	msk &= ~XM_IS_INP_ASS;
	xm_write16(hw, port, XM_IMSK, msk);
	xm_read16(hw, port, XM_ISRC);
    } else {
link_down:
	mod_timer(&skge->link_timer,
		  round_jiffies(jiffies + LINK_HZ));
    }
    spin_unlock_irqrestore(&hw->phy_lock, flags);
}

static void genesis_mac_init(struct skge_hw *hw, int port)
{
    struct net_device *dev = hw->dev[port];
    struct skge_port *skge = netdev_priv(dev);
    int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
    int i;
    u32 r;
    const u8 zero[6] = { 0 };

    for (i = 0; i < 10; i++) {
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
		     MFF_SET_MAC_RST);
	if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
	    goto reset_ok;
	udelay(1);
    }

    printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);

 reset_ok:
    /* Unreset the XMAC. */
    skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);

    /*
     * Perform additional initialization for external PHYs,
     * namely for the 1000baseTX cards that use the XMAC's
     * GMII mode.
     */
    if (hw->phy_type != SK_PHY_XMAC) {
	/* Take external Phy out of reset */
	r = skge_read32(hw, B2_GP_IO);
	if (port == 0)
	    r |= GP_DIR_0|GP_IO_0;
	else
	    r |= GP_DIR_2|GP_IO_2;

	skge_write32(hw, B2_GP_IO, r);

	/* Enable GMII interface */
	xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
    }


    switch (hw->phy_type) {
    case SK_PHY_XMAC:
	xm_phy_init(skge);
	break;
    case SK_PHY_BCOM:
	bcom_phy_init(skge);
	bcom_check_link(hw, port);
    }

    /* Set Station Address */
    xm_outaddr(hw, port, XM_SA, dev->dev_addr);

    /* We don't use match addresses so clear */
    for (i = 1; i < 16; i++)
	xm_outaddr(hw, port, XM_EXM(i), zero);

    /* Clear MIB counters */
    xm_write16(hw, port, XM_STAT_CMD,
	       XM_SC_CLR_RXC | XM_SC_CLR_TXC);
    /* Clear two times according to Errata #3 */
    xm_write16(hw, port, XM_STAT_CMD,
	       XM_SC_CLR_RXC | XM_SC_CLR_TXC);

    /* configure Rx High Water Mark (XM_RX_HI_WM) */
    xm_write16(hw, port, XM_RX_HI_WM, 1450);

    /* We don't need the FCS appended to the packet. */
    r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
    if (jumbo)
	r |= XM_RX_BIG_PK_OK;

    if (skge->duplex == DUPLEX_HALF) {
	/*
	 * If in manual half duplex mode the other side might be in
	 * full duplex mode, so ignore if a carrier extension is not seen
	 * on frames received
	 */
	r |= XM_RX_DIS_CEXT;
    }
    xm_write16(hw, port, XM_RX_CMD, r);

    /* We want short frames padded to 60 bytes. */
    xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);

    /* Increase threshold for jumbo frames on dual port */
    if (hw->ports > 1 && jumbo)
	xm_write16(hw, port, XM_TX_THR, 1020);
    else
	xm_write16(hw, port, XM_TX_THR, 512);

    /*
     * Enable the reception of all error frames. This is
     * a necessary evil due to the design of the XMAC. The
     * XMAC's receive FIFO is only 8K in size, however jumbo
     * frames can be up to 9000 bytes in length. When bad
     * frame filtering is enabled, the XMAC's RX FIFO operates
     * in 'store and forward' mode. For this to work, the
     * entire frame has to fit into the FIFO, but that means
     * that jumbo frames larger than 8192 bytes will be
     * truncated. Disabling all bad frame filtering causes
     * the RX FIFO to operate in streaming mode, in which
     * case the XMAC will start transferring frames out of the
     * RX FIFO as soon as the FIFO threshold is reached.
     */
    xm_write32(hw, port, XM_MODE, XM_DEF_MODE);


    /*
     * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
     *	- Enable all bits excepting 'Octets Rx OK Low CntOv'
     *	  and 'Octets Rx OK Hi Cnt Ov'.
     */
    xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);

    /*
     * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
     *	- Enable all bits excepting 'Octets Tx OK Low CntOv'
     *	  and 'Octets Tx OK Hi Cnt Ov'.
     */
    xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);

    /* Configure MAC arbiter */
    skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

    /* configure timeout values */
    skge_write8(hw, B3_MA_TOINI_RX1, 72);
    skge_write8(hw, B3_MA_TOINI_RX2, 72);
    skge_write8(hw, B3_MA_TOINI_TX1, 72);
    skge_write8(hw, B3_MA_TOINI_TX2, 72);

    skge_write8(hw, B3_MA_RCINI_RX1, 0);
    skge_write8(hw, B3_MA_RCINI_RX2, 0);
    skge_write8(hw, B3_MA_RCINI_TX1, 0);
    skge_write8(hw, B3_MA_RCINI_TX2, 0);

    /* Configure Rx MAC FIFO */
    skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
    skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
    skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);

    /* Configure Tx MAC FIFO */
    skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
    skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
    skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);

    if (jumbo) {
	/* Enable frame flushing if jumbo frames used */
	skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
    } else {
	/* enable timeout timers if normal frames */
	skge_write16(hw, B3_PA_CTRL,
		     (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
    }
}

static void genesis_stop(struct skge_port *skge)
{
    struct skge_hw *hw = skge->hw;
    int port = skge->port;
    unsigned retries = 1000;
    u16 cmd;

    /* Disable Tx and Rx */
    cmd = xm_read16(hw, port, XM_MMU_CMD);
    cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
    xm_write16(hw, port, XM_MMU_CMD, cmd);

    genesis_reset(hw, port);

    /* Clear Tx packet arbiter timeout IRQ */
    skge_write16(hw, B3_PA_CTRL,
		 port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);

    /* Reset the MAC */
    skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
    do {
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
	if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST))
	    break;
    } while (--retries > 0);

    /* For external PHYs there must be special handling */
    if (hw->phy_type != SK_PHY_XMAC) {
	u32 reg = skge_read32(hw, B2_GP_IO);
	if (port == 0) {
	    reg |= GP_DIR_0;
	    reg &= ~GP_IO_0;
	} else {
	    reg |= GP_DIR_2;
	    reg &= ~GP_IO_2;
	}
	skge_write32(hw, B2_GP_IO, reg);
	skge_read32(hw, B2_GP_IO);
    }

    xm_write16(hw, port, XM_MMU_CMD,
	       xm_read16(hw, port, XM_MMU_CMD)
	       & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));

    xm_read16(hw, port, XM_MMU_CMD);
}


static void genesis_get_stats(struct skge_port *skge, u64 *data)
{
    struct skge_hw *hw = skge->hw;
    int port = skge->port;
    int i;
    unsigned long timeout = jiffies + HZ;

    xm_write16(hw, port,
	       XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);

    /* wait for update to complete */
    while (xm_read16(hw, port, XM_STAT_CMD)
	   & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
	if (time_after(jiffies, timeout))
	    break;
	udelay(10);
    }

    /* special case for 64 bit octet counter */
    data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
	| xm_read32(hw, port, XM_TXO_OK_LO);
    data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
	| xm_read32(hw, port, XM_RXO_OK_LO);

    for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
	data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
}

static void genesis_mac_intr(struct skge_hw *hw, int port)
{
    struct net_device *dev = hw->dev[port];
    struct skge_port *skge = netdev_priv(dev);
    u16 status = xm_read16(hw, port, XM_ISRC);

    if (netif_msg_intr(skge))
	printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
	       dev->name, status);

    if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
	xm_link_down(hw, port);
	mod_timer(&skge->link_timer, jiffies + 1);
    }

    if (status & XM_IS_TXF_UR) {
	xm_write32(hw, port, XM_MODE, XM_MD_FTF);
	++dev->stats.tx_fifo_errors;
    }
}

static void genesis_link_up(struct skge_port *skge)
{
    struct skge_hw *hw = skge->hw;
    int port = skge->port;
    u16 cmd, msk;
    u32 mode;

    cmd = xm_read16(hw, port, XM_MMU_CMD);

    /*
     * enabling pause frame reception is required for 1000BT
     * because the XMAC is not reset if the link is going down
     */
    if (skge->flow_status == FLOW_STAT_NONE ||
	skge->flow_status == FLOW_STAT_LOC_SEND)
	/* Disable Pause Frame Reception */
	cmd |= XM_MMU_IGN_PF;
    else
	/* Enable Pause Frame Reception */
	cmd &= ~XM_MMU_IGN_PF;

    xm_write16(hw, port, XM_MMU_CMD, cmd);

    mode = xm_read32(hw, port, XM_MODE);
    if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
	skge->flow_status == FLOW_STAT_LOC_SEND) {
	/*
	 * Configure Pause Frame Generation
	 * Use internal and external Pause Frame Generation.
	 * Sending pause frames is edge triggered.
	 * Send a Pause frame with the maximum pause time if
	 * internal or external FIFO full condition occurs.
	 * Send a zero pause time frame to re-start transmission.
	 */
	/* XM_PAUSE_DA = '010000C28001' (default) */
	/* XM_MAC_PTIME = 0xffff (maximum) */
	/* remember this value is defined in big endian (!) */
	xm_write16(hw, port, XM_MAC_PTIME, 0xffff);

	mode |= XM_PAUSE_MODE;
	skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
    } else {
	/*
	 * disabling pause frame generation is required for 1000BT
	 * because the XMAC is not reset if the link is going down
	 */
	/* Disable Pause Mode in Mode Register */
	mode &= ~XM_PAUSE_MODE;

	skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
    }

    xm_write32(hw, port, XM_MODE, mode);

    /* Turn on detection of Tx underrun */
    msk = xm_read16(hw, port, XM_IMSK);
    msk &= ~XM_IS_TXF_UR;
    xm_write16(hw, port, XM_IMSK, msk);

    xm_read16(hw, port, XM_ISRC);

    /* get MMU Command Reg. */
    cmd = xm_read16(hw, port, XM_MMU_CMD);
    if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
	cmd |= XM_MMU_GMII_FD;

    /*
     * Workaround BCOM Errata (#10523) for all BCom Phys
     * Enable Power Management after link up
     */
    if (hw->phy_type == SK_PHY_BCOM) {
	xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
		     xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
		     & ~PHY_B_AC_DIS_PM);
	xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
    }

    /* enable Rx/Tx */
    xm_write16(hw, port, XM_MMU_CMD,
	       cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
    skge_link_up(skge);
}


static inline void bcom_phy_intr(struct skge_port *skge)
{
    struct skge_hw *hw = skge->hw;
    int port = skge->port;
    u16 isrc;

    isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
    if (netif_msg_intr(skge))
	printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n",
	       skge->netdev->name, isrc);

    if (isrc & PHY_B_IS_PSE)
	printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n",
	       hw->dev[port]->name);

    /* Workaround BCom Errata:
     *	enable and disable loopback mode if "NO HCD" occurs.
     */
    if (isrc & PHY_B_IS_NO_HDCL) {
	u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
	xm_phy_write(hw, port, PHY_BCOM_CTRL,
		     ctrl | PHY_CT_LOOP);
	xm_phy_write(hw, port, PHY_BCOM_CTRL,
		     ctrl & ~PHY_CT_LOOP);
    }

    if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
	bcom_check_link(hw, port);

}

static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
    int i;

    gma_write16(hw, port, GM_SMI_DATA, val);
    gma_write16(hw, port, GM_SMI_CTRL,
		GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
    for (i = 0; i < PHY_RETRIES; i++) {
	udelay(1);

	if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
	    return 0;
    }

    printk(KERN_WARNING PFX "%s: phy write timeout\n",
	   hw->dev[port]->name);
    return -EIO;
}

static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
{
    int i;

    gma_write16(hw, port, GM_SMI_CTRL,
		GM_SMI_CT_PHY_AD(hw->phy_addr)
		| GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

    for (i = 0; i < PHY_RETRIES; i++) {
	udelay(1);
	if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
	    goto ready;
    }

    return -ETIMEDOUT;
 ready:
    *val = gma_read16(hw, port, GM_SMI_DATA);
    return 0;
}

static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
    u16 v = 0;
    if (__gm_phy_read(hw, port, reg, &v))
	printk(KERN_WARNING PFX "%s: phy read timeout\n",
	       hw->dev[port]->name);
    return v;
}

/* Marvell Phy Initialization */
static void yukon_init(struct skge_hw *hw, int port)
{
    struct skge_port *skge = netdev_priv(hw->dev[port]);
    u16 ctrl, ct1000, adv;

    if (skge->autoneg == AUTONEG_ENABLE) {
	u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

	ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
		   PHY_M_EC_MAC_S_MSK);
	ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

	ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);

	gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
    }

    ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
    if (skge->autoneg == AUTONEG_DISABLE)
	ctrl &= ~PHY_CT_ANE;

    ctrl |= PHY_CT_RESET;
    gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

    ctrl = 0;
    ct1000 = 0;
    adv = PHY_AN_CSMA;

    if (skge->autoneg == AUTONEG_ENABLE) {
	if (hw->copper) {
	    if (skge->advertising & ADVERTISED_1000baseT_Full)
		ct1000 |= PHY_M_1000C_AFD;
	    if (skge->advertising & ADVERTISED_1000baseT_Half)
		ct1000 |= PHY_M_1000C_AHD;
	    if (skge->advertising & ADVERTISED_100baseT_Full)
		adv |= PHY_M_AN_100_FD;
	    if (skge->advertising & ADVERTISED_100baseT_Half)
		adv |= PHY_M_AN_100_HD;
	    if (skge->advertising & ADVERTISED_10baseT_Full)
		adv |= PHY_M_AN_10_FD;
	    if (skge->advertising & ADVERTISED_10baseT_Half)
		adv |= PHY_M_AN_10_HD;

	    /* Set Flow-control capabilities */
	    adv |= phy_pause_map[skge->flow_control];
	} else {
	    if (skge->advertising & ADVERTISED_1000baseT_Full)
		adv |= PHY_M_AN_1000X_AFD;
	    if (skge->advertising & ADVERTISED_1000baseT_Half)
		adv |= PHY_M_AN_1000X_AHD;

	    adv |= fiber_pause_map[skge->flow_control];
	}

	/* Restart Auto-negotiation */
	ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
    } else {
	/* forced speed/duplex settings */
	ct1000 = PHY_M_1000C_MSE;

	if (skge->duplex == DUPLEX_FULL)
	    ctrl |= PHY_CT_DUP_MD;

	switch (skge->speed) {
	case SPEED_1000:
	    ctrl |= PHY_CT_SP1000;
	    break;
	case SPEED_100:
	    ctrl |= PHY_CT_SP100;
	    break;
	}

	ctrl |= PHY_CT_RESET;
    }

    gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

    gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
    gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

    /* Enable phy interrupt on autonegotiation complete (or link up) */
    if (skge->autoneg == AUTONEG_ENABLE)
	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
    else
	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
}

static void yukon_reset(struct skge_hw *hw, int port)
{
    gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);	/* disable PHY IRQs */
    gma_write16(hw, port, GM_MC_ADDR_H1, 0);		/* clear MC hash */
    gma_write16(hw, port, GM_MC_ADDR_H2, 0);
    gma_write16(hw, port, GM_MC_ADDR_H3, 0);
    gma_write16(hw, port, GM_MC_ADDR_H4, 0);

    gma_write16(hw, port, GM_RX_CTRL,
		gma_read16(hw, port, GM_RX_CTRL)
		| GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
}

/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
static int is_yukon_lite_a0(struct skge_hw *hw)
{
    u32 reg;
    int ret;

    if (hw->chip_id != CHIP_ID_YUKON)
	return 0;

    reg = skge_read32(hw, B2_FAR);
    skge_write8(hw, B2_FAR + 3, 0xff);
    ret = (skge_read8(hw, B2_FAR + 3) != 0);
    skge_write32(hw, B2_FAR, reg);
    return ret;
}
GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; 2120 2121 /* Clear GMC reset */ 2122 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); 2123 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); 2124 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); 2125 2126 if (skge->autoneg == AUTONEG_DISABLE) { 2127 reg = GM_GPCR_AU_ALL_DIS; 2128 gma_write16(hw, port, GM_GP_CTRL, 2129 gma_read16(hw, port, GM_GP_CTRL) | reg); 2130 2131 switch (skge->speed) { 2132 case SPEED_1000: 2133 reg &= ~GM_GPCR_SPEED_100; 2134 reg |= GM_GPCR_SPEED_1000; 2135 break; 2136 case SPEED_100: 2137 reg &= ~GM_GPCR_SPEED_1000; 2138 reg |= GM_GPCR_SPEED_100; 2139 break; 2140 case SPEED_10: 2141 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); 2142 break; 2143 } 2144 2145 if (skge->duplex == DUPLEX_FULL) 2146 reg |= GM_GPCR_DUP_FULL; 2147 } else 2148 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; 2149 2150 switch (skge->flow_control) { 2151 case FLOW_MODE_NONE: 2152 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 2153 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 2154 break; 2155 case FLOW_MODE_LOC_SEND: 2156 /* disable Rx flow-control */ 2157 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 2158 break; 2159 case FLOW_MODE_SYMMETRIC: 2160 case FLOW_MODE_SYM_OR_REM: 2161 /* enable Tx & Rx flow-control */ 2162 break; 2163 } 2164 2165 gma_write16(hw, port, GM_GP_CTRL, reg); 2166 skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); 2167 2168 yukon_init(hw, port); 2169 2170 /* MIB clear */ 2171 reg = gma_read16(hw, port, GM_PHY_ADDR); 2172 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); 2173 2174 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 2175 gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); 2176 gma_write16(hw, port, GM_PHY_ADDR, reg); 2177 2178 /* transmit control */ 2179 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 2180 2181 /* receive control reg: unicast + multicast + no FCS */ 2182 gma_write16(hw, port, GM_RX_CTRL, 2183 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); 2184 2185 /* transmit flow control */ 2186 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); 2187 2188 /* transmit parameter */ 2189 gma_write16(hw, port, GM_TX_PARAM, 2190 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | 2191 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 2192 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); 2193 2194 /* configure the Serial Mode Register */ 2195 reg = DATA_BLIND_VAL(DATA_BLIND_DEF) 2196 | GM_SMOD_VLAN_ENA 2197 | IPG_DATA_VAL(IPG_DATA_DEF); 2198 2199 if (hw->dev[port]->mtu > ETH_DATA_LEN) 2200 reg |= GM_SMOD_JUMBO_ENA; 2201 2202 gma_write16(hw, port, GM_SERIAL_MODE, reg); 2203 2204 /* physical address: used for pause frames */ 2205 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); 2206 /* virtual address for data */ 2207 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); 2208 2209 /* enable interrupt mask for counter overflows */ 2210 gma_write16(hw, port, GM_TX_IRQ_MSK, 0); 2211 gma_write16(hw, port, GM_RX_IRQ_MSK, 0); 2212 gma_write16(hw, port, GM_TR_IRQ_MSK, 0); 2213 2214 /* Initialize Mac Fifo */ 2215 2216 /* Configure Rx MAC FIFO */ 2217 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); 2218 reg = GMF_OPER_ON | GMF_RX_F_FL_ON; 2219 2220 /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. 
A0 only */ 2221 if (is_yukon_lite_a0(hw)) 2222 reg &= ~GMF_RX_F_FL_ON; 2223 2224 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 2225 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); 2226 /* 2227 * because Pause Packet Truncation in GMAC is not working 2228 * we have to increase the Flush Threshold to 64 bytes 2229 * in order to flush pause packets in Rx FIFO on Yukon-1 2230 */ 2231 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1); 2232 2233 /* Configure Tx MAC FIFO */ 2234 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 2235 skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 2236} 2237 2238/* Go into power down mode */ 2239static void yukon_suspend(struct skge_hw *hw, int port) 2240{ 2241 u16 ctrl; 2242 2243 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); 2244 ctrl |= PHY_M_PC_POL_R_DIS; 2245 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); 2246 2247 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); 2248 ctrl |= PHY_CT_RESET; 2249 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 2250 2251 /* switch IEEE compatible power down mode on */ 2252 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); 2253 ctrl |= PHY_CT_PDOWN; 2254 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 2255} 2256 2257static void yukon_stop(struct skge_port *skge) 2258{ 2259 struct skge_hw *hw = skge->hw; 2260 int port = skge->port; 2261 2262 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); 2263 yukon_reset(hw, port); 2264 2265 gma_write16(hw, port, GM_GP_CTRL, 2266 gma_read16(hw, port, GM_GP_CTRL) 2267 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); 2268 gma_read16(hw, port, GM_GP_CTRL); 2269 2270 yukon_suspend(hw, port); 2271 2272 /* set GPHY Control reset */ 2273 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); 2274 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); 2275} 2276 2277static void yukon_get_stats(struct skge_port *skge, u64 *data) 2278{ 2279 struct skge_hw *hw = skge->hw; 2280 int port = skge->port; 2281 int i; 2282 2283 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 2284 | gma_read32(hw, port, GM_TXO_OK_LO); 2285 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32 2286 | gma_read32(hw, port, GM_RXO_OK_LO); 2287 2288 for (i = 2; i < ARRAY_SIZE(skge_stats); i++) 2289 data[i] = gma_read32(hw, port, 2290 skge_stats[i].gma_offset); 2291} 2292 2293static void yukon_mac_intr(struct skge_hw *hw, int port) 2294{ 2295 struct net_device *dev = hw->dev[port]; 2296 struct skge_port *skge = netdev_priv(dev); 2297 u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); 2298 2299 if (netif_msg_intr(skge)) 2300 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n", 2301 dev->name, status); 2302 2303 if (status & GM_IS_RX_FF_OR) { 2304 ++dev->stats.rx_fifo_errors; 2305 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); 2306 } 2307 2308 if (status & GM_IS_TX_FF_UR) { 2309 ++dev->stats.tx_fifo_errors; 2310 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); 2311 } 2312 2313} 2314 2315static u16 yukon_speed(const struct skge_hw *hw, u16 aux) 2316{ 2317 switch (aux & PHY_M_PS_SPEED_MSK) { 2318 case PHY_M_PS_SPEED_1000: 2319 return SPEED_1000; 2320 case PHY_M_PS_SPEED_100: 2321 return SPEED_100; 2322 default: 2323 return SPEED_10; 2324 } 2325} 2326 2327static void yukon_link_up(struct skge_port *skge) 2328{ 2329 struct skge_hw *hw = skge->hw; 2330 int port = skge->port; 2331 u16 reg; 2332 2333 /* Enable Transmit FIFO Underrun */ 2334 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); 2335 2336 reg = gma_read16(hw, port, GM_GP_CTRL); 2337 if (skge->duplex 
== DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) 2338 reg |= GM_GPCR_DUP_FULL; 2339 2340 /* enable Rx/Tx */ 2341 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 2342 gma_write16(hw, port, GM_GP_CTRL, reg); 2343 2344 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); 2345 skge_link_up(skge); 2346} 2347 2348static void yukon_link_down(struct skge_port *skge) 2349{ 2350 struct skge_hw *hw = skge->hw; 2351 int port = skge->port; 2352 u16 ctrl; 2353 2354 ctrl = gma_read16(hw, port, GM_GP_CTRL); 2355 ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 2356 gma_write16(hw, port, GM_GP_CTRL, ctrl); 2357 2358 if (skge->flow_status == FLOW_STAT_REM_SEND) { 2359 ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); 2360 ctrl |= PHY_M_AN_ASP; 2361 /* restore Asymmetric Pause bit */ 2362 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl); 2363 } 2364 2365 skge_link_down(skge); 2366 2367 yukon_init(hw, port); 2368} 2369 2370static void yukon_phy_intr(struct skge_port *skge) 2371{ 2372 struct skge_hw *hw = skge->hw; 2373 int port = skge->port; 2374 const char *reason = NULL; 2375 u16 istatus, phystat; 2376 2377 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); 2378 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); 2379 2380 if (netif_msg_intr(skge)) 2381 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n", 2382 skge->netdev->name, istatus, phystat); 2383 2384 if (istatus & PHY_M_IS_AN_COMPL) { 2385 if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP) 2386 & PHY_M_AN_RF) { 2387 reason = "remote fault"; 2388 goto failed; 2389 } 2390 2391 if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) { 2392 reason = "master/slave fault"; 2393 goto failed; 2394 } 2395 2396 if (!(phystat & PHY_M_PS_SPDUP_RES)) { 2397 reason = "speed/duplex"; 2398 goto failed; 2399 } 2400 2401 skge->duplex = (phystat & PHY_M_PS_FULL_DUP) 2402 ? DUPLEX_FULL : DUPLEX_HALF; 2403 skge->speed = yukon_speed(hw, phystat); 2404 2405 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 2406 switch (phystat & PHY_M_PS_PAUSE_MSK) { 2407 case PHY_M_PS_PAUSE_MSK: 2408 skge->flow_status = FLOW_STAT_SYMMETRIC; 2409 break; 2410 case PHY_M_PS_RX_P_EN: 2411 skge->flow_status = FLOW_STAT_REM_SEND; 2412 break; 2413 case PHY_M_PS_TX_P_EN: 2414 skge->flow_status = FLOW_STAT_LOC_SEND; 2415 break; 2416 default: 2417 skge->flow_status = FLOW_STAT_NONE; 2418 } 2419 2420 if (skge->flow_status == FLOW_STAT_NONE || 2421 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) 2422 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 2423 else 2424 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); 2425 yukon_link_up(skge); 2426 return; 2427 } 2428 2429 if (istatus & PHY_M_IS_LSP_CHANGE) 2430 skge->speed = yukon_speed(hw, phystat); 2431 2432 if (istatus & PHY_M_IS_DUP_CHANGE) 2433 skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; 2434 if (istatus & PHY_M_IS_LST_CHANGE) { 2435 if (phystat & PHY_M_PS_LINK_UP) 2436 yukon_link_up(skge); 2437 else 2438 yukon_link_down(skge); 2439 } 2440 return; 2441 failed: 2442 printk(KERN_ERR PFX "%s: autonegotiation failed (%s)\n", 2443 skge->netdev->name, reason); 2444 2445 /* XXX restart autonegotiation? 
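 * (presumably by re-running yukon_init(), which rewrites the advertisement registers and sets PHY_CT_RE_CFG, as yukon_link_down() already does on link loss)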
*/ 2446} 2447 2448static void skge_phy_reset(struct skge_port *skge) 2449{ 2450 struct skge_hw *hw = skge->hw; 2451 int port = skge->port; 2452 struct net_device *dev = hw->dev[port]; 2453 2454 netif_stop_queue(skge->netdev); 2455 netif_carrier_off(skge->netdev); 2456 2457 spin_lock_bh(&hw->phy_lock); 2458 if (hw->chip_id == CHIP_ID_GENESIS) { 2459 genesis_reset(hw, port); 2460 genesis_mac_init(hw, port); 2461 } else { 2462 yukon_reset(hw, port); 2463 yukon_init(hw, port); 2464 } 2465 spin_unlock_bh(&hw->phy_lock); 2466 2467 skge_set_multicast(dev); 2468} 2469 2470/* Basic MII support */ 2471static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2472{ 2473 struct mii_ioctl_data *data = if_mii(ifr); 2474 struct skge_port *skge = netdev_priv(dev); 2475 struct skge_hw *hw = skge->hw; 2476 int err = -EOPNOTSUPP; 2477 2478 if (!netif_running(dev)) 2479 return -ENODEV; /* Phy still in reset */ 2480 2481 switch(cmd) { 2482 case SIOCGMIIPHY: 2483 data->phy_id = hw->phy_addr; 2484 2485 /* fallthru */ 2486 case SIOCGMIIREG: { 2487 u16 val = 0; 2488 spin_lock_bh(&hw->phy_lock); 2489 if (hw->chip_id == CHIP_ID_GENESIS) 2490 err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); 2491 else 2492 err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); 2493 spin_unlock_bh(&hw->phy_lock); 2494 data->val_out = val; 2495 break; 2496 } 2497 2498 case SIOCSMIIREG: 2499 if (!capable(CAP_NET_ADMIN)) 2500 return -EPERM; 2501 2502 spin_lock_bh(&hw->phy_lock); 2503 if (hw->chip_id == CHIP_ID_GENESIS) 2504 err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f, 2505 data->val_in); 2506 else 2507 err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f, 2508 data->val_in); 2509 spin_unlock_bh(&hw->phy_lock); 2510 break; 2511 } 2512 return err; 2513} 2514 2515static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) 2516{ 2517 u32 end; 2518 2519 start /= 8; 2520 len /= 8; 2521 end = start + len - 1; 2522 2523 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); 2524 skge_write32(hw, RB_ADDR(q, RB_START), start); 2525 skge_write32(hw, RB_ADDR(q, RB_WP), start); 2526 skge_write32(hw, RB_ADDR(q, RB_RP), start); 2527 skge_write32(hw, RB_ADDR(q, RB_END), end); 2528 2529 if (q == Q_R1 || q == Q_R2) { 2530 /* Set thresholds on receive queues */ 2531 skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), 2532 start + (2*len)/3); 2533 skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), 2534 start + (len/3)); 2535 } else { 2536 /* Enable store & forward on Tx queues because 2537 * Tx FIFO is only 4K on Genesis and 1K on Yukon 2538 */ 2539 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); 2540 } 2541 2542 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); 2543} 2544 2545/* Setup Bus Memory Interface */ 2546static void skge_qset(struct skge_port *skge, u16 q, 2547 const struct skge_element *e) 2548{ 2549 struct skge_hw *hw = skge->hw; 2550 u32 watermark = 0x600; 2551 u64 base = skge->dma + (e->desc - skge->mem); 2552 2553 /* optimization to reduce window on 32-bit/33 MHz */ 2554 if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0) 2555 watermark /= 2; 2556 2557 skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET); 2558 skge_write32(hw, Q_ADDR(q, Q_F), watermark); 2559 skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32)); 2560 skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base); 2561} 2562 2563static int skge_up(struct net_device *dev) 2564{ 2565 struct skge_port *skge = netdev_priv(dev); 2566 struct skge_hw *hw = skge->hw; 2567 int port = skge->port; 2568 u32 chunk, ram_addr; 2569 size_t
rx_size, tx_size; 2570 int err; 2571 2572 if (!is_valid_ether_addr(dev->dev_addr)) 2573 return -EINVAL; 2574 2575 if (netif_msg_ifup(skge)) 2576 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); 2577 2578 if (dev->mtu > RX_BUF_SIZE) 2579 skge->rx_buf_size = dev->mtu + ETH_HLEN; 2580 else 2581 skge->rx_buf_size = RX_BUF_SIZE; 2582 2583 2584 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); 2585 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); 2586 skge->mem_size = tx_size + rx_size; 2587 skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma); 2588 if (!skge->mem) 2589 return -ENOMEM; 2590 2591 BUG_ON(skge->dma & 7); 2592 2593 if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { 2594 dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); 2595 err = -EINVAL; 2596 goto free_pci_mem; 2597 } 2598 2599 memset(skge->mem, 0, skge->mem_size); 2600 2601 err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma); 2602 if (err) 2603 goto free_pci_mem; 2604 2605 err = skge_rx_fill(dev); 2606 if (err) 2607 goto free_rx_ring; 2608 2609 err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, 2610 skge->dma + rx_size); 2611 if (err) 2612 goto free_rx_ring; 2613 2614 /* Initialize MAC */ 2615 spin_lock_bh(&hw->phy_lock); 2616 if (hw->chip_id == CHIP_ID_GENESIS) 2617 genesis_mac_init(hw, port); 2618 else 2619 yukon_mac_init(hw, port); 2620 spin_unlock_bh(&hw->phy_lock); 2621 2622 /* Configure RAMbuffers - equally between ports and tx/rx */ 2623 chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2); 2624 ram_addr = hw->ram_offset + 2 * chunk * port; 2625 2626 skge_ramset(hw, rxqaddr[port], ram_addr, chunk); 2627 skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); 2628 2629 BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); 2630 skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk); 2631 skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); 2632 2633 /* Start receiver BMU */ 2634 wmb(); 2635 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); 2636 skge_led(skge, LED_MODE_ON); 2637 2638 spin_lock_irq(&hw->hw_lock); 2639 hw->intr_mask |= portmask[port]; 2640 skge_write32(hw, B0_IMSK, hw->intr_mask); 2641 spin_unlock_irq(&hw->hw_lock); 2642 2643 napi_enable(&skge->napi); 2644 return 0; 2645 2646 free_rx_ring: 2647 skge_rx_clean(skge); 2648 kfree(skge->rx_ring.start); 2649 free_pci_mem: 2650 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); 2651 skge->mem = NULL; 2652 2653 return err; 2654} 2655 2656/* stop receiver */ 2657static void skge_rx_stop(struct skge_hw *hw, int port) 2658{ 2659 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); 2660 skge_write32(hw, RB_ADDR(port ? 
Q_R2 : Q_R1, RB_CTRL), 2661 RB_RST_SET|RB_DIS_OP_MD); 2662 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); 2663} 2664 2665static int skge_down(struct net_device *dev) 2666{ 2667 struct skge_port *skge = netdev_priv(dev); 2668 struct skge_hw *hw = skge->hw; 2669 int port = skge->port; 2670 2671 if (skge->mem == NULL) 2672 return 0; 2673 2674 if (netif_msg_ifdown(skge)) 2675 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); 2676 2677 netif_tx_disable(dev); 2678 2679 if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC) 2680 del_timer_sync(&skge->link_timer); 2681 2682 napi_disable(&skge->napi); 2683 netif_carrier_off(dev); 2684 2685 spin_lock_irq(&hw->hw_lock); 2686 hw->intr_mask &= ~portmask[port]; 2687 skge_write32(hw, B0_IMSK, hw->intr_mask); 2688 spin_unlock_irq(&hw->hw_lock); 2689 2690 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF); 2691 if (hw->chip_id == CHIP_ID_GENESIS) 2692 genesis_stop(skge); 2693 else 2694 yukon_stop(skge); 2695 2696 /* Stop transmitter */ 2697 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2698 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), 2699 RB_RST_SET|RB_DIS_OP_MD); 2700 2701 2702 /* Disable Force Sync bit and Enable Alloc bit */ 2703 skge_write8(hw, SK_REG(port, TXA_CTRL), 2704 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 2705 2706 /* Stop Interval Timer and Limit Counter of Tx Arbiter */ 2707 skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); 2708 skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); 2709 2710 /* Reset PCI FIFO */ 2711 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); 2712 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); 2713 2714 /* Reset the RAM Buffer async Tx queue */ 2715 skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); 2716 2717 skge_rx_stop(hw, port); 2718 2719 if (hw->chip_id == CHIP_ID_GENESIS) { 2720 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); 2721 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET); 2722 } else { 2723 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 2724 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 2725 } 2726 2727 skge_led(skge, LED_MODE_OFF); 2728 2729 netif_tx_lock_bh(dev); 2730 skge_tx_clean(dev); 2731 netif_tx_unlock_bh(dev); 2732 2733 skge_rx_clean(skge); 2734 2735 kfree(skge->rx_ring.start); 2736 kfree(skge->tx_ring.start); 2737 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); 2738 skge->mem = NULL; 2739 return 0; 2740} 2741 2742static inline int skge_avail(const struct skge_ring *ring) 2743{ 2744 smp_mb(); 2745 return ((ring->to_clean > ring->to_use) ? 
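/* one slot is kept empty so a full ring is distinguishable from an empty one; e.g. count = 128, to_clean = 0, to_use = 5 gives 128 + (0 - 5) - 1 = 122 free slots */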
0 : ring->count) 2746 + (ring->to_clean - ring->to_use) - 1; 2747} 2748 2749static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) 2750{ 2751 struct skge_port *skge = netdev_priv(dev); 2752 struct skge_hw *hw = skge->hw; 2753 struct skge_element *e; 2754 struct skge_tx_desc *td; 2755 int i; 2756 u32 control, len; 2757 u64 map; 2758 2759 if (skb_padto(skb, ETH_ZLEN)) 2760 return NETDEV_TX_OK; 2761 2762 if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) 2763 return NETDEV_TX_BUSY; 2764 2765 e = skge->tx_ring.to_use; 2766 td = e->desc; 2767 BUG_ON(td->control & BMU_OWN); 2768 e->skb = skb; 2769 len = skb_headlen(skb); 2770 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); 2771 pci_unmap_addr_set(e, mapaddr, map); 2772 pci_unmap_len_set(e, maplen, len); 2773 2774 td->dma_lo = map; 2775 td->dma_hi = map >> 32; 2776 2777 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2778 const int offset = skb_transport_offset(skb); 2779 2780 /* This seems backwards, but it is what the sk98lin 2781 * does. Looks like hardware is wrong? 2782 */ 2783 if (ipip_hdr(skb)->protocol == IPPROTO_UDP 2784 && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) 2785 control = BMU_TCP_CHECK; 2786 else 2787 control = BMU_UDP_CHECK; 2788 2789 td->csum_offs = 0; 2790 td->csum_start = offset; 2791 td->csum_write = offset + skb->csum_offset; 2792 } else 2793 control = BMU_CHECK; 2794 2795 if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */ 2796 control |= BMU_EOF| BMU_IRQ_EOF; 2797 else { 2798 struct skge_tx_desc *tf = td; 2799 2800 control |= BMU_STFWD; 2801 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2802 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2803 2804 map = pci_map_page(hw->pdev, frag->page, frag->page_offset, 2805 frag->size, PCI_DMA_TODEVICE); 2806 2807 e = e->next; 2808 e->skb = skb; 2809 tf = e->desc; 2810 BUG_ON(tf->control & BMU_OWN); 2811 2812 tf->dma_lo = map; 2813 tf->dma_hi = (u64) map >> 32; 2814 pci_unmap_addr_set(e, mapaddr, map); 2815 pci_unmap_len_set(e, maplen, frag->size); 2816 2817 tf->control = BMU_OWN | BMU_SW | control | frag->size; 2818 } 2819 tf->control |= BMU_EOF | BMU_IRQ_EOF; 2820 } 2821 /* Make sure all the descriptors are written */ 2822 wmb(); 2823 td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; 2824 wmb(); 2825 2826 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); 2827 2828 if (unlikely(netif_msg_tx_queued(skge))) 2829 printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n", 2830 dev->name, e - skge->tx_ring.start, skb->len); 2831 2832 skge->tx_ring.to_use = e->next; 2833 smp_wmb(); 2834 2835 if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) { 2836 pr_debug("%s: transmit queue full\n", dev->name); 2837 netif_stop_queue(dev); 2838 } 2839 2840 dev->trans_start = jiffies; 2841 2842 return NETDEV_TX_OK; 2843} 2844 2845 2846/* Free resources associated with this ring element */ 2847static void skge_tx_free(struct skge_port *skge, struct skge_element *e, 2848 u32 control) 2849{ 2850 struct pci_dev *pdev = skge->hw->pdev; 2851 2852 /* skb header vs.
fragment */ 2853 if (control & BMU_STF) 2854 pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr), 2855 pci_unmap_len(e, maplen), 2856 PCI_DMA_TODEVICE); 2857 else 2858 pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr), 2859 pci_unmap_len(e, maplen), 2860 PCI_DMA_TODEVICE); 2861 2862 if (control & BMU_EOF) { 2863 if (unlikely(netif_msg_tx_done(skge))) 2864 printk(KERN_DEBUG PFX "%s: tx done slot %td\n", 2865 skge->netdev->name, e - skge->tx_ring.start); 2866 2867 dev_kfree_skb(e->skb); 2868 } 2869} 2870 2871/* Free all buffers in transmit ring */ 2872static void skge_tx_clean(struct net_device *dev) 2873{ 2874 struct skge_port *skge = netdev_priv(dev); 2875 struct skge_element *e; 2876 2877 for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { 2878 struct skge_tx_desc *td = e->desc; 2879 skge_tx_free(skge, e, td->control); 2880 td->control = 0; 2881 } 2882 2883 skge->tx_ring.to_clean = e; 2884} 2885 2886static void skge_tx_timeout(struct net_device *dev) 2887{ 2888 struct skge_port *skge = netdev_priv(dev); 2889 2890 if (netif_msg_timer(skge)) 2891 printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name); 2892 2893 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); 2894 skge_tx_clean(dev); 2895 netif_wake_queue(dev); 2896} 2897 2898static int skge_change_mtu(struct net_device *dev, int new_mtu) 2899{ 2900 int err; 2901 2902 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 2903 return -EINVAL; 2904 2905 if (!netif_running(dev)) { 2906 dev->mtu = new_mtu; 2907 return 0; 2908 } 2909 2910 skge_down(dev); 2911 2912 dev->mtu = new_mtu; 2913 2914 err = skge_up(dev); 2915 if (err) 2916 dev_close(dev); 2917 2918 return err; 2919} 2920 2921static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; 2922 2923static void genesis_add_filter(u8 filter[8], const u8 *addr) 2924{ 2925 u32 crc, bit; 2926 2927 crc = ether_crc_le(ETH_ALEN, addr); 2928 bit = ~crc & 0x3f; 2929 filter[bit/8] |= 1 << (bit%8); 2930} 2931 2932static void genesis_set_multicast(struct net_device *dev) 2933{ 2934 struct skge_port *skge = netdev_priv(dev); 2935 struct skge_hw *hw = skge->hw; 2936 int port = skge->port; 2937 int i, count = dev->mc_count; 2938 struct dev_mc_list *list = dev->mc_list; 2939 u32 mode; 2940 u8 filter[8]; 2941 2942 mode = xm_read32(hw, port, XM_MODE); 2943 mode |= XM_MD_ENA_HASH; 2944 if (dev->flags & IFF_PROMISC) 2945 mode |= XM_MD_ENA_PROM; 2946 else 2947 mode &= ~XM_MD_ENA_PROM; 2948 2949 if (dev->flags & IFF_ALLMULTI) 2950 memset(filter, 0xff, sizeof(filter)); 2951 else { 2952 memset(filter, 0, sizeof(filter)); 2953 2954 if (skge->flow_status == FLOW_STAT_REM_SEND 2955 || skge->flow_status == FLOW_STAT_SYMMETRIC) 2956 genesis_add_filter(filter, pause_mc_addr); 2957 2958 for (i = 0; list && i < count; i++, list = list->next) 2959 genesis_add_filter(filter, list->dmi_addr); 2960 } 2961 2962 xm_write32(hw, port, XM_MODE, mode); 2963 xm_outhash(hw, port, XM_HSM, filter); 2964} 2965 2966static void yukon_add_filter(u8 filter[8], const u8 *addr) 2967{ 2968 u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f; 2969 filter[bit/8] |= 1 << (bit%8); 2970} 2971 2972static void yukon_set_multicast(struct net_device *dev) 2973{ 2974 struct skge_port *skge = netdev_priv(dev); 2975 struct skge_hw *hw = skge->hw; 2976 int port = skge->port; 2977 struct dev_mc_list *list = dev->mc_list; 2978 int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND 2979 || skge->flow_status == FLOW_STAT_SYMMETRIC); 2980 u16 reg; 2981 u8 filter[8]; 2982 2983 memset(filter, 0, sizeof(filter)); 2984 2985 
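/* The GMAC multicast filter is a 64-bit hash: yukon_add_filter() takes the low six bits of the CRC-32 of each address as a bit index (e.g. a CRC ending in 0x2a sets bit 42, i.e. filter[5] |= 1 << 2), and the eight filter bytes are then written out pairwise to GM_MC_ADDR_H1..H4 below. */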
reg = gma_read16(hw, port, GM_RX_CTRL); 2986 reg |= GM_RXCR_UCF_ENA; 2987 2988 if (dev->flags & IFF_PROMISC) /* promiscuous */ 2989 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 2990 else if (dev->flags & IFF_ALLMULTI) /* all multicast */ 2991 memset(filter, 0xff, sizeof(filter)); 2992 else if (dev->mc_count == 0 && !rx_pause)/* no multicast */ 2993 reg &= ~GM_RXCR_MCF_ENA; 2994 else { 2995 int i; 2996 reg |= GM_RXCR_MCF_ENA; 2997 2998 if (rx_pause) 2999 yukon_add_filter(filter, pause_mc_addr); 3000 3001 for (i = 0; list && i < dev->mc_count; i++, list = list->next) 3002 yukon_add_filter(filter, list->dmi_addr); 3003 } 3004 3005 3006 gma_write16(hw, port, GM_MC_ADDR_H1, 3007 (u16)filter[0] | ((u16)filter[1] << 8)); 3008 gma_write16(hw, port, GM_MC_ADDR_H2, 3009 (u16)filter[2] | ((u16)filter[3] << 8)); 3010 gma_write16(hw, port, GM_MC_ADDR_H3, 3011 (u16)filter[4] | ((u16)filter[5] << 8)); 3012 gma_write16(hw, port, GM_MC_ADDR_H4, 3013 (u16)filter[6] | ((u16)filter[7] << 8)); 3014 3015 gma_write16(hw, port, GM_RX_CTRL, reg); 3016} 3017 3018static inline u16 phy_length(const struct skge_hw *hw, u32 status) 3019{ 3020 if (hw->chip_id == CHIP_ID_GENESIS) 3021 return status >> XMR_FS_LEN_SHIFT; 3022 else 3023 return status >> GMR_FS_LEN_SHIFT; 3024} 3025 3026static inline int bad_phy_status(const struct skge_hw *hw, u32 status) 3027{ 3028 if (hw->chip_id == CHIP_ID_GENESIS) 3029 return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0; 3030 else 3031 return (status & GMR_FS_ANY_ERR) || 3032 (status & GMR_FS_RX_OK) == 0; 3033} 3034 3035static void skge_set_multicast(struct net_device *dev) 3036{ 3037 struct skge_port *skge = netdev_priv(dev); 3038 struct skge_hw *hw = skge->hw; 3039 3040 if (hw->chip_id == CHIP_ID_GENESIS) 3041 genesis_set_multicast(dev); 3042 else 3043 yukon_set_multicast(dev); 3044 3045} 3046 3047 3048/* Get receive buffer from descriptor. 
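 * (copybreak: frames shorter than RX_COPY_THRESHOLD are copied into a fresh skb so the mapped buffer can be resubmitted as-is; larger frames get a replacement buffer instead)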
3049 * Handles copy of small buffers and reallocation failures 3050 */ 3051static struct sk_buff *skge_rx_get(struct net_device *dev, 3052 struct skge_element *e, 3053 u32 control, u32 status, u16 csum) 3054{ 3055 struct skge_port *skge = netdev_priv(dev); 3056 struct sk_buff *skb; 3057 u16 len = control & BMU_BBC; 3058 3059 if (unlikely(netif_msg_rx_status(skge))) 3060 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n", 3061 dev->name, e - skge->rx_ring.start, 3062 status, len); 3063 3064 if (len > skge->rx_buf_size) 3065 goto error; 3066 3067 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) 3068 goto error; 3069 3070 if (bad_phy_status(skge->hw, status)) 3071 goto error; 3072 3073 if (phy_length(skge->hw, status) != len) 3074 goto error; 3075 3076 if (len < RX_COPY_THRESHOLD) { 3077 skb = netdev_alloc_skb(dev, len + 2); 3078 if (!skb) 3079 goto resubmit; 3080 3081 skb_reserve(skb, 2); 3082 pci_dma_sync_single_for_cpu(skge->hw->pdev, 3083 pci_unmap_addr(e, mapaddr), 3084 len, PCI_DMA_FROMDEVICE); 3085 skb_copy_from_linear_data(e->skb, skb->data, len); 3086 pci_dma_sync_single_for_device(skge->hw->pdev, 3087 pci_unmap_addr(e, mapaddr), 3088 len, PCI_DMA_FROMDEVICE); 3089 skge_rx_reuse(e, skge->rx_buf_size); 3090 } else { 3091 struct sk_buff *nskb; 3092 nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN); 3093 if (!nskb) 3094 goto resubmit; 3095 3096 skb_reserve(nskb, NET_IP_ALIGN); 3097 pci_unmap_single(skge->hw->pdev, 3098 pci_unmap_addr(e, mapaddr), 3099 pci_unmap_len(e, maplen), 3100 PCI_DMA_FROMDEVICE); 3101 skb = e->skb; 3102 prefetch(skb->data); 3103 skge_rx_setup(skge, e, nskb, skge->rx_buf_size); 3104 } 3105 3106 skb_put(skb, len); 3107 if (skge->rx_csum) { 3108 skb->csum = csum; 3109 skb->ip_summed = CHECKSUM_COMPLETE; 3110 } 3111 3112 skb->protocol = eth_type_trans(skb, dev); 3113 3114 return skb; 3115error: 3116 3117 if (netif_msg_rx_err(skge)) 3118 printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n", 3119 dev->name, e - skge->rx_ring.start, 3120 control, status); 3121 3122 if (skge->hw->chip_id == CHIP_ID_GENESIS) { 3123 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) 3124 dev->stats.rx_length_errors++; 3125 if (status & XMR_FS_FRA_ERR) 3126 dev->stats.rx_frame_errors++; 3127 if (status & XMR_FS_FCS_ERR) 3128 dev->stats.rx_crc_errors++; 3129 } else { 3130 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) 3131 dev->stats.rx_length_errors++; 3132 if (status & GMR_FS_FRAGMENT) 3133 dev->stats.rx_frame_errors++; 3134 if (status & GMR_FS_CRC_ERR) 3135 dev->stats.rx_crc_errors++; 3136 } 3137 3138resubmit: 3139 skge_rx_reuse(e, skge->rx_buf_size); 3140 return NULL; 3141} 3142 3143/* Free all buffers in Tx ring which are no longer owned by device */ 3144static void skge_tx_done(struct net_device *dev) 3145{ 3146 struct skge_port *skge = netdev_priv(dev); 3147 struct skge_ring *ring = &skge->tx_ring; 3148 struct skge_element *e; 3149 3150 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); 3151 3152 for (e = ring->to_clean; e != ring->to_use; e = e->next) { 3153 u32 control = ((const struct skge_tx_desc *) e->desc)->control; 3154 3155 if (control & BMU_OWN) 3156 break; 3157 3158 skge_tx_free(skge, e, control); 3159 } 3160 skge->tx_ring.to_clean = e; 3161 3162 /* Can run lockless until we need to synchronize to restart queue. 
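 * (the smp_mb() below pairs with the smp_wmb() after to_use is advanced in skge_xmit_frame(); the re-check under netif_tx_lock() avoids a lost wakeup)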
*/ 3163 smp_mb(); 3164 3165 if (unlikely(netif_queue_stopped(dev) && 3166 skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { 3167 netif_tx_lock(dev); 3168 if (unlikely(netif_queue_stopped(dev) && 3169 skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { 3170 netif_wake_queue(dev); 3171 3172 } 3173 netif_tx_unlock(dev); 3174 } 3175} 3176 3177static int skge_poll(struct napi_struct *napi, int to_do) 3178{ 3179 struct skge_port *skge = container_of(napi, struct skge_port, napi); 3180 struct net_device *dev = skge->netdev; 3181 struct skge_hw *hw = skge->hw; 3182 struct skge_ring *ring = &skge->rx_ring; 3183 struct skge_element *e; 3184 int work_done = 0; 3185 3186 skge_tx_done(dev); 3187 3188 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); 3189 3190 for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { 3191 struct skge_rx_desc *rd = e->desc; 3192 struct sk_buff *skb; 3193 u32 control; 3194 3195 rmb(); 3196 control = rd->control; 3197 if (control & BMU_OWN) 3198 break; 3199 3200 skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); 3201 if (likely(skb)) { 3202 netif_receive_skb(skb); 3203 3204 ++work_done; 3205 } 3206 } 3207 ring->to_clean = e; 3208 3209 /* restart receiver */ 3210 wmb(); 3211 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); 3212 3213 if (work_done < to_do) { 3214 unsigned long flags; 3215 3216 spin_lock_irqsave(&hw->hw_lock, flags); 3217 __napi_complete(napi); 3218 hw->intr_mask |= napimask[skge->port]; 3219 skge_write32(hw, B0_IMSK, hw->intr_mask); 3220 skge_read32(hw, B0_IMSK); 3221 spin_unlock_irqrestore(&hw->hw_lock, flags); 3222 } 3223 3224 return work_done; 3225} 3226 3227/* Parity errors seem to happen when Genesis is connected to a switch 3228 * with no other ports present. Heartbeat error?? 3229 */ 3230static void skge_mac_parity(struct skge_hw *hw, int port) 3231{ 3232 struct net_device *dev = hw->dev[port]; 3233 3234 ++dev->stats.tx_heartbeat_errors; 3235 3236 if (hw->chip_id == CHIP_ID_GENESIS) 3237 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), 3238 MFF_CLR_PERR); 3239 else 3240 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ 3241 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), 3242 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) 3243 ? 
GMF_CLI_TX_FC : GMF_CLI_TX_PE); 3244} 3245 3246static void skge_mac_intr(struct skge_hw *hw, int port) 3247{ 3248 if (hw->chip_id == CHIP_ID_GENESIS) 3249 genesis_mac_intr(hw, port); 3250 else 3251 yukon_mac_intr(hw, port); 3252} 3253 3254/* Handle device specific framing and timeout interrupts */ 3255static void skge_error_irq(struct skge_hw *hw) 3256{ 3257 struct pci_dev *pdev = hw->pdev; 3258 u32 hwstatus = skge_read32(hw, B0_HWE_ISRC); 3259 3260 if (hw->chip_id == CHIP_ID_GENESIS) { 3261 /* clear xmac errors */ 3262 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) 3263 skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT); 3264 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) 3265 skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT); 3266 } else { 3267 /* Timestamp (unused) overflow */ 3268 if (hwstatus & IS_IRQ_TIST_OV) 3269 skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 3270 } 3271 3272 if (hwstatus & IS_RAM_RD_PAR) { 3273 dev_err(&pdev->dev, "Ram read data parity error\n"); 3274 skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR); 3275 } 3276 3277 if (hwstatus & IS_RAM_WR_PAR) { 3278 dev_err(&pdev->dev, "Ram write data parity error\n"); 3279 skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR); 3280 } 3281 3282 if (hwstatus & IS_M1_PAR_ERR) 3283 skge_mac_parity(hw, 0); 3284 3285 if (hwstatus & IS_M2_PAR_ERR) 3286 skge_mac_parity(hw, 1); 3287 3288 if (hwstatus & IS_R1_PAR_ERR) { 3289 dev_err(&pdev->dev, "%s: receive queue parity error\n", 3290 hw->dev[0]->name); 3291 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); 3292 } 3293 3294 if (hwstatus & IS_R2_PAR_ERR) { 3295 dev_err(&pdev->dev, "%s: receive queue parity error\n", 3296 hw->dev[1]->name); 3297 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); 3298 } 3299 3300 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { 3301 u16 pci_status, pci_cmd; 3302 3303 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 3304 pci_read_config_word(pdev, PCI_STATUS, &pci_status); 3305 3306 dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n", 3307 pci_cmd, pci_status); 3308 3309 /* Write the error bits back to clear them. */ 3310 pci_status &= PCI_STATUS_ERROR_BITS; 3311 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3312 pci_write_config_word(pdev, PCI_COMMAND, 3313 pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); 3314 pci_write_config_word(pdev, PCI_STATUS, pci_status); 3315 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3316 3317 /* if error still set then just ignore it */ 3318 hwstatus = skge_read32(hw, B0_HWE_ISRC); 3319 if (hwstatus & IS_IRQ_STAT) { 3320 dev_warn(&hw->pdev->dev, "unable to clear errors (so ignoring them)\n"); 3321 hw->intr_mask &= ~IS_HW_ERR; 3322 } 3323 } 3324} 3325 3326/* 3327 * Interrupts from the PHY are handled in a tasklet (softirq) 3328 * because accessing phy registers requires a spin wait, which might 3329 * cause excess interrupt latency.
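 * The hard interrupt handler masks IS_EXT_REG and schedules phy_task; skge_extirq() sets the bit again once the PHY has been serviced.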
3330 */ 3331static void skge_extirq(unsigned long arg) 3332{ 3333 struct skge_hw *hw = (struct skge_hw *) arg; 3334 int port; 3335 3336 for (port = 0; port < hw->ports; port++) { 3337 struct net_device *dev = hw->dev[port]; 3338 3339 if (netif_running(dev)) { 3340 struct skge_port *skge = netdev_priv(dev); 3341 3342 spin_lock(&hw->phy_lock); 3343 if (hw->chip_id != CHIP_ID_GENESIS) 3344 yukon_phy_intr(skge); 3345 else if (hw->phy_type == SK_PHY_BCOM) 3346 bcom_phy_intr(skge); 3347 spin_unlock(&hw->phy_lock); 3348 } 3349 } 3350 3351 spin_lock_irq(&hw->hw_lock); 3352 hw->intr_mask |= IS_EXT_REG; 3353 skge_write32(hw, B0_IMSK, hw->intr_mask); 3354 skge_read32(hw, B0_IMSK); 3355 spin_unlock_irq(&hw->hw_lock); 3356} 3357 3358static irqreturn_t skge_intr(int irq, void *dev_id) 3359{ 3360 struct skge_hw *hw = dev_id; 3361 u32 status; 3362 int handled = 0; 3363 3364 spin_lock(&hw->hw_lock); 3365 /* Reading this register masks IRQ */ 3366 status = skge_read32(hw, B0_SP_ISRC); 3367 if (status == 0 || status == ~0) 3368 goto out; 3369 3370 handled = 1; 3371 status &= hw->intr_mask; 3372 if (status & IS_EXT_REG) { 3373 hw->intr_mask &= ~IS_EXT_REG; 3374 tasklet_schedule(&hw->phy_task); 3375 } 3376 3377 if (status & (IS_XA1_F|IS_R1_F)) { 3378 struct skge_port *skge = netdev_priv(hw->dev[0]); 3379 hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); 3380 napi_schedule(&skge->napi); 3381 } 3382 3383 if (status & IS_PA_TO_TX1) 3384 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); 3385 3386 if (status & IS_PA_TO_RX1) { 3387 ++hw->dev[0]->stats.rx_over_errors; 3388 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); 3389 } 3390 3391 3392 if (status & IS_MAC1) 3393 skge_mac_intr(hw, 0); 3394 3395 if (hw->dev[1]) { 3396 struct skge_port *skge = netdev_priv(hw->dev[1]); 3397 3398 if (status & (IS_XA2_F|IS_R2_F)) { 3399 hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); 3400 napi_schedule(&skge->napi); 3401 } 3402 3403 if (status & IS_PA_TO_RX2) { 3404 ++hw->dev[1]->stats.rx_over_errors; 3405 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); 3406 } 3407 3408 if (status & IS_PA_TO_TX2) 3409 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2); 3410 3411 if (status & IS_MAC2) 3412 skge_mac_intr(hw, 1); 3413 } 3414 3415 if (status & IS_HW_ERR) 3416 skge_error_irq(hw); 3417 3418 skge_write32(hw, B0_IMSK, hw->intr_mask); 3419 skge_read32(hw, B0_IMSK); 3420out: 3421 spin_unlock(&hw->hw_lock); 3422 3423 return IRQ_RETVAL(handled); 3424} 3425 3426#ifdef CONFIG_NET_POLL_CONTROLLER 3427static void skge_netpoll(struct net_device *dev) 3428{ 3429 struct skge_port *skge = netdev_priv(dev); 3430 3431 disable_irq(dev->irq); 3432 skge_intr(dev->irq, skge->hw); 3433 enable_irq(dev->irq); 3434} 3435#endif 3436 3437static int skge_set_mac_address(struct net_device *dev, void *p) 3438{ 3439 struct skge_port *skge = netdev_priv(dev); 3440 struct skge_hw *hw = skge->hw; 3441 unsigned port = skge->port; 3442 const struct sockaddr *addr = p; 3443 u16 ctrl; 3444 3445 if (!is_valid_ether_addr(addr->sa_data)) 3446 return -EADDRNOTAVAIL; 3447 3448 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 3449 3450 if (!netif_running(dev)) { 3451 memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); 3452 memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); 3453 } else { 3454 /* disable Rx */ 3455 spin_lock_bh(&hw->phy_lock); 3456 ctrl = gma_read16(hw, port, GM_GP_CTRL); 3457 gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA); 3458 3459 memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); 3460 memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, 
ETH_ALEN); 3461 3462 if (hw->chip_id == CHIP_ID_GENESIS) 3463 xm_outaddr(hw, port, XM_SA, dev->dev_addr); 3464 else { 3465 gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); 3466 gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); 3467 } 3468 3469 gma_write16(hw, port, GM_GP_CTRL, ctrl); 3470 spin_unlock_bh(&hw->phy_lock); 3471 } 3472 3473 return 0; 3474} 3475 3476static const struct { 3477 u8 id; 3478 const char *name; 3479} skge_chips[] = { 3480 { CHIP_ID_GENESIS, "Genesis" }, 3481 { CHIP_ID_YUKON, "Yukon" }, 3482 { CHIP_ID_YUKON_LITE, "Yukon-Lite"}, 3483 { CHIP_ID_YUKON_LP, "Yukon-LP"}, 3484}; 3485 3486static const char *skge_board_name(const struct skge_hw *hw) 3487{ 3488 int i; 3489 static char buf[16]; 3490 3491 for (i = 0; i < ARRAY_SIZE(skge_chips); i++) 3492 if (skge_chips[i].id == hw->chip_id) 3493 return skge_chips[i].name; 3494 3495 snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id); 3496 return buf; 3497} 3498 3499 3500/* 3501 * Setup the board data structure, but don't bring up 3502 * the port(s) 3503 */ 3504static int skge_reset(struct skge_hw *hw) 3505{ 3506 u32 reg; 3507 u16 ctst, pci_status; 3508 u8 t8, mac_cfg, pmd_type; 3509 int i; 3510 3511 ctst = skge_read16(hw, B0_CTST); 3512 3513 /* do a SW reset */ 3514 skge_write8(hw, B0_CTST, CS_RST_SET); 3515 skge_write8(hw, B0_CTST, CS_RST_CLR); 3516 3517 /* clear PCI errors, if any */ 3518 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3519 skge_write8(hw, B2_TST_CTRL2, 0); 3520 3521 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); 3522 pci_write_config_word(hw->pdev, PCI_STATUS, 3523 pci_status | PCI_STATUS_ERROR_BITS); 3524 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3525 skge_write8(hw, B0_CTST, CS_MRST_CLR); 3526 3527 /* restore CLK_RUN bits (for Yukon-Lite) */ 3528 skge_write16(hw, B0_CTST, 3529 ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA)); 3530 3531 hw->chip_id = skge_read8(hw, B2_CHIP_ID); 3532 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; 3533 pmd_type = skge_read8(hw, B2_PMD_TYP); 3534 hw->copper = (pmd_type == 'T' || pmd_type == '1'); 3535 3536 switch (hw->chip_id) { 3537 case CHIP_ID_GENESIS: 3538 switch (hw->phy_type) { 3539 case SK_PHY_XMAC: 3540 hw->phy_addr = PHY_ADDR_XMAC; 3541 break; 3542 case SK_PHY_BCOM: 3543 hw->phy_addr = PHY_ADDR_BCOM; 3544 break; 3545 default: 3546 dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n", 3547 hw->phy_type); 3548 return -EOPNOTSUPP; 3549 } 3550 break; 3551 3552 case CHIP_ID_YUKON: 3553 case CHIP_ID_YUKON_LITE: 3554 case CHIP_ID_YUKON_LP: 3555 if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S') 3556 hw->copper = 1; 3557 3558 hw->phy_addr = PHY_ADDR_MARV; 3559 break; 3560 3561 default: 3562 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", 3563 hw->chip_id); 3564 return -EOPNOTSUPP; 3565 } 3566 3567 mac_cfg = skge_read8(hw, B2_MAC_CFG); 3568 hw->ports = (mac_cfg & CFG_SNG_MAC) ? 
1 : 2; 3569 hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4; 3570 3571 /* read the adapter's RAM size */ 3572 t8 = skge_read8(hw, B2_E_0); 3573 if (hw->chip_id == CHIP_ID_GENESIS) { 3574 if (t8 == 3) { 3575 /* special case: 4 x 64k x 36, offset = 0x80000 */ 3576 hw->ram_size = 0x100000; 3577 hw->ram_offset = 0x80000; 3578 } else 3579 hw->ram_size = t8 * 512; 3580 } 3581 else if (t8 == 0) 3582 hw->ram_size = 0x20000; 3583 else 3584 hw->ram_size = t8 * 4096; 3585 3586 hw->intr_mask = IS_HW_ERR; 3587 3588 /* Use PHY IRQ for all but fiber-based Genesis boards */ 3589 if (!(hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)) 3590 hw->intr_mask |= IS_EXT_REG; 3591 3592 if (hw->chip_id == CHIP_ID_GENESIS) 3593 genesis_init(hw); 3594 else { 3595 /* switch power to VCC (WA for VAUX problem) */ 3596 skge_write8(hw, B0_POWER_CTRL, 3597 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 3598 3599 /* avoid boards with stuck Hardware error bits */ 3600 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) && 3601 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) { 3602 dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n"); 3603 hw->intr_mask &= ~IS_HW_ERR; 3604 } 3605 3606 /* Clear PHY COMA */ 3607 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3608 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg); 3609 reg &= ~PCI_PHY_COMA; 3610 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg); 3611 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3612 3613 3614 for (i = 0; i < hw->ports; i++) { 3615 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 3616 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); 3617 } 3618 } 3619 3620 /* turn off hardware timer (unused) */ 3621 skge_write8(hw, B2_TI_CTRL, TIM_STOP); 3622 skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); 3623 skge_write8(hw, B0_LED, LED_STAT_ON); 3624 3625 /* enable the Tx Arbiters */ 3626 for (i = 0; i < hw->ports; i++) 3627 skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); 3628 3629 /* Initialize RAM interface */ 3630 skge_write16(hw, B3_RI_CTRL, RI_RST_CLR); 3631 3632 skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53); 3633 skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53); 3634 skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53); 3635 skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53); 3636 skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53); 3637 skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53); 3638 skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53); 3639 skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53); 3640 skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53); 3641 skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53); 3642 skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53); 3643 skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53); 3644 3645 skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK); 3646 3647 /* Set interrupt moderation for Transmit only 3648 * Receive interrupts avoided by NAPI 3649 */ 3650 skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F); 3651 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); 3652 skge_write32(hw, B2_IRQM_CTRL, TIM_START); 3653 3654 skge_write32(hw, B0_IMSK, hw->intr_mask); 3655 3656 for (i = 0; i < hw->ports; i++) { 3657 if (hw->chip_id == CHIP_ID_GENESIS) 3658 genesis_reset(hw, i); 3659 else 3660 yukon_reset(hw, i); 3661 } 3662 3663 return 0; 3664} 3665 3666 3667#ifdef CONFIG_SKGE_DEBUG 3668 3669static struct dentry *skge_debug; 3670 3671static int skge_debug_show(struct seq_file *seq, void *v) 3672{ 3673 struct net_device *dev = seq->private; 3674 const struct skge_port *skge = netdev_priv(dev); 3675 const struct skge_hw *hw = skge->hw; 3676 const struct skge_element *e; 3677 3678 if
(!netif_running(dev)) 3679 return -ENETDOWN; 3680 3681 seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC), 3682 skge_read32(hw, B0_IMSK)); 3683 3684 seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring)); 3685 for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { 3686 const struct skge_tx_desc *t = e->desc; 3687 seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n", 3688 t->control, t->dma_hi, t->dma_lo, t->status, 3689 t->csum_offs, t->csum_write, t->csum_start); 3690 } 3691 3692 seq_printf(seq, "\nRx Ring: \n"); 3693 for (e = skge->rx_ring.to_clean; ; e = e->next) { 3694 const struct skge_rx_desc *r = e->desc; 3695 3696 if (r->control & BMU_OWN) 3697 break; 3698 3699 seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n", 3700 r->control, r->dma_hi, r->dma_lo, r->status, 3701 r->timestamp, r->csum1, r->csum1_start); 3702 } 3703 3704 return 0; 3705} 3706 3707static int skge_debug_open(struct inode *inode, struct file *file) 3708{ 3709 return single_open(file, skge_debug_show, inode->i_private); 3710} 3711 3712static const struct file_operations skge_debug_fops = { 3713 .owner = THIS_MODULE, 3714 .open = skge_debug_open, 3715 .read = seq_read, 3716 .llseek = seq_lseek, 3717 .release = single_release, 3718}; 3719 3720/* 3721 * Use network device events to create/remove/rename 3722 * debugfs file entries 3723 */ 3724static int skge_device_event(struct notifier_block *unused, 3725 unsigned long event, void *ptr) 3726{ 3727 struct net_device *dev = ptr; 3728 struct skge_port *skge; 3729 struct dentry *d; 3730 3731 if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug) 3732 goto done; 3733 3734 skge = netdev_priv(dev); 3735 switch(event) { 3736 case NETDEV_CHANGENAME: 3737 if (skge->debugfs) { 3738 d = debugfs_rename(skge_debug, skge->debugfs, 3739 skge_debug, dev->name); 3740 if (d) 3741 skge->debugfs = d; 3742 else { 3743 pr_info(PFX "%s: rename failed\n", dev->name); 3744 debugfs_remove(skge->debugfs); 3745 } 3746 } 3747 break; 3748 3749 case NETDEV_GOING_DOWN: 3750 if (skge->debugfs) { 3751 debugfs_remove(skge->debugfs); 3752 skge->debugfs = NULL; 3753 } 3754 break; 3755 3756 case NETDEV_UP: 3757 d = debugfs_create_file(dev->name, S_IRUGO, 3758 skge_debug, dev, 3759 &skge_debug_fops); 3760 if (!d || IS_ERR(d)) 3761 pr_info(PFX "%s: debugfs create failed\n", 3762 dev->name); 3763 else 3764 skge->debugfs = d; 3765 break; 3766 } 3767 3768done: 3769 return NOTIFY_DONE; 3770} 3771 3772static struct notifier_block skge_notifier = { 3773 .notifier_call = skge_device_event, 3774}; 3775 3776 3777static __init void skge_debug_init(void) 3778{ 3779 struct dentry *ent; 3780 3781 ent = debugfs_create_dir("skge", NULL); 3782 if (!ent || IS_ERR(ent)) { 3783 pr_info(PFX "debugfs create directory failed\n"); 3784 return; 3785 } 3786 3787 skge_debug = ent; 3788 register_netdevice_notifier(&skge_notifier); 3789} 3790 3791static __exit void skge_debug_cleanup(void) 3792{ 3793 if (skge_debug) { 3794 unregister_netdevice_notifier(&skge_notifier); 3795 debugfs_remove(skge_debug); 3796 skge_debug = NULL; 3797 } 3798} 3799 3800#else 3801#define skge_debug_init() 3802#define skge_debug_cleanup() 3803#endif 3804 3805static const struct net_device_ops skge_netdev_ops = { 3806 .ndo_open = skge_up, 3807 .ndo_stop = skge_down, 3808 .ndo_start_xmit = skge_xmit_frame, 3809 .ndo_do_ioctl = skge_ioctl, 3810 .ndo_get_stats = skge_get_stats, 3811 .ndo_tx_timeout = skge_tx_timeout, 3812 .ndo_change_mtu = skge_change_mtu, 3813 .ndo_validate_addr = eth_validate_addr, 3814 
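/* multicast filtering and MAC address changes dispatch on chip type (Genesis/XMAC vs. Yukon/GMAC) inside these handlers */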
.ndo_set_multicast_list = skge_set_multicast, 3815 .ndo_set_mac_address = skge_set_mac_address, 3816#ifdef CONFIG_NET_POLL_CONTROLLER 3817 .ndo_poll_controller = skge_netpoll, 3818#endif 3819}; 3820 3821 3822/* Initialize network device */ 3823static struct net_device *skge_devinit(struct skge_hw *hw, int port, 3824 int highmem) 3825{ 3826 struct skge_port *skge; 3827 struct net_device *dev = alloc_etherdev(sizeof(*skge)); 3828 3829 if (!dev) { 3830 dev_err(&hw->pdev->dev, "etherdev alloc failed\n"); 3831 return NULL; 3832 } 3833 3834 SET_NETDEV_DEV(dev, &hw->pdev->dev); 3835 dev->netdev_ops = &skge_netdev_ops; 3836 dev->ethtool_ops = &skge_ethtool_ops; 3837 dev->watchdog_timeo = TX_WATCHDOG; 3838 dev->irq = hw->pdev->irq; 3839 3840 if (highmem) 3841 dev->features |= NETIF_F_HIGHDMA; 3842 3843 skge = netdev_priv(dev); 3844 netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT); 3845 skge->netdev = dev; 3846 skge->hw = hw; 3847 skge->msg_enable = netif_msg_init(debug, default_msg); 3848 3849 skge->tx_ring.count = DEFAULT_TX_RING_SIZE; 3850 skge->rx_ring.count = DEFAULT_RX_RING_SIZE; 3851 3852 /* Auto speed and flow control */ 3853 skge->autoneg = AUTONEG_ENABLE; 3854 skge->flow_control = FLOW_MODE_SYM_OR_REM; 3855 skge->duplex = -1; 3856 skge->speed = -1; 3857 skge->advertising = skge_supported_modes(hw); 3858 3859 if (device_may_wakeup(&hw->pdev->dev)) 3860 skge->wol = wol_supported(hw) & WAKE_MAGIC; 3861 3862 hw->dev[port] = dev; 3863 3864 skge->port = port; 3865 3866 /* Only used for Genesis XMAC */ 3867 setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge); 3868 3869 if (hw->chip_id != CHIP_ID_GENESIS) { 3870 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3871 skge->rx_csum = 1; 3872 } 3873 3874 /* read the mac address */ 3875 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3876 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 3877 3878 /* device is off until link detection */ 3879 netif_carrier_off(dev); 3880 netif_stop_queue(dev); 3881 3882 return dev; 3883} 3884 3885static void __devinit skge_show_addr(struct net_device *dev) 3886{ 3887 const struct skge_port *skge = netdev_priv(dev); 3888 3889 if (netif_msg_probe(skge)) 3890 printk(KERN_INFO PFX "%s: addr %pM\n", 3891 dev->name, dev->dev_addr); 3892} 3893 3894static int __devinit skge_probe(struct pci_dev *pdev, 3895 const struct pci_device_id *ent) 3896{ 3897 struct net_device *dev, *dev1; 3898 struct skge_hw *hw; 3899 int err, using_dac = 0; 3900 3901 err = pci_enable_device(pdev); 3902 if (err) { 3903 dev_err(&pdev->dev, "cannot enable PCI device\n"); 3904 goto err_out; 3905 } 3906 3907 err = pci_request_regions(pdev, DRV_NAME); 3908 if (err) { 3909 dev_err(&pdev->dev, "cannot obtain PCI resources\n"); 3910 goto err_out_disable_pdev; 3911 } 3912 3913 pci_set_master(pdev); 3914 3915 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3916 using_dac = 1; 3917 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3918 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3919 using_dac = 0; 3920 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3921 } 3922 3923 if (err) { 3924 dev_err(&pdev->dev, "no usable DMA configuration\n"); 3925 goto err_out_free_regions; 3926 } 3927 3928#ifdef __BIG_ENDIAN 3929 /* byte swap descriptors in hardware */ 3930 { 3931 u32 reg; 3932 3933 pci_read_config_dword(pdev, PCI_DEV_REG2, &reg); 3934 reg |= PCI_REV_DESC; 3935 pci_write_config_dword(pdev, PCI_DEV_REG2, reg); 3936 } 3937#endif 3938 3939 err = -ENOMEM; 3940 hw = kzalloc(sizeof(*hw), 
GFP_KERNEL); 3941 if (!hw) { 3942 dev_err(&pdev->dev, "cannot allocate hardware struct\n"); 3943 goto err_out_free_regions; 3944 } 3945 3946 hw->pdev = pdev; 3947 spin_lock_init(&hw->hw_lock); 3948 spin_lock_init(&hw->phy_lock); 3949 tasklet_init(&hw->phy_task, &skge_extirq, (unsigned long) hw); 3950 3951 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3952 if (!hw->regs) { 3953 dev_err(&pdev->dev, "cannot map device registers\n"); 3954 goto err_out_free_hw; 3955 } 3956 3957 err = skge_reset(hw); 3958 if (err) 3959 goto err_out_iounmap; 3960 3961 printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n", 3962 (unsigned long long)pci_resource_start(pdev, 0), pdev->irq, 3963 skge_board_name(hw), hw->chip_rev); 3964 3965 dev = skge_devinit(hw, 0, using_dac); 3966 if (!dev) 3967 goto err_out_led_off; 3968 3969 /* Some motherboards are broken and have zero in ROM. */ 3970 if (!is_valid_ether_addr(dev->dev_addr)) 3971 dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n"); 3972 3973 err = register_netdev(dev); 3974 if (err) { 3975 dev_err(&pdev->dev, "cannot register net device\n"); 3976 goto err_out_free_netdev; 3977 } 3978 3979 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); 3980 if (err) { 3981 dev_err(&pdev->dev, "%s: cannot assign irq %d\n", 3982 dev->name, pdev->irq); 3983 goto err_out_unregister; 3984 } 3985 skge_show_addr(dev); 3986 3987 if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) { 3988 if (register_netdev(dev1) == 0) 3989 skge_show_addr(dev1); 3990 else { 3991 /* Failure to register second port need not be fatal */ 3992 dev_warn(&pdev->dev, "register of second port failed\n"); 3993 hw->dev[1] = NULL; 3994 free_netdev(dev1); 3995 } 3996 } 3997 pci_set_drvdata(pdev, hw); 3998 3999 return 0; 4000 4001err_out_unregister: 4002 unregister_netdev(dev); 4003err_out_free_netdev: 4004 free_netdev(dev); 4005err_out_led_off: 4006 skge_write16(hw, B0_LED, LED_STAT_OFF); 4007err_out_iounmap: 4008 iounmap(hw->regs); 4009err_out_free_hw: 4010 kfree(hw); 4011err_out_free_regions: 4012 pci_release_regions(pdev); 4013err_out_disable_pdev: 4014 pci_disable_device(pdev); 4015 pci_set_drvdata(pdev, NULL); 4016err_out: 4017 return err; 4018} 4019 4020static void __devexit skge_remove(struct pci_dev *pdev) 4021{ 4022 struct skge_hw *hw = pci_get_drvdata(pdev); 4023 struct net_device *dev0, *dev1; 4024 4025 if (!hw) 4026 return; 4027 4028 flush_scheduled_work(); 4029 4030 if ((dev1 = hw->dev[1])) 4031 unregister_netdev(dev1); 4032 dev0 = hw->dev[0]; 4033 unregister_netdev(dev0); 4034 4035 tasklet_disable(&hw->phy_task); 4036 4037 spin_lock_irq(&hw->hw_lock); 4038 hw->intr_mask = 0; 4039 skge_write32(hw, B0_IMSK, 0); 4040 skge_read32(hw, B0_IMSK); 4041 spin_unlock_irq(&hw->hw_lock); 4042 4043 skge_write16(hw, B0_LED, LED_STAT_OFF); 4044 skge_write8(hw, B0_CTST, CS_RST_SET); 4045 4046 free_irq(pdev->irq, hw); 4047 pci_release_regions(pdev); 4048 pci_disable_device(pdev); 4049 if (dev1) 4050 free_netdev(dev1); 4051 free_netdev(dev0); 4052 4053 iounmap(hw->regs); 4054 kfree(hw); 4055 pci_set_drvdata(pdev, NULL); 4056} 4057 4058#ifdef CONFIG_PM 4059static int skge_suspend(struct pci_dev *pdev, pm_message_t state) 4060{ 4061 struct skge_hw *hw = pci_get_drvdata(pdev); 4062 int i, err, wol = 0; 4063 4064 if (!hw) 4065 return 0; 4066 4067 err = pci_save_state(pdev); 4068 if (err) 4069 return err; 4070 4071 for (i = 0; i < hw->ports; i++) { 4072 struct net_device *dev = hw->dev[i]; 4073 struct skge_port *skge = netdev_priv(dev); 4074 4075 if
(netif_running(dev)) 4076 skge_down(dev); 4077 if (skge->wol) 4078 skge_wol_init(skge); 4079 4080 wol |= skge->wol; 4081 } 4082 4083 skge_write32(hw, B0_IMSK, 0); 4084 4085 pci_prepare_to_sleep(pdev); 4086 4087 return 0; 4088} 4089 4090static int skge_resume(struct pci_dev *pdev) 4091{ 4092 struct skge_hw *hw = pci_get_drvdata(pdev); 4093 int i, err; 4094 4095 if (!hw) 4096 return 0; 4097 4098 err = pci_back_from_sleep(pdev); 4099 if (err) 4100 goto out; 4101 4102 err = pci_restore_state(pdev); 4103 if (err) 4104 goto out; 4105 4106 err = skge_reset(hw); 4107 if (err) 4108 goto out; 4109 4110 for (i = 0; i < hw->ports; i++) { 4111 struct net_device *dev = hw->dev[i]; 4112 4113 if (netif_running(dev)) { 4114 err = skge_up(dev); 4115 4116 if (err) { 4117 printk(KERN_ERR PFX "%s: could not up: %d\n", 4118 dev->name, err); 4119 dev_close(dev); 4120 goto out; 4121 } 4122 } 4123 } 4124out: 4125 return err; 4126} 4127#endif 4128 4129static void skge_shutdown(struct pci_dev *pdev) 4130{ 4131 struct skge_hw *hw = pci_get_drvdata(pdev); 4132 int i, wol = 0; 4133 4134 if (!hw) 4135 return; 4136 4137 for (i = 0; i < hw->ports; i++) { 4138 struct net_device *dev = hw->dev[i]; 4139 struct skge_port *skge = netdev_priv(dev); 4140 4141 if (skge->wol) 4142 skge_wol_init(skge); 4143 wol |= skge->wol; 4144 } 4145 4146 if (pci_enable_wake(pdev, PCI_D3cold, wol)) 4147 pci_enable_wake(pdev, PCI_D3hot, wol); 4148 4149 pci_disable_device(pdev); 4150 pci_set_power_state(pdev, PCI_D3hot); 4151 4152} 4153 4154static struct pci_driver skge_driver = { 4155 .name = DRV_NAME, 4156 .id_table = skge_id_table, 4157 .probe = skge_probe, 4158 .remove = __devexit_p(skge_remove), 4159#ifdef CONFIG_PM 4160 .suspend = skge_suspend, 4161 .resume = skge_resume, 4162#endif 4163 .shutdown = skge_shutdown, 4164}; 4165 4166static int __init skge_init_module(void) 4167{ 4168 skge_debug_init(); 4169 return pci_register_driver(&skge_driver); 4170} 4171 4172static void __exit skge_cleanup_module(void) 4173{ 4174 pci_unregister_driver(&skge_driver); 4175 skge_debug_cleanup(); 4176} 4177 4178module_init(skge_init_module); 4179module_exit(skge_cleanup_module);
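/* skge_debug_init() registers the debugfs directory and netdevice notifier before the PCI driver, so debugfs entries can be created as soon as a port comes up; module exit tears this down in the reverse order. */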