Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.39-rc4 3195 lines 80 kB view raw
1/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $ 2 * sungem.c: Sun GEM ethernet driver. 3 * 4 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com) 5 * 6 * Support for Apple GMAC and assorted PHYs, WOL, Power Management 7 * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org) 8 * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp. 9 * 10 * NAPI and NETPOLL support 11 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com) 12 * 13 * TODO: 14 * - Now that the driver was significantly simplified, I need to rework 15 * the locking. I'm sure we don't need _2_ spinlocks, and we probably 16 * can avoid taking most of them for so long period of time (and schedule 17 * instead). The main issues at this point are caused by the netdev layer 18 * though: 19 * 20 * gem_change_mtu() and gem_set_multicast() are called with a read_lock() 21 * help by net/core/dev.c, thus they can't schedule. That means they can't 22 * call napi_disable() neither, thus force gem_poll() to keep a spinlock 23 * where it could have been dropped. change_mtu especially would love also to 24 * be able to msleep instead of horrid locked delays when resetting the HW, 25 * but that read_lock() makes it impossible, unless I defer it's action to 26 * the reset task, which means it'll be asynchronous (won't take effect until 27 * the system schedules a bit). 28 * 29 * Also, it would probably be possible to also remove most of the long-life 30 * locking in open/resume code path (gem_reinit_chip) by beeing more careful 31 * about when we can start taking interrupts or get xmit() called... 32 */ 33 34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 35 36#include <linux/module.h> 37#include <linux/kernel.h> 38#include <linux/types.h> 39#include <linux/fcntl.h> 40#include <linux/interrupt.h> 41#include <linux/ioport.h> 42#include <linux/in.h> 43#include <linux/sched.h> 44#include <linux/string.h> 45#include <linux/delay.h> 46#include <linux/init.h> 47#include <linux/errno.h> 48#include <linux/pci.h> 49#include <linux/dma-mapping.h> 50#include <linux/netdevice.h> 51#include <linux/etherdevice.h> 52#include <linux/skbuff.h> 53#include <linux/mii.h> 54#include <linux/ethtool.h> 55#include <linux/crc32.h> 56#include <linux/random.h> 57#include <linux/workqueue.h> 58#include <linux/if_vlan.h> 59#include <linux/bitops.h> 60#include <linux/mutex.h> 61#include <linux/mm.h> 62#include <linux/gfp.h> 63 64#include <asm/system.h> 65#include <asm/io.h> 66#include <asm/byteorder.h> 67#include <asm/uaccess.h> 68#include <asm/irq.h> 69 70#ifdef CONFIG_SPARC 71#include <asm/idprom.h> 72#include <asm/prom.h> 73#endif 74 75#ifdef CONFIG_PPC_PMAC 76#include <asm/pci-bridge.h> 77#include <asm/prom.h> 78#include <asm/machdep.h> 79#include <asm/pmac_feature.h> 80#endif 81 82#include "sungem_phy.h" 83#include "sungem.h" 84 85/* Stripping FCS is causing problems, disabled for now */ 86#undef STRIP_FCS 87 88#define DEFAULT_MSG (NETIF_MSG_DRV | \ 89 NETIF_MSG_PROBE | \ 90 NETIF_MSG_LINK) 91 92#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ 93 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ 94 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \ 95 SUPPORTED_Pause | SUPPORTED_Autoneg) 96 97#define DRV_NAME "sungem" 98#define DRV_VERSION "0.98" 99#define DRV_RELDATE "8/24/03" 100#define DRV_AUTHOR "David S. Miller (davem@redhat.com)" 101 102static char version[] __devinitdata = 103 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; 104 105MODULE_AUTHOR(DRV_AUTHOR); 106MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); 107MODULE_LICENSE("GPL"); 108 109#define GEM_MODULE_NAME "gem" 110 111static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = { 112 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, 113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 114 115 /* These models only differ from the original GEM in 116 * that their tx/rx fifos are of a different size and 117 * they only support 10/100 speeds. -DaveM 118 * 119 * Apple's GMAC does support gigabit on machines with 120 * the BCM54xx PHYs. -BenH 121 */ 122 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM, 123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 124 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC, 125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 126 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP, 127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 128 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2, 129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 130 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC, 131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 132 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM, 133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 134 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC, 135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 136 {0, } 137}; 138 139MODULE_DEVICE_TABLE(pci, gem_pci_tbl); 140 141static u16 __phy_read(struct gem *gp, int phy_addr, int reg) 142{ 143 u32 cmd; 144 int limit = 10000; 145 146 cmd = (1 << 30); 147 cmd |= (2 << 28); 148 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; 149 cmd |= (reg << 18) & MIF_FRAME_REGAD; 150 cmd |= (MIF_FRAME_TAMSB); 151 writel(cmd, gp->regs + MIF_FRAME); 152 153 while (--limit) { 154 cmd = readl(gp->regs + MIF_FRAME); 155 if (cmd & MIF_FRAME_TALSB) 156 break; 157 158 udelay(10); 159 } 160 161 if (!limit) 162 cmd = 0xffff; 163 164 return cmd & MIF_FRAME_DATA; 165} 166 167static inline int _phy_read(struct net_device *dev, int mii_id, int reg) 168{ 169 struct gem *gp = netdev_priv(dev); 170 return __phy_read(gp, mii_id, reg); 171} 172 173static inline u16 phy_read(struct gem *gp, int reg) 174{ 175 return __phy_read(gp, gp->mii_phy_addr, reg); 176} 177 178static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val) 179{ 180 u32 cmd; 181 int limit = 10000; 182 183 cmd = (1 << 30); 184 cmd |= (1 << 28); 185 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; 186 cmd |= (reg << 18) & MIF_FRAME_REGAD; 187 cmd |= (MIF_FRAME_TAMSB); 188 cmd |= (val & MIF_FRAME_DATA); 189 writel(cmd, gp->regs + MIF_FRAME); 190 191 while (limit--) { 192 cmd = readl(gp->regs + MIF_FRAME); 193 if (cmd & MIF_FRAME_TALSB) 194 break; 195 196 udelay(10); 197 } 198} 199 200static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val) 201{ 202 struct gem *gp = netdev_priv(dev); 203 __phy_write(gp, mii_id, reg, val & 0xffff); 204} 205 206static inline void phy_write(struct gem *gp, int reg, u16 val) 207{ 208 __phy_write(gp, gp->mii_phy_addr, reg, val); 209} 210 211static inline void gem_enable_ints(struct gem *gp) 212{ 213 /* Enable all interrupts but TXDONE */ 214 writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK); 215} 216 217static inline void gem_disable_ints(struct gem *gp) 218{ 219 /* Disable all interrupts, including TXDONE */ 220 writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK); 221} 222 223static void gem_get_cell(struct gem *gp) 224{ 225 BUG_ON(gp->cell_enabled < 0); 226 gp->cell_enabled++; 227#ifdef CONFIG_PPC_PMAC 228 if (gp->cell_enabled == 1) { 229 mb(); 230 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1); 231 udelay(10); 232 } 233#endif /* CONFIG_PPC_PMAC */ 234} 235 236/* Turn off the chip's clock */ 237static void gem_put_cell(struct gem *gp) 238{ 239 BUG_ON(gp->cell_enabled <= 0); 240 gp->cell_enabled--; 241#ifdef CONFIG_PPC_PMAC 242 if (gp->cell_enabled == 0) { 243 mb(); 244 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0); 245 udelay(10); 246 } 247#endif /* CONFIG_PPC_PMAC */ 248} 249 250static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) 251{ 252 if (netif_msg_intr(gp)) 253 printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name); 254} 255 256static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 257{ 258 u32 pcs_istat = readl(gp->regs + PCS_ISTAT); 259 u32 pcs_miistat; 260 261 if (netif_msg_intr(gp)) 262 printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n", 263 gp->dev->name, pcs_istat); 264 265 if (!(pcs_istat & PCS_ISTAT_LSC)) { 266 netdev_err(dev, "PCS irq but no link status change???\n"); 267 return 0; 268 } 269 270 /* The link status bit latches on zero, so you must 271 * read it twice in such a case to see a transition 272 * to the link being up. 273 */ 274 pcs_miistat = readl(gp->regs + PCS_MIISTAT); 275 if (!(pcs_miistat & PCS_MIISTAT_LS)) 276 pcs_miistat |= 277 (readl(gp->regs + PCS_MIISTAT) & 278 PCS_MIISTAT_LS); 279 280 if (pcs_miistat & PCS_MIISTAT_ANC) { 281 /* The remote-fault indication is only valid 282 * when autoneg has completed. 283 */ 284 if (pcs_miistat & PCS_MIISTAT_RF) 285 netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n"); 286 else 287 netdev_info(dev, "PCS AutoNEG complete\n"); 288 } 289 290 if (pcs_miistat & PCS_MIISTAT_LS) { 291 netdev_info(dev, "PCS link is now up\n"); 292 netif_carrier_on(gp->dev); 293 } else { 294 netdev_info(dev, "PCS link is now down\n"); 295 netif_carrier_off(gp->dev); 296 /* If this happens and the link timer is not running, 297 * reset so we re-negotiate. 298 */ 299 if (!timer_pending(&gp->link_timer)) 300 return 1; 301 } 302 303 return 0; 304} 305 306static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 307{ 308 u32 txmac_stat = readl(gp->regs + MAC_TXSTAT); 309 310 if (netif_msg_intr(gp)) 311 printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", 312 gp->dev->name, txmac_stat); 313 314 /* Defer timer expiration is quite normal, 315 * don't even log the event. 316 */ 317 if ((txmac_stat & MAC_TXSTAT_DTE) && 318 !(txmac_stat & ~MAC_TXSTAT_DTE)) 319 return 0; 320 321 if (txmac_stat & MAC_TXSTAT_URUN) { 322 netdev_err(dev, "TX MAC xmit underrun\n"); 323 dev->stats.tx_fifo_errors++; 324 } 325 326 if (txmac_stat & MAC_TXSTAT_MPE) { 327 netdev_err(dev, "TX MAC max packet size error\n"); 328 dev->stats.tx_errors++; 329 } 330 331 /* The rest are all cases of one of the 16-bit TX 332 * counters expiring. 333 */ 334 if (txmac_stat & MAC_TXSTAT_NCE) 335 dev->stats.collisions += 0x10000; 336 337 if (txmac_stat & MAC_TXSTAT_ECE) { 338 dev->stats.tx_aborted_errors += 0x10000; 339 dev->stats.collisions += 0x10000; 340 } 341 342 if (txmac_stat & MAC_TXSTAT_LCE) { 343 dev->stats.tx_aborted_errors += 0x10000; 344 dev->stats.collisions += 0x10000; 345 } 346 347 /* We do not keep track of MAC_TXSTAT_FCE and 348 * MAC_TXSTAT_PCE events. 349 */ 350 return 0; 351} 352 353/* When we get a RX fifo overflow, the RX unit in GEM is probably hung 354 * so we do the following. 355 * 356 * If any part of the reset goes wrong, we return 1 and that causes the 357 * whole chip to be reset. 358 */ 359static int gem_rxmac_reset(struct gem *gp) 360{ 361 struct net_device *dev = gp->dev; 362 int limit, i; 363 u64 desc_dma; 364 u32 val; 365 366 /* First, reset & disable MAC RX. */ 367 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); 368 for (limit = 0; limit < 5000; limit++) { 369 if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD)) 370 break; 371 udelay(10); 372 } 373 if (limit == 5000) { 374 netdev_err(dev, "RX MAC will not reset, resetting whole chip\n"); 375 return 1; 376 } 377 378 writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB, 379 gp->regs + MAC_RXCFG); 380 for (limit = 0; limit < 5000; limit++) { 381 if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB)) 382 break; 383 udelay(10); 384 } 385 if (limit == 5000) { 386 netdev_err(dev, "RX MAC will not disable, resetting whole chip\n"); 387 return 1; 388 } 389 390 /* Second, disable RX DMA. */ 391 writel(0, gp->regs + RXDMA_CFG); 392 for (limit = 0; limit < 5000; limit++) { 393 if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE)) 394 break; 395 udelay(10); 396 } 397 if (limit == 5000) { 398 netdev_err(dev, "RX DMA will not disable, resetting whole chip\n"); 399 return 1; 400 } 401 402 udelay(5000); 403 404 /* Execute RX reset command. */ 405 writel(gp->swrst_base | GREG_SWRST_RXRST, 406 gp->regs + GREG_SWRST); 407 for (limit = 0; limit < 5000; limit++) { 408 if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST)) 409 break; 410 udelay(10); 411 } 412 if (limit == 5000) { 413 netdev_err(dev, "RX reset command will not execute, resetting whole chip\n"); 414 return 1; 415 } 416 417 /* Refresh the RX ring. */ 418 for (i = 0; i < RX_RING_SIZE; i++) { 419 struct gem_rxd *rxd = &gp->init_block->rxd[i]; 420 421 if (gp->rx_skbs[i] == NULL) { 422 netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n"); 423 return 1; 424 } 425 426 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 427 } 428 gp->rx_new = gp->rx_old = 0; 429 430 /* Now we must reprogram the rest of RX unit. */ 431 desc_dma = (u64) gp->gblock_dvma; 432 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); 433 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); 434 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); 435 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 436 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | 437 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); 438 writel(val, gp->regs + RXDMA_CFG); 439 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) 440 writel(((5 & RXDMA_BLANK_IPKTS) | 441 ((8 << 12) & RXDMA_BLANK_ITIME)), 442 gp->regs + RXDMA_BLANK); 443 else 444 writel(((5 & RXDMA_BLANK_IPKTS) | 445 ((4 << 12) & RXDMA_BLANK_ITIME)), 446 gp->regs + RXDMA_BLANK); 447 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); 448 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); 449 writel(val, gp->regs + RXDMA_PTHRESH); 450 val = readl(gp->regs + RXDMA_CFG); 451 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 452 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); 453 val = readl(gp->regs + MAC_RXCFG); 454 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 455 456 return 0; 457} 458 459static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 460{ 461 u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT); 462 int ret = 0; 463 464 if (netif_msg_intr(gp)) 465 printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n", 466 gp->dev->name, rxmac_stat); 467 468 if (rxmac_stat & MAC_RXSTAT_OFLW) { 469 u32 smac = readl(gp->regs + MAC_SMACHINE); 470 471 netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac); 472 dev->stats.rx_over_errors++; 473 dev->stats.rx_fifo_errors++; 474 475 ret = gem_rxmac_reset(gp); 476 } 477 478 if (rxmac_stat & MAC_RXSTAT_ACE) 479 dev->stats.rx_frame_errors += 0x10000; 480 481 if (rxmac_stat & MAC_RXSTAT_CCE) 482 dev->stats.rx_crc_errors += 0x10000; 483 484 if (rxmac_stat & MAC_RXSTAT_LCE) 485 dev->stats.rx_length_errors += 0x10000; 486 487 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE 488 * events. 489 */ 490 return ret; 491} 492 493static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 494{ 495 u32 mac_cstat = readl(gp->regs + MAC_CSTAT); 496 497 if (netif_msg_intr(gp)) 498 printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n", 499 gp->dev->name, mac_cstat); 500 501 /* This interrupt is just for pause frame and pause 502 * tracking. It is useful for diagnostics and debug 503 * but probably by default we will mask these events. 504 */ 505 if (mac_cstat & MAC_CSTAT_PS) 506 gp->pause_entered++; 507 508 if (mac_cstat & MAC_CSTAT_PRCV) 509 gp->pause_last_time_recvd = (mac_cstat >> 16); 510 511 return 0; 512} 513 514static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 515{ 516 u32 mif_status = readl(gp->regs + MIF_STATUS); 517 u32 reg_val, changed_bits; 518 519 reg_val = (mif_status & MIF_STATUS_DATA) >> 16; 520 changed_bits = (mif_status & MIF_STATUS_STAT); 521 522 gem_handle_mif_event(gp, reg_val, changed_bits); 523 524 return 0; 525} 526 527static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 528{ 529 u32 pci_estat = readl(gp->regs + GREG_PCIESTAT); 530 531 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && 532 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { 533 netdev_err(dev, "PCI error [%04x]", pci_estat); 534 535 if (pci_estat & GREG_PCIESTAT_BADACK) 536 pr_cont(" <No ACK64# during ABS64 cycle>"); 537 if (pci_estat & GREG_PCIESTAT_DTRTO) 538 pr_cont(" <Delayed transaction timeout>"); 539 if (pci_estat & GREG_PCIESTAT_OTHER) 540 pr_cont(" <other>"); 541 pr_cont("\n"); 542 } else { 543 pci_estat |= GREG_PCIESTAT_OTHER; 544 netdev_err(dev, "PCI error\n"); 545 } 546 547 if (pci_estat & GREG_PCIESTAT_OTHER) { 548 u16 pci_cfg_stat; 549 550 /* Interrogate PCI config space for the 551 * true cause. 552 */ 553 pci_read_config_word(gp->pdev, PCI_STATUS, 554 &pci_cfg_stat); 555 netdev_err(dev, "Read PCI cfg space status [%04x]\n", 556 pci_cfg_stat); 557 if (pci_cfg_stat & PCI_STATUS_PARITY) 558 netdev_err(dev, "PCI parity error detected\n"); 559 if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT) 560 netdev_err(dev, "PCI target abort\n"); 561 if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT) 562 netdev_err(dev, "PCI master acks target abort\n"); 563 if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT) 564 netdev_err(dev, "PCI master abort\n"); 565 if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR) 566 netdev_err(dev, "PCI system error SERR#\n"); 567 if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY) 568 netdev_err(dev, "PCI parity error\n"); 569 570 /* Write the error bits back to clear them. */ 571 pci_cfg_stat &= (PCI_STATUS_PARITY | 572 PCI_STATUS_SIG_TARGET_ABORT | 573 PCI_STATUS_REC_TARGET_ABORT | 574 PCI_STATUS_REC_MASTER_ABORT | 575 PCI_STATUS_SIG_SYSTEM_ERROR | 576 PCI_STATUS_DETECTED_PARITY); 577 pci_write_config_word(gp->pdev, 578 PCI_STATUS, pci_cfg_stat); 579 } 580 581 /* For all PCI errors, we should reset the chip. */ 582 return 1; 583} 584 585/* All non-normal interrupt conditions get serviced here. 586 * Returns non-zero if we should just exit the interrupt 587 * handler right now (ie. if we reset the card which invalidates 588 * all of the other original irq status bits). 589 */ 590static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status) 591{ 592 if (gem_status & GREG_STAT_RXNOBUF) { 593 /* Frame arrived, no free RX buffers available. */ 594 if (netif_msg_rx_err(gp)) 595 printk(KERN_DEBUG "%s: no buffer for rx frame\n", 596 gp->dev->name); 597 dev->stats.rx_dropped++; 598 } 599 600 if (gem_status & GREG_STAT_RXTAGERR) { 601 /* corrupt RX tag framing */ 602 if (netif_msg_rx_err(gp)) 603 printk(KERN_DEBUG "%s: corrupt rx tag framing\n", 604 gp->dev->name); 605 dev->stats.rx_errors++; 606 607 goto do_reset; 608 } 609 610 if (gem_status & GREG_STAT_PCS) { 611 if (gem_pcs_interrupt(dev, gp, gem_status)) 612 goto do_reset; 613 } 614 615 if (gem_status & GREG_STAT_TXMAC) { 616 if (gem_txmac_interrupt(dev, gp, gem_status)) 617 goto do_reset; 618 } 619 620 if (gem_status & GREG_STAT_RXMAC) { 621 if (gem_rxmac_interrupt(dev, gp, gem_status)) 622 goto do_reset; 623 } 624 625 if (gem_status & GREG_STAT_MAC) { 626 if (gem_mac_interrupt(dev, gp, gem_status)) 627 goto do_reset; 628 } 629 630 if (gem_status & GREG_STAT_MIF) { 631 if (gem_mif_interrupt(dev, gp, gem_status)) 632 goto do_reset; 633 } 634 635 if (gem_status & GREG_STAT_PCIERR) { 636 if (gem_pci_interrupt(dev, gp, gem_status)) 637 goto do_reset; 638 } 639 640 return 0; 641 642do_reset: 643 gp->reset_task_pending = 1; 644 schedule_work(&gp->reset_task); 645 646 return 1; 647} 648 649static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status) 650{ 651 int entry, limit; 652 653 if (netif_msg_intr(gp)) 654 printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n", 655 gp->dev->name, gem_status); 656 657 entry = gp->tx_old; 658 limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT); 659 while (entry != limit) { 660 struct sk_buff *skb; 661 struct gem_txd *txd; 662 dma_addr_t dma_addr; 663 u32 dma_len; 664 int frag; 665 666 if (netif_msg_tx_done(gp)) 667 printk(KERN_DEBUG "%s: tx done, slot %d\n", 668 gp->dev->name, entry); 669 skb = gp->tx_skbs[entry]; 670 if (skb_shinfo(skb)->nr_frags) { 671 int last = entry + skb_shinfo(skb)->nr_frags; 672 int walk = entry; 673 int incomplete = 0; 674 675 last &= (TX_RING_SIZE - 1); 676 for (;;) { 677 walk = NEXT_TX(walk); 678 if (walk == limit) 679 incomplete = 1; 680 if (walk == last) 681 break; 682 } 683 if (incomplete) 684 break; 685 } 686 gp->tx_skbs[entry] = NULL; 687 dev->stats.tx_bytes += skb->len; 688 689 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 690 txd = &gp->init_block->txd[entry]; 691 692 dma_addr = le64_to_cpu(txd->buffer); 693 dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ; 694 695 pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); 696 entry = NEXT_TX(entry); 697 } 698 699 dev->stats.tx_packets++; 700 dev_kfree_skb_irq(skb); 701 } 702 gp->tx_old = entry; 703 704 if (netif_queue_stopped(dev) && 705 TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) 706 netif_wake_queue(dev); 707} 708 709static __inline__ void gem_post_rxds(struct gem *gp, int limit) 710{ 711 int cluster_start, curr, count, kick; 712 713 cluster_start = curr = (gp->rx_new & ~(4 - 1)); 714 count = 0; 715 kick = -1; 716 wmb(); 717 while (curr != limit) { 718 curr = NEXT_RX(curr); 719 if (++count == 4) { 720 struct gem_rxd *rxd = 721 &gp->init_block->rxd[cluster_start]; 722 for (;;) { 723 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 724 rxd++; 725 cluster_start = NEXT_RX(cluster_start); 726 if (cluster_start == curr) 727 break; 728 } 729 kick = curr; 730 count = 0; 731 } 732 } 733 if (kick >= 0) { 734 mb(); 735 writel(kick, gp->regs + RXDMA_KICK); 736 } 737} 738 739static int gem_rx(struct gem *gp, int work_to_do) 740{ 741 struct net_device *dev = gp->dev; 742 int entry, drops, work_done = 0; 743 u32 done; 744 __sum16 csum; 745 746 if (netif_msg_rx_status(gp)) 747 printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", 748 gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new); 749 750 entry = gp->rx_new; 751 drops = 0; 752 done = readl(gp->regs + RXDMA_DONE); 753 for (;;) { 754 struct gem_rxd *rxd = &gp->init_block->rxd[entry]; 755 struct sk_buff *skb; 756 u64 status = le64_to_cpu(rxd->status_word); 757 dma_addr_t dma_addr; 758 int len; 759 760 if ((status & RXDCTRL_OWN) != 0) 761 break; 762 763 if (work_done >= RX_RING_SIZE || work_done >= work_to_do) 764 break; 765 766 /* When writing back RX descriptor, GEM writes status 767 * then buffer address, possibly in separate transactions. 768 * If we don't wait for the chip to write both, we could 769 * post a new buffer to this descriptor then have GEM spam 770 * on the buffer address. We sync on the RX completion 771 * register to prevent this from happening. 772 */ 773 if (entry == done) { 774 done = readl(gp->regs + RXDMA_DONE); 775 if (entry == done) 776 break; 777 } 778 779 /* We can now account for the work we're about to do */ 780 work_done++; 781 782 skb = gp->rx_skbs[entry]; 783 784 len = (status & RXDCTRL_BUFSZ) >> 16; 785 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { 786 dev->stats.rx_errors++; 787 if (len < ETH_ZLEN) 788 dev->stats.rx_length_errors++; 789 if (len & RXDCTRL_BAD) 790 dev->stats.rx_crc_errors++; 791 792 /* We'll just return it to GEM. */ 793 drop_it: 794 dev->stats.rx_dropped++; 795 goto next; 796 } 797 798 dma_addr = le64_to_cpu(rxd->buffer); 799 if (len > RX_COPY_THRESHOLD) { 800 struct sk_buff *new_skb; 801 802 new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); 803 if (new_skb == NULL) { 804 drops++; 805 goto drop_it; 806 } 807 pci_unmap_page(gp->pdev, dma_addr, 808 RX_BUF_ALLOC_SIZE(gp), 809 PCI_DMA_FROMDEVICE); 810 gp->rx_skbs[entry] = new_skb; 811 new_skb->dev = gp->dev; 812 skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); 813 rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev, 814 virt_to_page(new_skb->data), 815 offset_in_page(new_skb->data), 816 RX_BUF_ALLOC_SIZE(gp), 817 PCI_DMA_FROMDEVICE)); 818 skb_reserve(new_skb, RX_OFFSET); 819 820 /* Trim the original skb for the netif. */ 821 skb_trim(skb, len); 822 } else { 823 struct sk_buff *copy_skb = dev_alloc_skb(len + 2); 824 825 if (copy_skb == NULL) { 826 drops++; 827 goto drop_it; 828 } 829 830 skb_reserve(copy_skb, 2); 831 skb_put(copy_skb, len); 832 pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 833 skb_copy_from_linear_data(skb, copy_skb->data, len); 834 pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 835 836 /* We'll reuse the original ring buffer. */ 837 skb = copy_skb; 838 } 839 840 csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); 841 skb->csum = csum_unfold(csum); 842 skb->ip_summed = CHECKSUM_COMPLETE; 843 skb->protocol = eth_type_trans(skb, gp->dev); 844 845 netif_receive_skb(skb); 846 847 dev->stats.rx_packets++; 848 dev->stats.rx_bytes += len; 849 850 next: 851 entry = NEXT_RX(entry); 852 } 853 854 gem_post_rxds(gp, entry); 855 856 gp->rx_new = entry; 857 858 if (drops) 859 netdev_info(gp->dev, "Memory squeeze, deferring packet\n"); 860 861 return work_done; 862} 863 864static int gem_poll(struct napi_struct *napi, int budget) 865{ 866 struct gem *gp = container_of(napi, struct gem, napi); 867 struct net_device *dev = gp->dev; 868 unsigned long flags; 869 int work_done; 870 871 /* 872 * NAPI locking nightmare: See comment at head of driver 873 */ 874 spin_lock_irqsave(&gp->lock, flags); 875 876 work_done = 0; 877 do { 878 /* Handle anomalies */ 879 if (gp->status & GREG_STAT_ABNORMAL) { 880 if (gem_abnormal_irq(dev, gp, gp->status)) 881 break; 882 } 883 884 /* Run TX completion thread */ 885 spin_lock(&gp->tx_lock); 886 gem_tx(dev, gp, gp->status); 887 spin_unlock(&gp->tx_lock); 888 889 spin_unlock_irqrestore(&gp->lock, flags); 890 891 /* Run RX thread. We don't use any locking here, 892 * code willing to do bad things - like cleaning the 893 * rx ring - must call napi_disable(), which 894 * schedule_timeout()'s if polling is already disabled. 895 */ 896 work_done += gem_rx(gp, budget - work_done); 897 898 if (work_done >= budget) 899 return work_done; 900 901 spin_lock_irqsave(&gp->lock, flags); 902 903 gp->status = readl(gp->regs + GREG_STAT); 904 } while (gp->status & GREG_STAT_NAPI); 905 906 __napi_complete(napi); 907 gem_enable_ints(gp); 908 909 spin_unlock_irqrestore(&gp->lock, flags); 910 911 return work_done; 912} 913 914static irqreturn_t gem_interrupt(int irq, void *dev_id) 915{ 916 struct net_device *dev = dev_id; 917 struct gem *gp = netdev_priv(dev); 918 unsigned long flags; 919 920 /* Swallow interrupts when shutting the chip down, though 921 * that shouldn't happen, we should have done free_irq() at 922 * this point... 923 */ 924 if (!gp->running) 925 return IRQ_HANDLED; 926 927 spin_lock_irqsave(&gp->lock, flags); 928 929 if (napi_schedule_prep(&gp->napi)) { 930 u32 gem_status = readl(gp->regs + GREG_STAT); 931 932 if (gem_status == 0) { 933 napi_enable(&gp->napi); 934 spin_unlock_irqrestore(&gp->lock, flags); 935 return IRQ_NONE; 936 } 937 gp->status = gem_status; 938 gem_disable_ints(gp); 939 __napi_schedule(&gp->napi); 940 } 941 942 spin_unlock_irqrestore(&gp->lock, flags); 943 944 /* If polling was disabled at the time we received that 945 * interrupt, we may return IRQ_HANDLED here while we 946 * should return IRQ_NONE. No big deal... 947 */ 948 return IRQ_HANDLED; 949} 950 951#ifdef CONFIG_NET_POLL_CONTROLLER 952static void gem_poll_controller(struct net_device *dev) 953{ 954 /* gem_interrupt is safe to reentrance so no need 955 * to disable_irq here. 956 */ 957 gem_interrupt(dev->irq, dev); 958} 959#endif 960 961static void gem_tx_timeout(struct net_device *dev) 962{ 963 struct gem *gp = netdev_priv(dev); 964 965 netdev_err(dev, "transmit timed out, resetting\n"); 966 if (!gp->running) { 967 netdev_err(dev, "hrm.. hw not running !\n"); 968 return; 969 } 970 netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n", 971 readl(gp->regs + TXDMA_CFG), 972 readl(gp->regs + MAC_TXSTAT), 973 readl(gp->regs + MAC_TXCFG)); 974 netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n", 975 readl(gp->regs + RXDMA_CFG), 976 readl(gp->regs + MAC_RXSTAT), 977 readl(gp->regs + MAC_RXCFG)); 978 979 spin_lock_irq(&gp->lock); 980 spin_lock(&gp->tx_lock); 981 982 gp->reset_task_pending = 1; 983 schedule_work(&gp->reset_task); 984 985 spin_unlock(&gp->tx_lock); 986 spin_unlock_irq(&gp->lock); 987} 988 989static __inline__ int gem_intme(int entry) 990{ 991 /* Algorithm: IRQ every 1/2 of descriptors. */ 992 if (!(entry & ((TX_RING_SIZE>>1)-1))) 993 return 1; 994 995 return 0; 996} 997 998static netdev_tx_t gem_start_xmit(struct sk_buff *skb, 999 struct net_device *dev) 1000{ 1001 struct gem *gp = netdev_priv(dev); 1002 int entry; 1003 u64 ctrl; 1004 unsigned long flags; 1005 1006 ctrl = 0; 1007 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1008 const u64 csum_start_off = skb_checksum_start_offset(skb); 1009 const u64 csum_stuff_off = csum_start_off + skb->csum_offset; 1010 1011 ctrl = (TXDCTRL_CENAB | 1012 (csum_start_off << 15) | 1013 (csum_stuff_off << 21)); 1014 } 1015 1016 if (!spin_trylock_irqsave(&gp->tx_lock, flags)) { 1017 /* Tell upper layer to requeue */ 1018 return NETDEV_TX_LOCKED; 1019 } 1020 /* We raced with gem_do_stop() */ 1021 if (!gp->running) { 1022 spin_unlock_irqrestore(&gp->tx_lock, flags); 1023 return NETDEV_TX_BUSY; 1024 } 1025 1026 /* This is a hard error, log it. */ 1027 if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) { 1028 netif_stop_queue(dev); 1029 spin_unlock_irqrestore(&gp->tx_lock, flags); 1030 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); 1031 return NETDEV_TX_BUSY; 1032 } 1033 1034 entry = gp->tx_new; 1035 gp->tx_skbs[entry] = skb; 1036 1037 if (skb_shinfo(skb)->nr_frags == 0) { 1038 struct gem_txd *txd = &gp->init_block->txd[entry]; 1039 dma_addr_t mapping; 1040 u32 len; 1041 1042 len = skb->len; 1043 mapping = pci_map_page(gp->pdev, 1044 virt_to_page(skb->data), 1045 offset_in_page(skb->data), 1046 len, PCI_DMA_TODEVICE); 1047 ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len; 1048 if (gem_intme(entry)) 1049 ctrl |= TXDCTRL_INTME; 1050 txd->buffer = cpu_to_le64(mapping); 1051 wmb(); 1052 txd->control_word = cpu_to_le64(ctrl); 1053 entry = NEXT_TX(entry); 1054 } else { 1055 struct gem_txd *txd; 1056 u32 first_len; 1057 u64 intme; 1058 dma_addr_t first_mapping; 1059 int frag, first_entry = entry; 1060 1061 intme = 0; 1062 if (gem_intme(entry)) 1063 intme |= TXDCTRL_INTME; 1064 1065 /* We must give this initial chunk to the device last. 1066 * Otherwise we could race with the device. 1067 */ 1068 first_len = skb_headlen(skb); 1069 first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data), 1070 offset_in_page(skb->data), 1071 first_len, PCI_DMA_TODEVICE); 1072 entry = NEXT_TX(entry); 1073 1074 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 1075 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 1076 u32 len; 1077 dma_addr_t mapping; 1078 u64 this_ctrl; 1079 1080 len = this_frag->size; 1081 mapping = pci_map_page(gp->pdev, 1082 this_frag->page, 1083 this_frag->page_offset, 1084 len, PCI_DMA_TODEVICE); 1085 this_ctrl = ctrl; 1086 if (frag == skb_shinfo(skb)->nr_frags - 1) 1087 this_ctrl |= TXDCTRL_EOF; 1088 1089 txd = &gp->init_block->txd[entry]; 1090 txd->buffer = cpu_to_le64(mapping); 1091 wmb(); 1092 txd->control_word = cpu_to_le64(this_ctrl | len); 1093 1094 if (gem_intme(entry)) 1095 intme |= TXDCTRL_INTME; 1096 1097 entry = NEXT_TX(entry); 1098 } 1099 txd = &gp->init_block->txd[first_entry]; 1100 txd->buffer = cpu_to_le64(first_mapping); 1101 wmb(); 1102 txd->control_word = 1103 cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len); 1104 } 1105 1106 gp->tx_new = entry; 1107 if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1)) 1108 netif_stop_queue(dev); 1109 1110 if (netif_msg_tx_queued(gp)) 1111 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", 1112 dev->name, entry, skb->len); 1113 mb(); 1114 writel(gp->tx_new, gp->regs + TXDMA_KICK); 1115 spin_unlock_irqrestore(&gp->tx_lock, flags); 1116 1117 dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ 1118 1119 return NETDEV_TX_OK; 1120} 1121 1122static void gem_pcs_reset(struct gem *gp) 1123{ 1124 int limit; 1125 u32 val; 1126 1127 /* Reset PCS unit. */ 1128 val = readl(gp->regs + PCS_MIICTRL); 1129 val |= PCS_MIICTRL_RST; 1130 writel(val, gp->regs + PCS_MIICTRL); 1131 1132 limit = 32; 1133 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) { 1134 udelay(100); 1135 if (limit-- <= 0) 1136 break; 1137 } 1138 if (limit < 0) 1139 netdev_warn(gp->dev, "PCS reset bit would not clear\n"); 1140} 1141 1142static void gem_pcs_reinit_adv(struct gem *gp) 1143{ 1144 u32 val; 1145 1146 /* Make sure PCS is disabled while changing advertisement 1147 * configuration. 1148 */ 1149 val = readl(gp->regs + PCS_CFG); 1150 val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO); 1151 writel(val, gp->regs + PCS_CFG); 1152 1153 /* Advertise all capabilities except asymmetric 1154 * pause. 1155 */ 1156 val = readl(gp->regs + PCS_MIIADV); 1157 val |= (PCS_MIIADV_FD | PCS_MIIADV_HD | 1158 PCS_MIIADV_SP | PCS_MIIADV_AP); 1159 writel(val, gp->regs + PCS_MIIADV); 1160 1161 /* Enable and restart auto-negotiation, disable wrapback/loopback, 1162 * and re-enable PCS. 1163 */ 1164 val = readl(gp->regs + PCS_MIICTRL); 1165 val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE); 1166 val &= ~PCS_MIICTRL_WB; 1167 writel(val, gp->regs + PCS_MIICTRL); 1168 1169 val = readl(gp->regs + PCS_CFG); 1170 val |= PCS_CFG_ENABLE; 1171 writel(val, gp->regs + PCS_CFG); 1172 1173 /* Make sure serialink loopback is off. The meaning 1174 * of this bit is logically inverted based upon whether 1175 * you are in Serialink or SERDES mode. 1176 */ 1177 val = readl(gp->regs + PCS_SCTRL); 1178 if (gp->phy_type == phy_serialink) 1179 val &= ~PCS_SCTRL_LOOP; 1180 else 1181 val |= PCS_SCTRL_LOOP; 1182 writel(val, gp->regs + PCS_SCTRL); 1183} 1184 1185#define STOP_TRIES 32 1186 1187/* Must be invoked under gp->lock and gp->tx_lock. */ 1188static void gem_reset(struct gem *gp) 1189{ 1190 int limit; 1191 u32 val; 1192 1193 /* Make sure we won't get any more interrupts */ 1194 writel(0xffffffff, gp->regs + GREG_IMASK); 1195 1196 /* Reset the chip */ 1197 writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST, 1198 gp->regs + GREG_SWRST); 1199 1200 limit = STOP_TRIES; 1201 1202 do { 1203 udelay(20); 1204 val = readl(gp->regs + GREG_SWRST); 1205 if (limit-- <= 0) 1206 break; 1207 } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)); 1208 1209 if (limit < 0) 1210 netdev_err(gp->dev, "SW reset is ghetto\n"); 1211 1212 if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) 1213 gem_pcs_reinit_adv(gp); 1214} 1215 1216/* Must be invoked under gp->lock and gp->tx_lock. */ 1217static void gem_start_dma(struct gem *gp) 1218{ 1219 u32 val; 1220 1221 /* We are ready to rock, turn everything on. */ 1222 val = readl(gp->regs + TXDMA_CFG); 1223 writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); 1224 val = readl(gp->regs + RXDMA_CFG); 1225 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 1226 val = readl(gp->regs + MAC_TXCFG); 1227 writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); 1228 val = readl(gp->regs + MAC_RXCFG); 1229 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 1230 1231 (void) readl(gp->regs + MAC_RXCFG); 1232 udelay(100); 1233 1234 gem_enable_ints(gp); 1235 1236 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 1237} 1238 1239/* Must be invoked under gp->lock and gp->tx_lock. DMA won't be 1240 * actually stopped before about 4ms tho ... 1241 */ 1242static void gem_stop_dma(struct gem *gp) 1243{ 1244 u32 val; 1245 1246 /* We are done rocking, turn everything off. */ 1247 val = readl(gp->regs + TXDMA_CFG); 1248 writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); 1249 val = readl(gp->regs + RXDMA_CFG); 1250 writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 1251 val = readl(gp->regs + MAC_TXCFG); 1252 writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); 1253 val = readl(gp->regs + MAC_RXCFG); 1254 writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 1255 1256 (void) readl(gp->regs + MAC_RXCFG); 1257 1258 /* Need to wait a bit ... done by the caller */ 1259} 1260 1261 1262/* Must be invoked under gp->lock and gp->tx_lock. */ 1263// XXX dbl check what that function should do when called on PCS PHY 1264static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) 1265{ 1266 u32 advertise, features; 1267 int autoneg; 1268 int speed; 1269 int duplex; 1270 1271 if (gp->phy_type != phy_mii_mdio0 && 1272 gp->phy_type != phy_mii_mdio1) 1273 goto non_mii; 1274 1275 /* Setup advertise */ 1276 if (found_mii_phy(gp)) 1277 features = gp->phy_mii.def->features; 1278 else 1279 features = 0; 1280 1281 advertise = features & ADVERTISE_MASK; 1282 if (gp->phy_mii.advertising != 0) 1283 advertise &= gp->phy_mii.advertising; 1284 1285 autoneg = gp->want_autoneg; 1286 speed = gp->phy_mii.speed; 1287 duplex = gp->phy_mii.duplex; 1288 1289 /* Setup link parameters */ 1290 if (!ep) 1291 goto start_aneg; 1292 if (ep->autoneg == AUTONEG_ENABLE) { 1293 advertise = ep->advertising; 1294 autoneg = 1; 1295 } else { 1296 autoneg = 0; 1297 speed = ep->speed; 1298 duplex = ep->duplex; 1299 } 1300 1301start_aneg: 1302 /* Sanitize settings based on PHY capabilities */ 1303 if ((features & SUPPORTED_Autoneg) == 0) 1304 autoneg = 0; 1305 if (speed == SPEED_1000 && 1306 !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full))) 1307 speed = SPEED_100; 1308 if (speed == SPEED_100 && 1309 !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full))) 1310 speed = SPEED_10; 1311 if (duplex == DUPLEX_FULL && 1312 !(features & (SUPPORTED_1000baseT_Full | 1313 SUPPORTED_100baseT_Full | 1314 SUPPORTED_10baseT_Full))) 1315 duplex = DUPLEX_HALF; 1316 if (speed == 0) 1317 speed = SPEED_10; 1318 1319 /* If we are asleep, we don't try to actually setup the PHY, we 1320 * just store the settings 1321 */ 1322 if (gp->asleep) { 1323 gp->phy_mii.autoneg = gp->want_autoneg = autoneg; 1324 gp->phy_mii.speed = speed; 1325 gp->phy_mii.duplex = duplex; 1326 return; 1327 } 1328 1329 /* Configure PHY & start aneg */ 1330 gp->want_autoneg = autoneg; 1331 if (autoneg) { 1332 if (found_mii_phy(gp)) 1333 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise); 1334 gp->lstate = link_aneg; 1335 } else { 1336 if (found_mii_phy(gp)) 1337 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex); 1338 gp->lstate = link_force_ok; 1339 } 1340 1341non_mii: 1342 gp->timer_ticks = 0; 1343 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); 1344} 1345 1346/* A link-up condition has occurred, initialize and enable the 1347 * rest of the chip. 1348 * 1349 * Must be invoked under gp->lock and gp->tx_lock. 1350 */ 1351static int gem_set_link_modes(struct gem *gp) 1352{ 1353 u32 val; 1354 int full_duplex, speed, pause; 1355 1356 full_duplex = 0; 1357 speed = SPEED_10; 1358 pause = 0; 1359 1360 if (found_mii_phy(gp)) { 1361 if (gp->phy_mii.def->ops->read_link(&gp->phy_mii)) 1362 return 1; 1363 full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL); 1364 speed = gp->phy_mii.speed; 1365 pause = gp->phy_mii.pause; 1366 } else if (gp->phy_type == phy_serialink || 1367 gp->phy_type == phy_serdes) { 1368 u32 pcs_lpa = readl(gp->regs + PCS_MIILP); 1369 1370 if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes) 1371 full_duplex = 1; 1372 speed = SPEED_1000; 1373 } 1374 1375 netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n", 1376 speed, (full_duplex ? "full" : "half")); 1377 1378 if (!gp->running) 1379 return 0; 1380 1381 val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU); 1382 if (full_duplex) { 1383 val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL); 1384 } else { 1385 /* MAC_TXCFG_NBO must be zero. */ 1386 } 1387 writel(val, gp->regs + MAC_TXCFG); 1388 1389 val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED); 1390 if (!full_duplex && 1391 (gp->phy_type == phy_mii_mdio0 || 1392 gp->phy_type == phy_mii_mdio1)) { 1393 val |= MAC_XIFCFG_DISE; 1394 } else if (full_duplex) { 1395 val |= MAC_XIFCFG_FLED; 1396 } 1397 1398 if (speed == SPEED_1000) 1399 val |= (MAC_XIFCFG_GMII); 1400 1401 writel(val, gp->regs + MAC_XIFCFG); 1402 1403 /* If gigabit and half-duplex, enable carrier extension 1404 * mode. Else, disable it. 1405 */ 1406 if (speed == SPEED_1000 && !full_duplex) { 1407 val = readl(gp->regs + MAC_TXCFG); 1408 writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); 1409 1410 val = readl(gp->regs + MAC_RXCFG); 1411 writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); 1412 } else { 1413 val = readl(gp->regs + MAC_TXCFG); 1414 writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); 1415 1416 val = readl(gp->regs + MAC_RXCFG); 1417 writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); 1418 } 1419 1420 if (gp->phy_type == phy_serialink || 1421 gp->phy_type == phy_serdes) { 1422 u32 pcs_lpa = readl(gp->regs + PCS_MIILP); 1423 1424 if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP)) 1425 pause = 1; 1426 } 1427 1428 if (netif_msg_link(gp)) { 1429 if (pause) { 1430 netdev_info(gp->dev, 1431 "Pause is enabled (rxfifo: %d off: %d on: %d)\n", 1432 gp->rx_fifo_sz, 1433 gp->rx_pause_off, 1434 gp->rx_pause_on); 1435 } else { 1436 netdev_info(gp->dev, "Pause is disabled\n"); 1437 } 1438 } 1439 1440 if (!full_duplex) 1441 writel(512, gp->regs + MAC_STIME); 1442 else 1443 writel(64, gp->regs + MAC_STIME); 1444 val = readl(gp->regs + MAC_MCCFG); 1445 if (pause) 1446 val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE); 1447 else 1448 val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE); 1449 writel(val, gp->regs + MAC_MCCFG); 1450 1451 gem_start_dma(gp); 1452 1453 return 0; 1454} 1455 1456/* Must be invoked under gp->lock and gp->tx_lock. */ 1457static int gem_mdio_link_not_up(struct gem *gp) 1458{ 1459 switch (gp->lstate) { 1460 case link_force_ret: 1461 netif_info(gp, link, gp->dev, 1462 "Autoneg failed again, keeping forced mode\n"); 1463 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, 1464 gp->last_forced_speed, DUPLEX_HALF); 1465 gp->timer_ticks = 5; 1466 gp->lstate = link_force_ok; 1467 return 0; 1468 case link_aneg: 1469 /* We try forced modes after a failed aneg only on PHYs that don't 1470 * have "magic_aneg" bit set, which means they internally do the 1471 * while forced-mode thingy. On these, we just restart aneg 1472 */ 1473 if (gp->phy_mii.def->magic_aneg) 1474 return 1; 1475 netif_info(gp, link, gp->dev, "switching to forced 100bt\n"); 1476 /* Try forced modes. */ 1477 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100, 1478 DUPLEX_HALF); 1479 gp->timer_ticks = 5; 1480 gp->lstate = link_force_try; 1481 return 0; 1482 case link_force_try: 1483 /* Downgrade from 100 to 10 Mbps if necessary. 1484 * If already at 10Mbps, warn user about the 1485 * situation every 10 ticks. 1486 */ 1487 if (gp->phy_mii.speed == SPEED_100) { 1488 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10, 1489 DUPLEX_HALF); 1490 gp->timer_ticks = 5; 1491 netif_info(gp, link, gp->dev, 1492 "switching to forced 10bt\n"); 1493 return 0; 1494 } else 1495 return 1; 1496 default: 1497 return 0; 1498 } 1499} 1500 1501static void gem_link_timer(unsigned long data) 1502{ 1503 struct gem *gp = (struct gem *) data; 1504 int restart_aneg = 0; 1505 1506 if (gp->asleep) 1507 return; 1508 1509 spin_lock_irq(&gp->lock); 1510 spin_lock(&gp->tx_lock); 1511 gem_get_cell(gp); 1512 1513 /* If the reset task is still pending, we just 1514 * reschedule the link timer 1515 */ 1516 if (gp->reset_task_pending) 1517 goto restart; 1518 1519 if (gp->phy_type == phy_serialink || 1520 gp->phy_type == phy_serdes) { 1521 u32 val = readl(gp->regs + PCS_MIISTAT); 1522 1523 if (!(val & PCS_MIISTAT_LS)) 1524 val = readl(gp->regs + PCS_MIISTAT); 1525 1526 if ((val & PCS_MIISTAT_LS) != 0) { 1527 if (gp->lstate == link_up) 1528 goto restart; 1529 1530 gp->lstate = link_up; 1531 netif_carrier_on(gp->dev); 1532 (void)gem_set_link_modes(gp); 1533 } 1534 goto restart; 1535 } 1536 if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) { 1537 /* Ok, here we got a link. If we had it due to a forced 1538 * fallback, and we were configured for autoneg, we do 1539 * retry a short autoneg pass. If you know your hub is 1540 * broken, use ethtool ;) 1541 */ 1542 if (gp->lstate == link_force_try && gp->want_autoneg) { 1543 gp->lstate = link_force_ret; 1544 gp->last_forced_speed = gp->phy_mii.speed; 1545 gp->timer_ticks = 5; 1546 if (netif_msg_link(gp)) 1547 netdev_info(gp->dev, 1548 "Got link after fallback, retrying autoneg once...\n"); 1549 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); 1550 } else if (gp->lstate != link_up) { 1551 gp->lstate = link_up; 1552 netif_carrier_on(gp->dev); 1553 if (gem_set_link_modes(gp)) 1554 restart_aneg = 1; 1555 } 1556 } else { 1557 /* If the link was previously up, we restart the 1558 * whole process 1559 */ 1560 if (gp->lstate == link_up) { 1561 gp->lstate = link_down; 1562 netif_info(gp, link, gp->dev, "Link down\n"); 1563 netif_carrier_off(gp->dev); 1564 gp->reset_task_pending = 1; 1565 schedule_work(&gp->reset_task); 1566 restart_aneg = 1; 1567 } else if (++gp->timer_ticks > 10) { 1568 if (found_mii_phy(gp)) 1569 restart_aneg = gem_mdio_link_not_up(gp); 1570 else 1571 restart_aneg = 1; 1572 } 1573 } 1574 if (restart_aneg) { 1575 gem_begin_auto_negotiation(gp, NULL); 1576 goto out_unlock; 1577 } 1578restart: 1579 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); 1580out_unlock: 1581 gem_put_cell(gp); 1582 spin_unlock(&gp->tx_lock); 1583 spin_unlock_irq(&gp->lock); 1584} 1585 1586/* Must be invoked under gp->lock and gp->tx_lock. */ 1587static void gem_clean_rings(struct gem *gp) 1588{ 1589 struct gem_init_block *gb = gp->init_block; 1590 struct sk_buff *skb; 1591 int i; 1592 dma_addr_t dma_addr; 1593 1594 for (i = 0; i < RX_RING_SIZE; i++) { 1595 struct gem_rxd *rxd; 1596 1597 rxd = &gb->rxd[i]; 1598 if (gp->rx_skbs[i] != NULL) { 1599 skb = gp->rx_skbs[i]; 1600 dma_addr = le64_to_cpu(rxd->buffer); 1601 pci_unmap_page(gp->pdev, dma_addr, 1602 RX_BUF_ALLOC_SIZE(gp), 1603 PCI_DMA_FROMDEVICE); 1604 dev_kfree_skb_any(skb); 1605 gp->rx_skbs[i] = NULL; 1606 } 1607 rxd->status_word = 0; 1608 wmb(); 1609 rxd->buffer = 0; 1610 } 1611 1612 for (i = 0; i < TX_RING_SIZE; i++) { 1613 if (gp->tx_skbs[i] != NULL) { 1614 struct gem_txd *txd; 1615 int frag; 1616 1617 skb = gp->tx_skbs[i]; 1618 gp->tx_skbs[i] = NULL; 1619 1620 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 1621 int ent = i & (TX_RING_SIZE - 1); 1622 1623 txd = &gb->txd[ent]; 1624 dma_addr = le64_to_cpu(txd->buffer); 1625 pci_unmap_page(gp->pdev, dma_addr, 1626 le64_to_cpu(txd->control_word) & 1627 TXDCTRL_BUFSZ, PCI_DMA_TODEVICE); 1628 1629 if (frag != skb_shinfo(skb)->nr_frags) 1630 i++; 1631 } 1632 dev_kfree_skb_any(skb); 1633 } 1634 } 1635} 1636 1637/* Must be invoked under gp->lock and gp->tx_lock. */ 1638static void gem_init_rings(struct gem *gp) 1639{ 1640 struct gem_init_block *gb = gp->init_block; 1641 struct net_device *dev = gp->dev; 1642 int i; 1643 dma_addr_t dma_addr; 1644 1645 gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0; 1646 1647 gem_clean_rings(gp); 1648 1649 gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN, 1650 (unsigned)VLAN_ETH_FRAME_LEN); 1651 1652 for (i = 0; i < RX_RING_SIZE; i++) { 1653 struct sk_buff *skb; 1654 struct gem_rxd *rxd = &gb->rxd[i]; 1655 1656 skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); 1657 if (!skb) { 1658 rxd->buffer = 0; 1659 rxd->status_word = 0; 1660 continue; 1661 } 1662 1663 gp->rx_skbs[i] = skb; 1664 skb->dev = dev; 1665 skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); 1666 dma_addr = pci_map_page(gp->pdev, 1667 virt_to_page(skb->data), 1668 offset_in_page(skb->data), 1669 RX_BUF_ALLOC_SIZE(gp), 1670 PCI_DMA_FROMDEVICE); 1671 rxd->buffer = cpu_to_le64(dma_addr); 1672 wmb(); 1673 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 1674 skb_reserve(skb, RX_OFFSET); 1675 } 1676 1677 for (i = 0; i < TX_RING_SIZE; i++) { 1678 struct gem_txd *txd = &gb->txd[i]; 1679 1680 txd->control_word = 0; 1681 wmb(); 1682 txd->buffer = 0; 1683 } 1684 wmb(); 1685} 1686 1687/* Init PHY interface and start link poll state machine */ 1688static void gem_init_phy(struct gem *gp) 1689{ 1690 u32 mifcfg; 1691 1692 /* Revert MIF CFG setting done on stop_phy */ 1693 mifcfg = readl(gp->regs + MIF_CFG); 1694 mifcfg &= ~MIF_CFG_BBMODE; 1695 writel(mifcfg, gp->regs + MIF_CFG); 1696 1697 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { 1698 int i; 1699 1700 /* Those delay sucks, the HW seem to love them though, I'll 1701 * serisouly consider breaking some locks here to be able 1702 * to schedule instead 1703 */ 1704 for (i = 0; i < 3; i++) { 1705#ifdef CONFIG_PPC_PMAC 1706 pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0); 1707 msleep(20); 1708#endif 1709 /* Some PHYs used by apple have problem getting back to us, 1710 * we do an additional reset here 1711 */ 1712 phy_write(gp, MII_BMCR, BMCR_RESET); 1713 msleep(20); 1714 if (phy_read(gp, MII_BMCR) != 0xffff) 1715 break; 1716 if (i == 2) 1717 netdev_warn(gp->dev, "GMAC PHY not responding !\n"); 1718 } 1719 } 1720 1721 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && 1722 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { 1723 u32 val; 1724 1725 /* Init datapath mode register. */ 1726 if (gp->phy_type == phy_mii_mdio0 || 1727 gp->phy_type == phy_mii_mdio1) { 1728 val = PCS_DMODE_MGM; 1729 } else if (gp->phy_type == phy_serialink) { 1730 val = PCS_DMODE_SM | PCS_DMODE_GMOE; 1731 } else { 1732 val = PCS_DMODE_ESM; 1733 } 1734 1735 writel(val, gp->regs + PCS_DMODE); 1736 } 1737 1738 if (gp->phy_type == phy_mii_mdio0 || 1739 gp->phy_type == phy_mii_mdio1) { 1740 // XXX check for errors 1741 mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr); 1742 1743 /* Init PHY */ 1744 if (gp->phy_mii.def && gp->phy_mii.def->ops->init) 1745 gp->phy_mii.def->ops->init(&gp->phy_mii); 1746 } else { 1747 gem_pcs_reset(gp); 1748 gem_pcs_reinit_adv(gp); 1749 } 1750 1751 /* Default aneg parameters */ 1752 gp->timer_ticks = 0; 1753 gp->lstate = link_down; 1754 netif_carrier_off(gp->dev); 1755 1756 /* Can I advertise gigabit here ? I'd need BCM PHY docs... */ 1757 spin_lock_irq(&gp->lock); 1758 gem_begin_auto_negotiation(gp, NULL); 1759 spin_unlock_irq(&gp->lock); 1760} 1761 1762/* Must be invoked under gp->lock and gp->tx_lock. */ 1763static void gem_init_dma(struct gem *gp) 1764{ 1765 u64 desc_dma = (u64) gp->gblock_dvma; 1766 u32 val; 1767 1768 val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE); 1769 writel(val, gp->regs + TXDMA_CFG); 1770 1771 writel(desc_dma >> 32, gp->regs + TXDMA_DBHI); 1772 writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW); 1773 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); 1774 1775 writel(0, gp->regs + TXDMA_KICK); 1776 1777 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | 1778 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); 1779 writel(val, gp->regs + RXDMA_CFG); 1780 1781 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); 1782 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); 1783 1784 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 1785 1786 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); 1787 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); 1788 writel(val, gp->regs + RXDMA_PTHRESH); 1789 1790 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) 1791 writel(((5 & RXDMA_BLANK_IPKTS) | 1792 ((8 << 12) & RXDMA_BLANK_ITIME)), 1793 gp->regs + RXDMA_BLANK); 1794 else 1795 writel(((5 & RXDMA_BLANK_IPKTS) | 1796 ((4 << 12) & RXDMA_BLANK_ITIME)), 1797 gp->regs + RXDMA_BLANK); 1798} 1799 1800/* Must be invoked under gp->lock and gp->tx_lock. */ 1801static u32 gem_setup_multicast(struct gem *gp) 1802{ 1803 u32 rxcfg = 0; 1804 int i; 1805 1806 if ((gp->dev->flags & IFF_ALLMULTI) || 1807 (netdev_mc_count(gp->dev) > 256)) { 1808 for (i=0; i<16; i++) 1809 writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); 1810 rxcfg |= MAC_RXCFG_HFE; 1811 } else if (gp->dev->flags & IFF_PROMISC) { 1812 rxcfg |= MAC_RXCFG_PROM; 1813 } else { 1814 u16 hash_table[16]; 1815 u32 crc; 1816 struct netdev_hw_addr *ha; 1817 int i; 1818 1819 memset(hash_table, 0, sizeof(hash_table)); 1820 netdev_for_each_mc_addr(ha, gp->dev) { 1821 char *addrs = ha->addr; 1822 1823 if (!(*addrs & 1)) 1824 continue; 1825 1826 crc = ether_crc_le(6, addrs); 1827 crc >>= 24; 1828 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 1829 } 1830 for (i=0; i<16; i++) 1831 writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2)); 1832 rxcfg |= MAC_RXCFG_HFE; 1833 } 1834 1835 return rxcfg; 1836} 1837 1838/* Must be invoked under gp->lock and gp->tx_lock. */ 1839static void gem_init_mac(struct gem *gp) 1840{ 1841 unsigned char *e = &gp->dev->dev_addr[0]; 1842 1843 writel(0x1bf0, gp->regs + MAC_SNDPAUSE); 1844 1845 writel(0x00, gp->regs + MAC_IPG0); 1846 writel(0x08, gp->regs + MAC_IPG1); 1847 writel(0x04, gp->regs + MAC_IPG2); 1848 writel(0x40, gp->regs + MAC_STIME); 1849 writel(0x40, gp->regs + MAC_MINFSZ); 1850 1851 /* Ethernet payload + header + FCS + optional VLAN tag. */ 1852 writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ); 1853 1854 writel(0x07, gp->regs + MAC_PASIZE); 1855 writel(0x04, gp->regs + MAC_JAMSIZE); 1856 writel(0x10, gp->regs + MAC_ATTLIM); 1857 writel(0x8808, gp->regs + MAC_MCTYPE); 1858 1859 writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED); 1860 1861 writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); 1862 writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); 1863 writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); 1864 1865 writel(0, gp->regs + MAC_ADDR3); 1866 writel(0, gp->regs + MAC_ADDR4); 1867 writel(0, gp->regs + MAC_ADDR5); 1868 1869 writel(0x0001, gp->regs + MAC_ADDR6); 1870 writel(0xc200, gp->regs + MAC_ADDR7); 1871 writel(0x0180, gp->regs + MAC_ADDR8); 1872 1873 writel(0, gp->regs + MAC_AFILT0); 1874 writel(0, gp->regs + MAC_AFILT1); 1875 writel(0, gp->regs + MAC_AFILT2); 1876 writel(0, gp->regs + MAC_AF21MSK); 1877 writel(0, gp->regs + MAC_AF0MSK); 1878 1879 gp->mac_rx_cfg = gem_setup_multicast(gp); 1880#ifdef STRIP_FCS 1881 gp->mac_rx_cfg |= MAC_RXCFG_SFCS; 1882#endif 1883 writel(0, gp->regs + MAC_NCOLL); 1884 writel(0, gp->regs + MAC_FASUCC); 1885 writel(0, gp->regs + MAC_ECOLL); 1886 writel(0, gp->regs + MAC_LCOLL); 1887 writel(0, gp->regs + MAC_DTIMER); 1888 writel(0, gp->regs + MAC_PATMPS); 1889 writel(0, gp->regs + MAC_RFCTR); 1890 writel(0, gp->regs + MAC_LERR); 1891 writel(0, gp->regs + MAC_AERR); 1892 writel(0, gp->regs + MAC_FCSERR); 1893 writel(0, gp->regs + MAC_RXCVERR); 1894 1895 /* Clear RX/TX/MAC/XIF config, we will set these up and enable 1896 * them once a link is established. 1897 */ 1898 writel(0, gp->regs + MAC_TXCFG); 1899 writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG); 1900 writel(0, gp->regs + MAC_MCCFG); 1901 writel(0, gp->regs + MAC_XIFCFG); 1902 1903 /* Setup MAC interrupts. We want to get all of the interesting 1904 * counter expiration events, but we do not want to hear about 1905 * normal rx/tx as the DMA engine tells us that. 1906 */ 1907 writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK); 1908 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); 1909 1910 /* Don't enable even the PAUSE interrupts for now, we 1911 * make no use of those events other than to record them. 1912 */ 1913 writel(0xffffffff, gp->regs + MAC_MCMASK); 1914 1915 /* Don't enable GEM's WOL in normal operations 1916 */ 1917 if (gp->has_wol) 1918 writel(0, gp->regs + WOL_WAKECSR); 1919} 1920 1921/* Must be invoked under gp->lock and gp->tx_lock. */ 1922static void gem_init_pause_thresholds(struct gem *gp) 1923{ 1924 u32 cfg; 1925 1926 /* Calculate pause thresholds. Setting the OFF threshold to the 1927 * full RX fifo size effectively disables PAUSE generation which 1928 * is what we do for 10/100 only GEMs which have FIFOs too small 1929 * to make real gains from PAUSE. 1930 */ 1931 if (gp->rx_fifo_sz <= (2 * 1024)) { 1932 gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz; 1933 } else { 1934 int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63; 1935 int off = (gp->rx_fifo_sz - (max_frame * 2)); 1936 int on = off - max_frame; 1937 1938 gp->rx_pause_off = off; 1939 gp->rx_pause_on = on; 1940 } 1941 1942 1943 /* Configure the chip "burst" DMA mode & enable some 1944 * HW bug fixes on Apple version 1945 */ 1946 cfg = 0; 1947 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) 1948 cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX; 1949#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) 1950 cfg |= GREG_CFG_IBURST; 1951#endif 1952 cfg |= ((31 << 1) & GREG_CFG_TXDMALIM); 1953 cfg |= ((31 << 6) & GREG_CFG_RXDMALIM); 1954 writel(cfg, gp->regs + GREG_CFG); 1955 1956 /* If Infinite Burst didn't stick, then use different 1957 * thresholds (and Apple bug fixes don't exist) 1958 */ 1959 if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) { 1960 cfg = ((2 << 1) & GREG_CFG_TXDMALIM); 1961 cfg |= ((8 << 6) & GREG_CFG_RXDMALIM); 1962 writel(cfg, gp->regs + GREG_CFG); 1963 } 1964} 1965 1966static int gem_check_invariants(struct gem *gp) 1967{ 1968 struct pci_dev *pdev = gp->pdev; 1969 u32 mif_cfg; 1970 1971 /* On Apple's sungem, we can't rely on registers as the chip 1972 * was been powered down by the firmware. The PHY is looked 1973 * up later on. 1974 */ 1975 if (pdev->vendor == PCI_VENDOR_ID_APPLE) { 1976 gp->phy_type = phy_mii_mdio0; 1977 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; 1978 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; 1979 gp->swrst_base = 0; 1980 1981 mif_cfg = readl(gp->regs + MIF_CFG); 1982 mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1); 1983 mif_cfg |= MIF_CFG_MDI0; 1984 writel(mif_cfg, gp->regs + MIF_CFG); 1985 writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE); 1986 writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG); 1987 1988 /* We hard-code the PHY address so we can properly bring it out of 1989 * reset later on, we can't really probe it at this point, though 1990 * that isn't an issue. 1991 */ 1992 if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC) 1993 gp->mii_phy_addr = 1; 1994 else 1995 gp->mii_phy_addr = 0; 1996 1997 return 0; 1998 } 1999 2000 mif_cfg = readl(gp->regs + MIF_CFG); 2001 2002 if (pdev->vendor == PCI_VENDOR_ID_SUN && 2003 pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) { 2004 /* One of the MII PHYs _must_ be present 2005 * as this chip has no gigabit PHY. 2006 */ 2007 if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { 2008 pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n", 2009 mif_cfg); 2010 return -1; 2011 } 2012 } 2013 2014 /* Determine initial PHY interface type guess. MDIO1 is the 2015 * external PHY and thus takes precedence over MDIO0. 2016 */ 2017 2018 if (mif_cfg & MIF_CFG_MDI1) { 2019 gp->phy_type = phy_mii_mdio1; 2020 mif_cfg |= MIF_CFG_PSELECT; 2021 writel(mif_cfg, gp->regs + MIF_CFG); 2022 } else if (mif_cfg & MIF_CFG_MDI0) { 2023 gp->phy_type = phy_mii_mdio0; 2024 mif_cfg &= ~MIF_CFG_PSELECT; 2025 writel(mif_cfg, gp->regs + MIF_CFG); 2026 } else { 2027#ifdef CONFIG_SPARC 2028 const char *p; 2029 2030 p = of_get_property(gp->of_node, "shared-pins", NULL); 2031 if (p && !strcmp(p, "serdes")) 2032 gp->phy_type = phy_serdes; 2033 else 2034#endif 2035 gp->phy_type = phy_serialink; 2036 } 2037 if (gp->phy_type == phy_mii_mdio1 || 2038 gp->phy_type == phy_mii_mdio0) { 2039 int i; 2040 2041 for (i = 0; i < 32; i++) { 2042 gp->mii_phy_addr = i; 2043 if (phy_read(gp, MII_BMCR) != 0xffff) 2044 break; 2045 } 2046 if (i == 32) { 2047 if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { 2048 pr_err("RIO MII phy will not respond\n"); 2049 return -1; 2050 } 2051 gp->phy_type = phy_serdes; 2052 } 2053 } 2054 2055 /* Fetch the FIFO configurations now too. */ 2056 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; 2057 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; 2058 2059 if (pdev->vendor == PCI_VENDOR_ID_SUN) { 2060 if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { 2061 if (gp->tx_fifo_sz != (9 * 1024) || 2062 gp->rx_fifo_sz != (20 * 1024)) { 2063 pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2064 gp->tx_fifo_sz, gp->rx_fifo_sz); 2065 return -1; 2066 } 2067 gp->swrst_base = 0; 2068 } else { 2069 if (gp->tx_fifo_sz != (2 * 1024) || 2070 gp->rx_fifo_sz != (2 * 1024)) { 2071 pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2072 gp->tx_fifo_sz, gp->rx_fifo_sz); 2073 return -1; 2074 } 2075 gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT; 2076 } 2077 } 2078 2079 return 0; 2080} 2081 2082/* Must be invoked under gp->lock and gp->tx_lock. */ 2083static void gem_reinit_chip(struct gem *gp) 2084{ 2085 /* Reset the chip */ 2086 gem_reset(gp); 2087 2088 /* Make sure ints are disabled */ 2089 gem_disable_ints(gp); 2090 2091 /* Allocate & setup ring buffers */ 2092 gem_init_rings(gp); 2093 2094 /* Configure pause thresholds */ 2095 gem_init_pause_thresholds(gp); 2096 2097 /* Init DMA & MAC engines */ 2098 gem_init_dma(gp); 2099 gem_init_mac(gp); 2100} 2101 2102 2103/* Must be invoked with no lock held. */ 2104static void gem_stop_phy(struct gem *gp, int wol) 2105{ 2106 u32 mifcfg; 2107 unsigned long flags; 2108 2109 /* Let the chip settle down a bit, it seems that helps 2110 * for sleep mode on some models 2111 */ 2112 msleep(10); 2113 2114 /* Make sure we aren't polling PHY status change. We 2115 * don't currently use that feature though 2116 */ 2117 mifcfg = readl(gp->regs + MIF_CFG); 2118 mifcfg &= ~MIF_CFG_POLL; 2119 writel(mifcfg, gp->regs + MIF_CFG); 2120 2121 if (wol && gp->has_wol) { 2122 unsigned char *e = &gp->dev->dev_addr[0]; 2123 u32 csr; 2124 2125 /* Setup wake-on-lan for MAGIC packet */ 2126 writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB, 2127 gp->regs + MAC_RXCFG); 2128 writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0); 2129 writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1); 2130 writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2); 2131 2132 writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT); 2133 csr = WOL_WAKECSR_ENABLE; 2134 if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0) 2135 csr |= WOL_WAKECSR_MII; 2136 writel(csr, gp->regs + WOL_WAKECSR); 2137 } else { 2138 writel(0, gp->regs + MAC_RXCFG); 2139 (void)readl(gp->regs + MAC_RXCFG); 2140 /* Machine sleep will die in strange ways if we 2141 * dont wait a bit here, looks like the chip takes 2142 * some time to really shut down 2143 */ 2144 msleep(10); 2145 } 2146 2147 writel(0, gp->regs + MAC_TXCFG); 2148 writel(0, gp->regs + MAC_XIFCFG); 2149 writel(0, gp->regs + TXDMA_CFG); 2150 writel(0, gp->regs + RXDMA_CFG); 2151 2152 if (!wol) { 2153 spin_lock_irqsave(&gp->lock, flags); 2154 spin_lock(&gp->tx_lock); 2155 gem_reset(gp); 2156 writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); 2157 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); 2158 spin_unlock(&gp->tx_lock); 2159 spin_unlock_irqrestore(&gp->lock, flags); 2160 2161 /* No need to take the lock here */ 2162 2163 if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) 2164 gp->phy_mii.def->ops->suspend(&gp->phy_mii); 2165 2166 /* According to Apple, we must set the MDIO pins to this begnign 2167 * state or we may 1) eat more current, 2) damage some PHYs 2168 */ 2169 writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG); 2170 writel(0, gp->regs + MIF_BBCLK); 2171 writel(0, gp->regs + MIF_BBDATA); 2172 writel(0, gp->regs + MIF_BBOENAB); 2173 writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG); 2174 (void) readl(gp->regs + MAC_XIFCFG); 2175 } 2176} 2177 2178 2179static int gem_do_start(struct net_device *dev) 2180{ 2181 struct gem *gp = netdev_priv(dev); 2182 unsigned long flags; 2183 2184 spin_lock_irqsave(&gp->lock, flags); 2185 spin_lock(&gp->tx_lock); 2186 2187 /* Enable the cell */ 2188 gem_get_cell(gp); 2189 2190 /* Init & setup chip hardware */ 2191 gem_reinit_chip(gp); 2192 2193 gp->running = 1; 2194 2195 napi_enable(&gp->napi); 2196 2197 if (gp->lstate == link_up) { 2198 netif_carrier_on(gp->dev); 2199 gem_set_link_modes(gp); 2200 } 2201 2202 netif_wake_queue(gp->dev); 2203 2204 spin_unlock(&gp->tx_lock); 2205 spin_unlock_irqrestore(&gp->lock, flags); 2206 2207 if (request_irq(gp->pdev->irq, gem_interrupt, 2208 IRQF_SHARED, dev->name, (void *)dev)) { 2209 netdev_err(dev, "failed to request irq !\n"); 2210 2211 spin_lock_irqsave(&gp->lock, flags); 2212 spin_lock(&gp->tx_lock); 2213 2214 napi_disable(&gp->napi); 2215 2216 gp->running = 0; 2217 gem_reset(gp); 2218 gem_clean_rings(gp); 2219 gem_put_cell(gp); 2220 2221 spin_unlock(&gp->tx_lock); 2222 spin_unlock_irqrestore(&gp->lock, flags); 2223 2224 return -EAGAIN; 2225 } 2226 2227 return 0; 2228} 2229 2230static void gem_do_stop(struct net_device *dev, int wol) 2231{ 2232 struct gem *gp = netdev_priv(dev); 2233 unsigned long flags; 2234 2235 spin_lock_irqsave(&gp->lock, flags); 2236 spin_lock(&gp->tx_lock); 2237 2238 gp->running = 0; 2239 2240 /* Stop netif queue */ 2241 netif_stop_queue(dev); 2242 2243 /* Make sure ints are disabled */ 2244 gem_disable_ints(gp); 2245 2246 /* We can drop the lock now */ 2247 spin_unlock(&gp->tx_lock); 2248 spin_unlock_irqrestore(&gp->lock, flags); 2249 2250 /* If we are going to sleep with WOL */ 2251 gem_stop_dma(gp); 2252 msleep(10); 2253 if (!wol) 2254 gem_reset(gp); 2255 msleep(10); 2256 2257 /* Get rid of rings */ 2258 gem_clean_rings(gp); 2259 2260 /* No irq needed anymore */ 2261 free_irq(gp->pdev->irq, (void *) dev); 2262 2263 /* Cell not needed neither if no WOL */ 2264 if (!wol) { 2265 spin_lock_irqsave(&gp->lock, flags); 2266 gem_put_cell(gp); 2267 spin_unlock_irqrestore(&gp->lock, flags); 2268 } 2269} 2270 2271static void gem_reset_task(struct work_struct *work) 2272{ 2273 struct gem *gp = container_of(work, struct gem, reset_task); 2274 2275 mutex_lock(&gp->pm_mutex); 2276 2277 if (gp->opened) 2278 napi_disable(&gp->napi); 2279 2280 spin_lock_irq(&gp->lock); 2281 spin_lock(&gp->tx_lock); 2282 2283 if (gp->running) { 2284 netif_stop_queue(gp->dev); 2285 2286 /* Reset the chip & rings */ 2287 gem_reinit_chip(gp); 2288 if (gp->lstate == link_up) 2289 gem_set_link_modes(gp); 2290 netif_wake_queue(gp->dev); 2291 } 2292 2293 gp->reset_task_pending = 0; 2294 2295 spin_unlock(&gp->tx_lock); 2296 spin_unlock_irq(&gp->lock); 2297 2298 if (gp->opened) 2299 napi_enable(&gp->napi); 2300 2301 mutex_unlock(&gp->pm_mutex); 2302} 2303 2304 2305static int gem_open(struct net_device *dev) 2306{ 2307 struct gem *gp = netdev_priv(dev); 2308 int rc = 0; 2309 2310 mutex_lock(&gp->pm_mutex); 2311 2312 /* We need the cell enabled */ 2313 if (!gp->asleep) 2314 rc = gem_do_start(dev); 2315 gp->opened = (rc == 0); 2316 2317 mutex_unlock(&gp->pm_mutex); 2318 2319 return rc; 2320} 2321 2322static int gem_close(struct net_device *dev) 2323{ 2324 struct gem *gp = netdev_priv(dev); 2325 2326 mutex_lock(&gp->pm_mutex); 2327 2328 napi_disable(&gp->napi); 2329 2330 gp->opened = 0; 2331 if (!gp->asleep) 2332 gem_do_stop(dev, 0); 2333 2334 mutex_unlock(&gp->pm_mutex); 2335 2336 return 0; 2337} 2338 2339#ifdef CONFIG_PM 2340static int gem_suspend(struct pci_dev *pdev, pm_message_t state) 2341{ 2342 struct net_device *dev = pci_get_drvdata(pdev); 2343 struct gem *gp = netdev_priv(dev); 2344 unsigned long flags; 2345 2346 mutex_lock(&gp->pm_mutex); 2347 2348 netdev_info(dev, "suspending, WakeOnLan %s\n", 2349 (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled"); 2350 2351 /* Keep the cell enabled during the entire operation */ 2352 spin_lock_irqsave(&gp->lock, flags); 2353 spin_lock(&gp->tx_lock); 2354 gem_get_cell(gp); 2355 spin_unlock(&gp->tx_lock); 2356 spin_unlock_irqrestore(&gp->lock, flags); 2357 2358 /* If the driver is opened, we stop the MAC */ 2359 if (gp->opened) { 2360 napi_disable(&gp->napi); 2361 2362 /* Stop traffic, mark us closed */ 2363 netif_device_detach(dev); 2364 2365 /* Switch off MAC, remember WOL setting */ 2366 gp->asleep_wol = gp->wake_on_lan; 2367 gem_do_stop(dev, gp->asleep_wol); 2368 } else 2369 gp->asleep_wol = 0; 2370 2371 /* Mark us asleep */ 2372 gp->asleep = 1; 2373 wmb(); 2374 2375 /* Stop the link timer */ 2376 del_timer_sync(&gp->link_timer); 2377 2378 /* Now we release the mutex to not block the reset task who 2379 * can take it too. We are marked asleep, so there will be no 2380 * conflict here 2381 */ 2382 mutex_unlock(&gp->pm_mutex); 2383 2384 /* Wait for the pending reset task to complete */ 2385 flush_work_sync(&gp->reset_task); 2386 2387 /* Shut the PHY down eventually and setup WOL */ 2388 gem_stop_phy(gp, gp->asleep_wol); 2389 2390 /* Make sure bus master is disabled */ 2391 pci_disable_device(gp->pdev); 2392 2393 /* Release the cell, no need to take a lock at this point since 2394 * nothing else can happen now 2395 */ 2396 gem_put_cell(gp); 2397 2398 return 0; 2399} 2400 2401static int gem_resume(struct pci_dev *pdev) 2402{ 2403 struct net_device *dev = pci_get_drvdata(pdev); 2404 struct gem *gp = netdev_priv(dev); 2405 unsigned long flags; 2406 2407 netdev_info(dev, "resuming\n"); 2408 2409 mutex_lock(&gp->pm_mutex); 2410 2411 /* Keep the cell enabled during the entire operation, no need to 2412 * take a lock here tho since nothing else can happen while we are 2413 * marked asleep 2414 */ 2415 gem_get_cell(gp); 2416 2417 /* Make sure PCI access and bus master are enabled */ 2418 if (pci_enable_device(gp->pdev)) { 2419 netdev_err(dev, "Can't re-enable chip !\n"); 2420 /* Put cell and forget it for now, it will be considered as 2421 * still asleep, a new sleep cycle may bring it back 2422 */ 2423 gem_put_cell(gp); 2424 mutex_unlock(&gp->pm_mutex); 2425 return 0; 2426 } 2427 pci_set_master(gp->pdev); 2428 2429 /* Reset everything */ 2430 gem_reset(gp); 2431 2432 /* Mark us woken up */ 2433 gp->asleep = 0; 2434 wmb(); 2435 2436 /* Bring the PHY back. Again, lock is useless at this point as 2437 * nothing can be happening until we restart the whole thing 2438 */ 2439 gem_init_phy(gp); 2440 2441 /* If we were opened, bring everything back */ 2442 if (gp->opened) { 2443 /* Restart MAC */ 2444 gem_do_start(dev); 2445 2446 /* Re-attach net device */ 2447 netif_device_attach(dev); 2448 } 2449 2450 spin_lock_irqsave(&gp->lock, flags); 2451 spin_lock(&gp->tx_lock); 2452 2453 /* If we had WOL enabled, the cell clock was never turned off during 2454 * sleep, so we end up beeing unbalanced. Fix that here 2455 */ 2456 if (gp->asleep_wol) 2457 gem_put_cell(gp); 2458 2459 /* This function doesn't need to hold the cell, it will be held if the 2460 * driver is open by gem_do_start(). 2461 */ 2462 gem_put_cell(gp); 2463 2464 spin_unlock(&gp->tx_lock); 2465 spin_unlock_irqrestore(&gp->lock, flags); 2466 2467 mutex_unlock(&gp->pm_mutex); 2468 2469 return 0; 2470} 2471#endif /* CONFIG_PM */ 2472 2473static struct net_device_stats *gem_get_stats(struct net_device *dev) 2474{ 2475 struct gem *gp = netdev_priv(dev); 2476 2477 spin_lock_irq(&gp->lock); 2478 spin_lock(&gp->tx_lock); 2479 2480 /* I have seen this being called while the PM was in progress, 2481 * so we shield against this 2482 */ 2483 if (gp->running) { 2484 dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR); 2485 writel(0, gp->regs + MAC_FCSERR); 2486 2487 dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR); 2488 writel(0, gp->regs + MAC_AERR); 2489 2490 dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR); 2491 writel(0, gp->regs + MAC_LERR); 2492 2493 dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL); 2494 dev->stats.collisions += 2495 (readl(gp->regs + MAC_ECOLL) + 2496 readl(gp->regs + MAC_LCOLL)); 2497 writel(0, gp->regs + MAC_ECOLL); 2498 writel(0, gp->regs + MAC_LCOLL); 2499 } 2500 2501 spin_unlock(&gp->tx_lock); 2502 spin_unlock_irq(&gp->lock); 2503 2504 return &dev->stats; 2505} 2506 2507static int gem_set_mac_address(struct net_device *dev, void *addr) 2508{ 2509 struct sockaddr *macaddr = (struct sockaddr *) addr; 2510 struct gem *gp = netdev_priv(dev); 2511 unsigned char *e = &dev->dev_addr[0]; 2512 2513 if (!is_valid_ether_addr(macaddr->sa_data)) 2514 return -EADDRNOTAVAIL; 2515 2516 if (!netif_running(dev) || !netif_device_present(dev)) { 2517 /* We'll just catch it later when the 2518 * device is up'd or resumed. 2519 */ 2520 memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); 2521 return 0; 2522 } 2523 2524 mutex_lock(&gp->pm_mutex); 2525 memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); 2526 if (gp->running) { 2527 writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); 2528 writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); 2529 writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); 2530 } 2531 mutex_unlock(&gp->pm_mutex); 2532 2533 return 0; 2534} 2535 2536static void gem_set_multicast(struct net_device *dev) 2537{ 2538 struct gem *gp = netdev_priv(dev); 2539 u32 rxcfg, rxcfg_new; 2540 int limit = 10000; 2541 2542 2543 spin_lock_irq(&gp->lock); 2544 spin_lock(&gp->tx_lock); 2545 2546 if (!gp->running) 2547 goto bail; 2548 2549 netif_stop_queue(dev); 2550 2551 rxcfg = readl(gp->regs + MAC_RXCFG); 2552 rxcfg_new = gem_setup_multicast(gp); 2553#ifdef STRIP_FCS 2554 rxcfg_new |= MAC_RXCFG_SFCS; 2555#endif 2556 gp->mac_rx_cfg = rxcfg_new; 2557 2558 writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 2559 while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) { 2560 if (!limit--) 2561 break; 2562 udelay(10); 2563 } 2564 2565 rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE); 2566 rxcfg |= rxcfg_new; 2567 2568 writel(rxcfg, gp->regs + MAC_RXCFG); 2569 2570 netif_wake_queue(dev); 2571 2572 bail: 2573 spin_unlock(&gp->tx_lock); 2574 spin_unlock_irq(&gp->lock); 2575} 2576 2577/* Jumbo-grams don't seem to work :-( */ 2578#define GEM_MIN_MTU 68 2579#if 1 2580#define GEM_MAX_MTU 1500 2581#else 2582#define GEM_MAX_MTU 9000 2583#endif 2584 2585static int gem_change_mtu(struct net_device *dev, int new_mtu) 2586{ 2587 struct gem *gp = netdev_priv(dev); 2588 2589 if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) 2590 return -EINVAL; 2591 2592 if (!netif_running(dev) || !netif_device_present(dev)) { 2593 /* We'll just catch it later when the 2594 * device is up'd or resumed. 2595 */ 2596 dev->mtu = new_mtu; 2597 return 0; 2598 } 2599 2600 mutex_lock(&gp->pm_mutex); 2601 spin_lock_irq(&gp->lock); 2602 spin_lock(&gp->tx_lock); 2603 dev->mtu = new_mtu; 2604 if (gp->running) { 2605 gem_reinit_chip(gp); 2606 if (gp->lstate == link_up) 2607 gem_set_link_modes(gp); 2608 } 2609 spin_unlock(&gp->tx_lock); 2610 spin_unlock_irq(&gp->lock); 2611 mutex_unlock(&gp->pm_mutex); 2612 2613 return 0; 2614} 2615 2616static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2617{ 2618 struct gem *gp = netdev_priv(dev); 2619 2620 strcpy(info->driver, DRV_NAME); 2621 strcpy(info->version, DRV_VERSION); 2622 strcpy(info->bus_info, pci_name(gp->pdev)); 2623} 2624 2625static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2626{ 2627 struct gem *gp = netdev_priv(dev); 2628 2629 if (gp->phy_type == phy_mii_mdio0 || 2630 gp->phy_type == phy_mii_mdio1) { 2631 if (gp->phy_mii.def) 2632 cmd->supported = gp->phy_mii.def->features; 2633 else 2634 cmd->supported = (SUPPORTED_10baseT_Half | 2635 SUPPORTED_10baseT_Full); 2636 2637 /* XXX hardcoded stuff for now */ 2638 cmd->port = PORT_MII; 2639 cmd->transceiver = XCVR_EXTERNAL; 2640 cmd->phy_address = 0; /* XXX fixed PHYAD */ 2641 2642 /* Return current PHY settings */ 2643 spin_lock_irq(&gp->lock); 2644 cmd->autoneg = gp->want_autoneg; 2645 cmd->speed = gp->phy_mii.speed; 2646 cmd->duplex = gp->phy_mii.duplex; 2647 cmd->advertising = gp->phy_mii.advertising; 2648 2649 /* If we started with a forced mode, we don't have a default 2650 * advertise set, we need to return something sensible so 2651 * userland can re-enable autoneg properly. 2652 */ 2653 if (cmd->advertising == 0) 2654 cmd->advertising = cmd->supported; 2655 spin_unlock_irq(&gp->lock); 2656 } else { // XXX PCS ? 2657 cmd->supported = 2658 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 2659 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 2660 SUPPORTED_Autoneg); 2661 cmd->advertising = cmd->supported; 2662 cmd->speed = 0; 2663 cmd->duplex = cmd->port = cmd->phy_address = 2664 cmd->transceiver = cmd->autoneg = 0; 2665 2666 /* serdes means usually a Fibre connector, with most fixed */ 2667 if (gp->phy_type == phy_serdes) { 2668 cmd->port = PORT_FIBRE; 2669 cmd->supported = (SUPPORTED_1000baseT_Half | 2670 SUPPORTED_1000baseT_Full | 2671 SUPPORTED_FIBRE | SUPPORTED_Autoneg | 2672 SUPPORTED_Pause | SUPPORTED_Asym_Pause); 2673 cmd->advertising = cmd->supported; 2674 cmd->transceiver = XCVR_INTERNAL; 2675 if (gp->lstate == link_up) 2676 cmd->speed = SPEED_1000; 2677 cmd->duplex = DUPLEX_FULL; 2678 cmd->autoneg = 1; 2679 } 2680 } 2681 cmd->maxtxpkt = cmd->maxrxpkt = 0; 2682 2683 return 0; 2684} 2685 2686static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2687{ 2688 struct gem *gp = netdev_priv(dev); 2689 2690 /* Verify the settings we care about. */ 2691 if (cmd->autoneg != AUTONEG_ENABLE && 2692 cmd->autoneg != AUTONEG_DISABLE) 2693 return -EINVAL; 2694 2695 if (cmd->autoneg == AUTONEG_ENABLE && 2696 cmd->advertising == 0) 2697 return -EINVAL; 2698 2699 if (cmd->autoneg == AUTONEG_DISABLE && 2700 ((cmd->speed != SPEED_1000 && 2701 cmd->speed != SPEED_100 && 2702 cmd->speed != SPEED_10) || 2703 (cmd->duplex != DUPLEX_HALF && 2704 cmd->duplex != DUPLEX_FULL))) 2705 return -EINVAL; 2706 2707 /* Apply settings and restart link process. */ 2708 spin_lock_irq(&gp->lock); 2709 gem_get_cell(gp); 2710 gem_begin_auto_negotiation(gp, cmd); 2711 gem_put_cell(gp); 2712 spin_unlock_irq(&gp->lock); 2713 2714 return 0; 2715} 2716 2717static int gem_nway_reset(struct net_device *dev) 2718{ 2719 struct gem *gp = netdev_priv(dev); 2720 2721 if (!gp->want_autoneg) 2722 return -EINVAL; 2723 2724 /* Restart link process. */ 2725 spin_lock_irq(&gp->lock); 2726 gem_get_cell(gp); 2727 gem_begin_auto_negotiation(gp, NULL); 2728 gem_put_cell(gp); 2729 spin_unlock_irq(&gp->lock); 2730 2731 return 0; 2732} 2733 2734static u32 gem_get_msglevel(struct net_device *dev) 2735{ 2736 struct gem *gp = netdev_priv(dev); 2737 return gp->msg_enable; 2738} 2739 2740static void gem_set_msglevel(struct net_device *dev, u32 value) 2741{ 2742 struct gem *gp = netdev_priv(dev); 2743 gp->msg_enable = value; 2744} 2745 2746 2747/* Add more when I understand how to program the chip */ 2748/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */ 2749 2750#define WOL_SUPPORTED_MASK (WAKE_MAGIC) 2751 2752static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2753{ 2754 struct gem *gp = netdev_priv(dev); 2755 2756 /* Add more when I understand how to program the chip */ 2757 if (gp->has_wol) { 2758 wol->supported = WOL_SUPPORTED_MASK; 2759 wol->wolopts = gp->wake_on_lan; 2760 } else { 2761 wol->supported = 0; 2762 wol->wolopts = 0; 2763 } 2764} 2765 2766static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2767{ 2768 struct gem *gp = netdev_priv(dev); 2769 2770 if (!gp->has_wol) 2771 return -EOPNOTSUPP; 2772 gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK; 2773 return 0; 2774} 2775 2776static const struct ethtool_ops gem_ethtool_ops = { 2777 .get_drvinfo = gem_get_drvinfo, 2778 .get_link = ethtool_op_get_link, 2779 .get_settings = gem_get_settings, 2780 .set_settings = gem_set_settings, 2781 .nway_reset = gem_nway_reset, 2782 .get_msglevel = gem_get_msglevel, 2783 .set_msglevel = gem_set_msglevel, 2784 .get_wol = gem_get_wol, 2785 .set_wol = gem_set_wol, 2786}; 2787 2788static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2789{ 2790 struct gem *gp = netdev_priv(dev); 2791 struct mii_ioctl_data *data = if_mii(ifr); 2792 int rc = -EOPNOTSUPP; 2793 unsigned long flags; 2794 2795 /* Hold the PM mutex while doing ioctl's or we may collide 2796 * with power management. 2797 */ 2798 mutex_lock(&gp->pm_mutex); 2799 2800 spin_lock_irqsave(&gp->lock, flags); 2801 gem_get_cell(gp); 2802 spin_unlock_irqrestore(&gp->lock, flags); 2803 2804 switch (cmd) { 2805 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 2806 data->phy_id = gp->mii_phy_addr; 2807 /* Fallthrough... */ 2808 2809 case SIOCGMIIREG: /* Read MII PHY register. */ 2810 if (!gp->running) 2811 rc = -EAGAIN; 2812 else { 2813 data->val_out = __phy_read(gp, data->phy_id & 0x1f, 2814 data->reg_num & 0x1f); 2815 rc = 0; 2816 } 2817 break; 2818 2819 case SIOCSMIIREG: /* Write MII PHY register. */ 2820 if (!gp->running) 2821 rc = -EAGAIN; 2822 else { 2823 __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, 2824 data->val_in); 2825 rc = 0; 2826 } 2827 break; 2828 }; 2829 2830 spin_lock_irqsave(&gp->lock, flags); 2831 gem_put_cell(gp); 2832 spin_unlock_irqrestore(&gp->lock, flags); 2833 2834 mutex_unlock(&gp->pm_mutex); 2835 2836 return rc; 2837} 2838 2839#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC)) 2840/* Fetch MAC address from vital product data of PCI ROM. */ 2841static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) 2842{ 2843 int this_offset; 2844 2845 for (this_offset = 0x20; this_offset < len; this_offset++) { 2846 void __iomem *p = rom_base + this_offset; 2847 int i; 2848 2849 if (readb(p + 0) != 0x90 || 2850 readb(p + 1) != 0x00 || 2851 readb(p + 2) != 0x09 || 2852 readb(p + 3) != 0x4e || 2853 readb(p + 4) != 0x41 || 2854 readb(p + 5) != 0x06) 2855 continue; 2856 2857 this_offset += 6; 2858 p += 6; 2859 2860 for (i = 0; i < 6; i++) 2861 dev_addr[i] = readb(p + i); 2862 return 1; 2863 } 2864 return 0; 2865} 2866 2867static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) 2868{ 2869 size_t size; 2870 void __iomem *p = pci_map_rom(pdev, &size); 2871 2872 if (p) { 2873 int found; 2874 2875 found = readb(p) == 0x55 && 2876 readb(p + 1) == 0xaa && 2877 find_eth_addr_in_vpd(p, (64 * 1024), dev_addr); 2878 pci_unmap_rom(pdev, p); 2879 if (found) 2880 return; 2881 } 2882 2883 /* Sun MAC prefix then 3 random bytes. */ 2884 dev_addr[0] = 0x08; 2885 dev_addr[1] = 0x00; 2886 dev_addr[2] = 0x20; 2887 get_random_bytes(dev_addr + 3, 3); 2888} 2889#endif /* not Sparc and not PPC */ 2890 2891static int __devinit gem_get_device_address(struct gem *gp) 2892{ 2893#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC) 2894 struct net_device *dev = gp->dev; 2895 const unsigned char *addr; 2896 2897 addr = of_get_property(gp->of_node, "local-mac-address", NULL); 2898 if (addr == NULL) { 2899#ifdef CONFIG_SPARC 2900 addr = idprom->id_ethaddr; 2901#else 2902 printk("\n"); 2903 pr_err("%s: can't get mac-address\n", dev->name); 2904 return -1; 2905#endif 2906 } 2907 memcpy(dev->dev_addr, addr, 6); 2908#else 2909 get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); 2910#endif 2911 return 0; 2912} 2913 2914static void gem_remove_one(struct pci_dev *pdev) 2915{ 2916 struct net_device *dev = pci_get_drvdata(pdev); 2917 2918 if (dev) { 2919 struct gem *gp = netdev_priv(dev); 2920 2921 unregister_netdev(dev); 2922 2923 /* Stop the link timer */ 2924 del_timer_sync(&gp->link_timer); 2925 2926 /* We shouldn't need any locking here */ 2927 gem_get_cell(gp); 2928 2929 /* Cancel reset task */ 2930 cancel_work_sync(&gp->reset_task); 2931 2932 /* Shut the PHY down */ 2933 gem_stop_phy(gp, 0); 2934 2935 gem_put_cell(gp); 2936 2937 /* Make sure bus master is disabled */ 2938 pci_disable_device(gp->pdev); 2939 2940 /* Free resources */ 2941 pci_free_consistent(pdev, 2942 sizeof(struct gem_init_block), 2943 gp->init_block, 2944 gp->gblock_dvma); 2945 iounmap(gp->regs); 2946 pci_release_regions(pdev); 2947 free_netdev(dev); 2948 2949 pci_set_drvdata(pdev, NULL); 2950 } 2951} 2952 2953static const struct net_device_ops gem_netdev_ops = { 2954 .ndo_open = gem_open, 2955 .ndo_stop = gem_close, 2956 .ndo_start_xmit = gem_start_xmit, 2957 .ndo_get_stats = gem_get_stats, 2958 .ndo_set_multicast_list = gem_set_multicast, 2959 .ndo_do_ioctl = gem_ioctl, 2960 .ndo_tx_timeout = gem_tx_timeout, 2961 .ndo_change_mtu = gem_change_mtu, 2962 .ndo_validate_addr = eth_validate_addr, 2963 .ndo_set_mac_address = gem_set_mac_address, 2964#ifdef CONFIG_NET_POLL_CONTROLLER 2965 .ndo_poll_controller = gem_poll_controller, 2966#endif 2967}; 2968 2969static int __devinit gem_init_one(struct pci_dev *pdev, 2970 const struct pci_device_id *ent) 2971{ 2972 unsigned long gemreg_base, gemreg_len; 2973 struct net_device *dev; 2974 struct gem *gp; 2975 int err, pci_using_dac; 2976 2977 printk_once(KERN_INFO "%s", version); 2978 2979 /* Apple gmac note: during probe, the chip is powered up by 2980 * the arch code to allow the code below to work (and to let 2981 * the chip be probed on the config space. It won't stay powered 2982 * up until the interface is brought up however, so we can't rely 2983 * on register configuration done at this point. 2984 */ 2985 err = pci_enable_device(pdev); 2986 if (err) { 2987 pr_err("Cannot enable MMIO operation, aborting\n"); 2988 return err; 2989 } 2990 pci_set_master(pdev); 2991 2992 /* Configure DMA attributes. */ 2993 2994 /* All of the GEM documentation states that 64-bit DMA addressing 2995 * is fully supported and should work just fine. However the 2996 * front end for RIO based GEMs is different and only supports 2997 * 32-bit addressing. 2998 * 2999 * For now we assume the various PPC GEMs are 32-bit only as well. 3000 */ 3001 if (pdev->vendor == PCI_VENDOR_ID_SUN && 3002 pdev->device == PCI_DEVICE_ID_SUN_GEM && 3003 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3004 pci_using_dac = 1; 3005 } else { 3006 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3007 if (err) { 3008 pr_err("No usable DMA configuration, aborting\n"); 3009 goto err_disable_device; 3010 } 3011 pci_using_dac = 0; 3012 } 3013 3014 gemreg_base = pci_resource_start(pdev, 0); 3015 gemreg_len = pci_resource_len(pdev, 0); 3016 3017 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { 3018 pr_err("Cannot find proper PCI device base address, aborting\n"); 3019 err = -ENODEV; 3020 goto err_disable_device; 3021 } 3022 3023 dev = alloc_etherdev(sizeof(*gp)); 3024 if (!dev) { 3025 pr_err("Etherdev alloc failed, aborting\n"); 3026 err = -ENOMEM; 3027 goto err_disable_device; 3028 } 3029 SET_NETDEV_DEV(dev, &pdev->dev); 3030 3031 gp = netdev_priv(dev); 3032 3033 err = pci_request_regions(pdev, DRV_NAME); 3034 if (err) { 3035 pr_err("Cannot obtain PCI resources, aborting\n"); 3036 goto err_out_free_netdev; 3037 } 3038 3039 gp->pdev = pdev; 3040 dev->base_addr = (long) pdev; 3041 gp->dev = dev; 3042 3043 gp->msg_enable = DEFAULT_MSG; 3044 3045 spin_lock_init(&gp->lock); 3046 spin_lock_init(&gp->tx_lock); 3047 mutex_init(&gp->pm_mutex); 3048 3049 init_timer(&gp->link_timer); 3050 gp->link_timer.function = gem_link_timer; 3051 gp->link_timer.data = (unsigned long) gp; 3052 3053 INIT_WORK(&gp->reset_task, gem_reset_task); 3054 3055 gp->lstate = link_down; 3056 gp->timer_ticks = 0; 3057 netif_carrier_off(dev); 3058 3059 gp->regs = ioremap(gemreg_base, gemreg_len); 3060 if (!gp->regs) { 3061 pr_err("Cannot map device registers, aborting\n"); 3062 err = -EIO; 3063 goto err_out_free_res; 3064 } 3065 3066 /* On Apple, we want a reference to the Open Firmware device-tree 3067 * node. We use it for clock control. 3068 */ 3069#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) 3070 gp->of_node = pci_device_to_OF_node(pdev); 3071#endif 3072 3073 /* Only Apple version supports WOL afaik */ 3074 if (pdev->vendor == PCI_VENDOR_ID_APPLE) 3075 gp->has_wol = 1; 3076 3077 /* Make sure cell is enabled */ 3078 gem_get_cell(gp); 3079 3080 /* Make sure everything is stopped and in init state */ 3081 gem_reset(gp); 3082 3083 /* Fill up the mii_phy structure (even if we won't use it) */ 3084 gp->phy_mii.dev = dev; 3085 gp->phy_mii.mdio_read = _phy_read; 3086 gp->phy_mii.mdio_write = _phy_write; 3087#ifdef CONFIG_PPC_PMAC 3088 gp->phy_mii.platform_data = gp->of_node; 3089#endif 3090 /* By default, we start with autoneg */ 3091 gp->want_autoneg = 1; 3092 3093 /* Check fifo sizes, PHY type, etc... */ 3094 if (gem_check_invariants(gp)) { 3095 err = -ENODEV; 3096 goto err_out_iounmap; 3097 } 3098 3099 /* It is guaranteed that the returned buffer will be at least 3100 * PAGE_SIZE aligned. 3101 */ 3102 gp->init_block = (struct gem_init_block *) 3103 pci_alloc_consistent(pdev, sizeof(struct gem_init_block), 3104 &gp->gblock_dvma); 3105 if (!gp->init_block) { 3106 pr_err("Cannot allocate init block, aborting\n"); 3107 err = -ENOMEM; 3108 goto err_out_iounmap; 3109 } 3110 3111 if (gem_get_device_address(gp)) 3112 goto err_out_free_consistent; 3113 3114 dev->netdev_ops = &gem_netdev_ops; 3115 netif_napi_add(dev, &gp->napi, gem_poll, 64); 3116 dev->ethtool_ops = &gem_ethtool_ops; 3117 dev->watchdog_timeo = 5 * HZ; 3118 dev->irq = pdev->irq; 3119 dev->dma = 0; 3120 3121 /* Set that now, in case PM kicks in now */ 3122 pci_set_drvdata(pdev, dev); 3123 3124 /* Detect & init PHY, start autoneg, we release the cell now 3125 * too, it will be managed by whoever needs it 3126 */ 3127 gem_init_phy(gp); 3128 3129 spin_lock_irq(&gp->lock); 3130 gem_put_cell(gp); 3131 spin_unlock_irq(&gp->lock); 3132 3133 /* Register with kernel */ 3134 if (register_netdev(dev)) { 3135 pr_err("Cannot register net device, aborting\n"); 3136 err = -ENOMEM; 3137 goto err_out_free_consistent; 3138 } 3139 3140 netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n", 3141 dev->dev_addr); 3142 3143 if (gp->phy_type == phy_mii_mdio0 || 3144 gp->phy_type == phy_mii_mdio1) 3145 netdev_info(dev, "Found %s PHY\n", 3146 gp->phy_mii.def ? gp->phy_mii.def->name : "no"); 3147 3148 /* GEM can do it all... */ 3149 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX; 3150 if (pci_using_dac) 3151 dev->features |= NETIF_F_HIGHDMA; 3152 3153 return 0; 3154 3155err_out_free_consistent: 3156 gem_remove_one(pdev); 3157err_out_iounmap: 3158 gem_put_cell(gp); 3159 iounmap(gp->regs); 3160 3161err_out_free_res: 3162 pci_release_regions(pdev); 3163 3164err_out_free_netdev: 3165 free_netdev(dev); 3166err_disable_device: 3167 pci_disable_device(pdev); 3168 return err; 3169 3170} 3171 3172 3173static struct pci_driver gem_driver = { 3174 .name = GEM_MODULE_NAME, 3175 .id_table = gem_pci_tbl, 3176 .probe = gem_init_one, 3177 .remove = gem_remove_one, 3178#ifdef CONFIG_PM 3179 .suspend = gem_suspend, 3180 .resume = gem_resume, 3181#endif /* CONFIG_PM */ 3182}; 3183 3184static int __init gem_init(void) 3185{ 3186 return pci_register_driver(&gem_driver); 3187} 3188 3189static void __exit gem_cleanup(void) 3190{ 3191 pci_unregister_driver(&gem_driver); 3192} 3193 3194module_init(gem_init); 3195module_exit(gem_cleanup);