sungem.c at v2.6.32-rc4 (3243 lines, 82 kB), from the Linux kernel mirror: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
 * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp.
 *
 * NAPI and NETPOLL support
 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
 *
 * TODO:
 *  - Now that the driver was significantly simplified, I need to rework
 *    the locking. I'm sure we don't need _2_ spinlocks, and we probably
 *    can avoid taking most of them for so long a period of time (and
 *    schedule instead). The main issues at this point are caused by the
 *    netdev layer though:
 *
 *    gem_change_mtu() and gem_set_multicast() are called with a read_lock()
 *    held by net/core/dev.c, thus they can't schedule. That means they
 *    can't call napi_disable() either, thus forcing gem_poll() to keep a
 *    spinlock where it could have been dropped. change_mtu especially
 *    would also love to be able to msleep instead of horrid locked delays
 *    when resetting the HW, but that read_lock() makes it impossible,
 *    unless I defer its action to the reset task, which means it'll be
 *    asynchronous (won't take effect until the system schedules a bit).
 *
 *    Also, it would probably be possible to remove most of the long-life
 *    locking in the open/resume code path (gem_reinit_chip) by being more
 *    careful about when we can start taking interrupts or get xmit()
 *    called...
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#ifdef CONFIG_PPC_PMAC
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include "sungem_phy.h"
#include "sungem.h"

/* Stripping FCS is causing problems, disabled for now */
#undef STRIP_FCS

#define DEFAULT_MSG	(NETIF_MSG_DRV		| \
			 NETIF_MSG_PROBE	| \
			 NETIF_MSG_LINK)

#define ADVERTISE_MASK	(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
			 SUPPORTED_Pause | SUPPORTED_Autoneg)

#define DRV_NAME	"sungem"
#define DRV_VERSION	"0.98"
#define DRV_RELDATE	"8/24/03"
#define DRV_AUTHOR	"David S. Miller (davem@redhat.com)"
static char version[] __devinitdata =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");

#define GEM_MODULE_NAME	"gem"
#define PFX GEM_MODULE_NAME ": "

static struct pci_device_id gem_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

	/* These models only differ from the original GEM in
	 * that their tx/rx fifos are of a different size and
	 * they only support 10/100 speeds. -DaveM
	 *
	 * Apple's GMAC does support gigabit on machines with
	 * the BCM54xx PHYs. -BenH
	 */
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
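/* Editorial note on the accessors below: PHY registers are reached
 * through the MIF "frame" register. Bit 30 is the start-of-frame
 * marker, bits 29:28 encode the operation (2 = read, 1 = write), the
 * PHY address is shifted to bit 23 and the register number to bit 18,
 * followed by the turnaround bits. Completion is signalled by the chip
 * setting MIF_FRAME_TALSB, which the helpers poll for with a bounded
 * udelay() loop (a read that never completes returns 0xffff).
 */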
static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (2 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	writel(cmd, gp->regs + MIF_FRAME);

	while (--limit) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}

	if (!limit)
		cmd = 0xffff;

	return cmd & MIF_FRAME_DATA;
}

static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
{
	struct gem *gp = netdev_priv(dev);
	return __phy_read(gp, mii_id, reg);
}

static inline u16 phy_read(struct gem *gp, int reg)
{
	return __phy_read(gp, gp->mii_phy_addr, reg);
}

static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (1 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	cmd |= (val & MIF_FRAME_DATA);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}
}

static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct gem *gp = netdev_priv(dev);
	__phy_write(gp, mii_id, reg, val & 0xffff);
}

static inline void phy_write(struct gem *gp, int reg, u16 val)
{
	__phy_write(gp, gp->mii_phy_addr, reg, val);
}

static inline void gem_enable_ints(struct gem *gp)
{
	/* Enable all interrupts but TXDONE */
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

static inline void gem_disable_ints(struct gem *gp)
{
	/* Disable all interrupts, including TXDONE */
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

/* Turn on the chip's clock */
static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

/* Turn off the chip's clock */
static void gem_put_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled <= 0);
	gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 0) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}

static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
	u32 pcs_miistat;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
			gp->dev->name, pcs_istat);

	if (!(pcs_istat & PCS_ISTAT_LSC)) {
		printk(KERN_ERR "%s: PCS irq but no link status change???\n",
		       dev->name);
		return 0;
	}

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	pcs_miistat = readl(gp->regs + PCS_MIISTAT);
	if (!(pcs_miistat & PCS_MIISTAT_LS))
		pcs_miistat |=
			(readl(gp->regs + PCS_MIISTAT) &
			 PCS_MIISTAT_LS);

	if (pcs_miistat & PCS_MIISTAT_ANC) {
		/* The remote-fault indication is only valid
		 * when autoneg has completed.
		 */
		if (pcs_miistat & PCS_MIISTAT_RF)
			printk(KERN_INFO "%s: PCS AutoNEG complete, "
			       "RemoteFault\n", dev->name);
		else
			printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
			       dev->name);
	}

	if (pcs_miistat & PCS_MIISTAT_LS) {
		printk(KERN_INFO "%s: PCS link is now up.\n",
		       dev->name);
		netif_carrier_on(gp->dev);
	} else {
		printk(KERN_INFO "%s: PCS link is now down.\n",
		       dev->name);
		netif_carrier_off(gp->dev);
		/* If this happens and the link timer is not running,
		 * reset so we re-negotiate.
		 */
		if (!timer_pending(&gp->link_timer))
			return 1;
	}

	return 0;
}

static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			gp->dev->name, txmac_stat);

	/* Deferred timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TXSTAT_DTE) &&
	    !(txmac_stat & ~MAC_TXSTAT_DTE))
		return 0;

	if (txmac_stat & MAC_TXSTAT_URUN) {
		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
		       dev->name);
		gp->net_stats.tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_MPE) {
		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
		       dev->name);
		gp->net_stats.tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TXSTAT_NCE)
		gp->net_stats.collisions += 0x10000;

	if (txmac_stat & MAC_TXSTAT_ECE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	if (txmac_stat & MAC_TXSTAT_LCE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	/* We do not keep track of MAC_TXSTAT_FCE and
	 * MAC_TXSTAT_PCE events.
	 */
	return 0;
}
/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int gem_rxmac_reset(struct gem *gp)
{
	struct net_device *dev = gp->dev;
	int limit, i;
	u64 desc_dma;
	u32 val;

	/* First, reset & disable MAC RX. */
	writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
	       gp->regs + MAC_RXCFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, gp->regs + RXDMA_CFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	udelay(5000);

	/* Execute RX reset command. */
	writel(gp->swrst_base | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX reset command will not execute, resetting "
		       "whole chip.\n", dev->name);
		return 1;
	}

	/* Refresh the RX ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd = &gp->init_block->rxd[i];

		if (gp->rx_skbs[i] == NULL) {
			printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
			       "whole chip.\n", dev->name);
			return 1;
		}

		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
	}
	gp->rx_new = gp->rx_old = 0;

	/* Now we must reprogram the rest of RX unit. */
	desc_dma = (u64) gp->gblock_dvma;
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	return 0;
}

static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
	int ret = 0;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
			gp->dev->name, rxmac_stat);

	if (rxmac_stat & MAC_RXSTAT_OFLW) {
		u32 smac = readl(gp->regs + MAC_SMACHINE);

		printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
		       dev->name, smac);
		gp->net_stats.rx_over_errors++;
		gp->net_stats.rx_fifo_errors++;

		ret = gem_rxmac_reset(gp);
	}

	if (rxmac_stat & MAC_RXSTAT_ACE)
		gp->net_stats.rx_frame_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_CCE)
		gp->net_stats.rx_crc_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_LCE)
		gp->net_stats.rx_length_errors += 0x10000;

	/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
	 * events.
	 */
	return ret;
}

static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
			gp->dev->name, mac_cstat);

	/* This interrupt is just for pause frame and pause
	 * tracking. It is useful for diagnostics and debug,
	 * but by default we will probably mask these events.
	 */
	if (mac_cstat & MAC_CSTAT_PS)
		gp->pause_entered++;

	if (mac_cstat & MAC_CSTAT_PRCV)
		gp->pause_last_time_recvd = (mac_cstat >> 16);

	return 0;
}

static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mif_status = readl(gp->regs + MIF_STATUS);
	u32 reg_val, changed_bits;

	reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
	changed_bits = (mif_status & MIF_STATUS_STAT);

	gem_handle_mif_event(gp, reg_val, changed_bits);

	return 0;
}

static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		printk(KERN_ERR "%s: PCI error [%04x] ",
		       dev->name, pci_estat);

		if (pci_estat & GREG_PCIESTAT_BADACK)
			printk("<No ACK64# during ABS64 cycle> ");
		if (pci_estat & GREG_PCIESTAT_DTRTO)
			printk("<Delayed transaction timeout> ");
		if (pci_estat & GREG_PCIESTAT_OTHER)
			printk("<other>");
		printk("\n");
	} else {
		pci_estat |= GREG_PCIESTAT_OTHER;
		printk(KERN_ERR "%s: PCI error\n", dev->name);
	}

	if (pci_estat & GREG_PCIESTAT_OTHER) {
		u16 pci_cfg_stat;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(gp->pdev, PCI_STATUS,
				     &pci_cfg_stat);
		printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
		       dev->name, pci_cfg_stat);
		if (pci_cfg_stat & PCI_STATUS_PARITY)
			printk(KERN_ERR "%s: PCI parity error detected.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI target abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI master acks target abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
			printk(KERN_ERR "%s: PCI master abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
			printk(KERN_ERR "%s: PCI system error SERR#.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
			printk(KERN_ERR "%s: PCI parity error.\n",
			       dev->name);

		/* Write the error bits back to clear them. */
		pci_cfg_stat &= (PCI_STATUS_PARITY |
				 PCI_STATUS_SIG_TARGET_ABORT |
				 PCI_STATUS_REC_TARGET_ABORT |
				 PCI_STATUS_REC_MASTER_ABORT |
				 PCI_STATUS_SIG_SYSTEM_ERROR |
				 PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(gp->pdev,
				      PCI_STATUS, pci_cfg_stat);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}
/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	if (gem_status & GREG_STAT_RXNOBUF) {
		/* Frame arrived, no free RX buffers available. */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
				gp->dev->name);
		gp->net_stats.rx_dropped++;
	}

	if (gem_status & GREG_STAT_RXTAGERR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				gp->dev->name);
		gp->net_stats.rx_errors++;

		goto do_reset;
	}

	if (gem_status & GREG_STAT_PCS) {
		if (gem_pcs_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_TXMAC) {
		if (gem_txmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_RXMAC) {
		if (gem_rxmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MAC) {
		if (gem_mac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MIF) {
		if (gem_mif_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_PCIERR) {
		if (gem_pci_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	return 0;

do_reset:
	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	return 1;
}
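/* Editorial note on the TX completion path below: the chip reports the
 * last completed descriptor in the TXNR field of GREG_STAT, and
 * gem_tx() walks the ring from tx_old up to that point, unmapping
 * each fragment and freeing the skb. A multi-fragment skb is only
 * reclaimed once all of its descriptors have completed; the ring
 * index arithmetic relies on TX_RING_SIZE being a power of two.
 */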
static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
			gp->dev->name, gem_status);

	entry = gp->tx_old;
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
		if (skb_shinfo(skb)->nr_frags) {
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		gp->net_stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
			entry = NEXT_TX(entry);
		}

		gp->net_stats.tx_packets++;
		dev_kfree_skb_irq(skb);
	}
	gp->tx_old = entry;

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dev);
}

static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	wmb();
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();
		writel(kick, gp->regs + RXDMA_KICK);
	}
}
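/* Editorial note on the RX path below: descriptors are owned by the
 * chip until it clears RXDCTRL_OWN in the status word. Frames larger
 * than RX_COPY_THRESHOLD are handed up whole and the ring slot is
 * refilled with a freshly allocated skb; smaller frames are copied
 * into a new skb so the original ring buffer can be reused as-is.
 */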
static int gem_rx(struct gem *gp, int work_to_do)
{
	int entry, drops, work_done = 0;
	u32 done;
	__sum16 csum;

	if (netif_msg_rx_status(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	done = readl(gp->regs + RXDMA_DONE);
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = le64_to_cpu(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
			break;

		/* When writing back RX descriptor, GEM writes status
		 * then buffer address, possibly in separate transactions.
		 * If we don't wait for the chip to write both, we could
		 * post a new buffer to this descriptor then have GEM spam
		 * on the buffer address. We sync on the RX completion
		 * register to prevent this from happening.
		 */
		if (entry == done) {
			done = readl(gp->regs + RXDMA_DONE);
			if (entry == done)
				break;
		}

		/* We can now account for the work we're about to do */
		work_done++;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			gp->net_stats.rx_errors++;
			if (len < ETH_ZLEN)
				gp->net_stats.rx_length_errors++;
			if (len & RXDCTRL_BAD)
				gp->net_stats.rx_crc_errors++;

			/* We'll just return it to GEM. */
		drop_it:
			gp->net_stats.rx_dropped++;
			goto next;
		}

		dma_addr = le64_to_cpu(rxd->buffer);
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			gp->rx_skbs[entry] = new_skb;
			new_skb->dev = gp->dev;
			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
							       virt_to_page(new_skb->data),
							       offset_in_page(new_skb->data),
							       RX_BUF_ALLOC_SIZE(gp),
							       PCI_DMA_FROMDEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
		skb->csum = csum_unfold(csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->protocol = eth_type_trans(skb, gp->dev);

		netif_receive_skb(skb);

		gp->net_stats.rx_packets++;
		gp->net_stats.rx_bytes += len;

	next:
		entry = NEXT_RX(entry);
	}

	gem_post_rxds(gp, entry);

	gp->rx_new = entry;

	if (drops)
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
		       gp->dev->name);

	return work_done;
}
static int gem_poll(struct napi_struct *napi, int budget)
{
	struct gem *gp = container_of(napi, struct gem, napi);
	struct net_device *dev = gp->dev;
	unsigned long flags;
	int work_done;

	/*
	 * NAPI locking nightmare: See comment at head of driver
	 */
	spin_lock_irqsave(&gp->lock, flags);

	work_done = 0;
	do {
		/* Handle anomalies */
		if (gp->status & GREG_STAT_ABNORMAL) {
			if (gem_abnormal_irq(dev, gp, gp->status))
				break;
		}

		/* Run TX completion thread */
		spin_lock(&gp->tx_lock);
		gem_tx(dev, gp, gp->status);
		spin_unlock(&gp->tx_lock);

		spin_unlock_irqrestore(&gp->lock, flags);

		/* Run RX thread. We don't use any locking here,
		 * code willing to do bad things - like cleaning the
		 * rx ring - must call napi_disable(), which
		 * schedule_timeout()'s if polling is already disabled.
		 */
		work_done += gem_rx(gp, budget - work_done);

		if (work_done >= budget)
			return work_done;

		spin_lock_irqsave(&gp->lock, flags);

		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

	__napi_complete(napi);
	gem_enable_ints(gp);

	spin_unlock_irqrestore(&gp->lock, flags);

	return work_done;
}

static irqreturn_t gem_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gem *gp = netdev_priv(dev);
	unsigned long flags;

	/* Swallow interrupts when shutting the chip down, though
	 * that shouldn't happen, we should have done free_irq() at
	 * this point...
	 */
	if (!gp->running)
		return IRQ_HANDLED;

	spin_lock_irqsave(&gp->lock, flags);

	if (napi_schedule_prep(&gp->napi)) {
		u32 gem_status = readl(gp->regs + GREG_STAT);

		if (gem_status == 0) {
			napi_enable(&gp->napi);
			spin_unlock_irqrestore(&gp->lock, flags);
			return IRQ_NONE;
		}
		gp->status = gem_status;
		gem_disable_ints(gp);
		__napi_schedule(&gp->napi);
	}

	spin_unlock_irqrestore(&gp->lock, flags);

	/* If polling was disabled at the time we received that
	 * interrupt, we may return IRQ_HANDLED here while we
	 * should return IRQ_NONE. No big deal...
	 */
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
	/* gem_interrupt() is safe to call reentrantly, so there
	 * is no need to disable_irq here.
	 */
	gem_interrupt(dev->irq, dev);
}
#endif

static void gem_tx_timeout(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	if (!gp->running) {
		printk("%s: hrm.. hw not running !\n", dev->name);
		return;
	}
	printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + TXDMA_CFG),
	       readl(gp->regs + MAC_TXSTAT),
	       readl(gp->regs + MAC_TXCFG));
	printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + RXDMA_CFG),
	       readl(gp->regs + MAC_RXSTAT),
	       readl(gp->regs + MAC_RXCFG));

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}

static __inline__ int gem_intme(int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_RING_SIZE>>1)-1)))
		return 1;

	return 0;
}
static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int entry;
	u64 ctrl;
	unsigned long flags;

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_transport_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = (TXDCTRL_CENAB |
			(csum_start_off << 15) |
			(csum_stuff_off << 21));
	}

	local_irq_save(flags);
	if (!spin_trylock(&gp->tx_lock)) {
		/* Tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}
	/* We raced with gem_do_stop() */
	if (!gp->running) {
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	entry = gp->tx_new;
	gp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct gem_txd *txd = &gp->init_block->txd[entry];
		dma_addr_t mapping;
		u32 len;

		len = skb->len;
		mapping = pci_map_page(gp->pdev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       len, PCI_DMA_TODEVICE);
		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
		if (gem_intme(entry))
			ctrl |= TXDCTRL_INTME;
		txd->buffer = cpu_to_le64(mapping);
		wmb();
		txd->control_word = cpu_to_le64(ctrl);
		entry = NEXT_TX(entry);
	} else {
		struct gem_txd *txd;
		u32 first_len;
		u64 intme;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		intme = 0;
		if (gem_intme(entry))
			intme |= TXDCTRL_INTME;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
					     offset_in_page(skb->data),
					     first_len, PCI_DMA_TODEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;
			u64 this_ctrl;

			len = this_frag->size;
			mapping = pci_map_page(gp->pdev,
					       this_frag->page,
					       this_frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			wmb();
			txd->control_word = cpu_to_le64(this_ctrl | len);

			if (gem_intme(entry))
				intme |= TXDCTRL_INTME;

			entry = NEXT_TX(entry);
		}
		txd = &gp->init_block->txd[first_entry];
		txd->buffer = cpu_to_le64(first_mapping);
		wmb();
		txd->control_word =
			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
	}

	gp->tx_new = entry;
	if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	if (netif_msg_tx_queued(gp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	mb();
	writel(gp->tx_new, gp->regs + TXDMA_KICK);
	spin_unlock_irqrestore(&gp->tx_lock, flags);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

static void gem_pcs_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Reset PCS unit. */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= PCS_MIICTRL_RST;
	writel(val, gp->regs + PCS_MIICTRL);

	limit = 32;
	while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
		udelay(100);
		if (limit-- <= 0)
			break;
	}
	if (limit < 0)
		printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
		       gp->dev->name);
}

static void gem_pcs_reinit_adv(struct gem *gp)
{
	u32 val;

	/* Make sure PCS is disabled while changing advertisement
	 * configuration.
	 */
	val = readl(gp->regs + PCS_CFG);
	val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
	writel(val, gp->regs + PCS_CFG);

	/* Advertise all capabilities except asymmetric
	 * pause.
	 */
	val = readl(gp->regs + PCS_MIIADV);
	val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
		PCS_MIIADV_SP | PCS_MIIADV_AP);
	writel(val, gp->regs + PCS_MIIADV);

	/* Enable and restart auto-negotiation, disable wrapback/loopback,
	 * and re-enable PCS.
	 */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
	val &= ~PCS_MIICTRL_WB;
	writel(val, gp->regs + PCS_MIICTRL);

	val = readl(gp->regs + PCS_CFG);
	val |= PCS_CFG_ENABLE;
	writel(val, gp->regs + PCS_CFG);

	/* Make sure serialink loopback is off. The meaning
	 * of this bit is logically inverted based upon whether
	 * you are in Serialink or SERDES mode.
	 */
	val = readl(gp->regs + PCS_SCTRL);
	if (gp->phy_type == phy_serialink)
		val &= ~PCS_SCTRL_LOOP;
	else
		val |= PCS_SCTRL_LOOP;
	writel(val, gp->regs + PCS_SCTRL);
}

#define STOP_TRIES 32
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Make sure we won't get any more interrupts */
	writel(0xffffffff, gp->regs + GREG_IMASK);

	/* Reset the chip */
	writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);

	limit = STOP_TRIES;

	do {
		udelay(20);
		val = readl(gp->regs + GREG_SWRST);
		if (limit-- <= 0)
			break;
	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

	if (limit < 0)
		printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);

	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
		gem_pcs_reinit_adv(gp);
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_start_dma(struct gem *gp)
{
	u32 val;

	/* We are ready to rock, turn everything on. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	gem_enable_ints(gp);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}

/* Must be invoked under gp->lock and gp->tx_lock. DMA won't be
 * actually stopped before about 4ms though ...
 */
static void gem_stop_dma(struct gem *gp)
{
	u32 val;

	/* We are done rocking, turn everything off. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);

	/* Need to wait a bit ... done by the caller */
}
/* Must be invoked under gp->lock and gp->tx_lock. */
// XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{
	u32 advertise, features;
	int autoneg;
	int speed;
	int duplex;

	if (gp->phy_type != phy_mii_mdio0 &&
	    gp->phy_type != phy_mii_mdio1)
		goto non_mii;

	/* Setup advertise */
	if (found_mii_phy(gp))
		features = gp->phy_mii.def->features;
	else
		features = 0;

	advertise = features & ADVERTISE_MASK;
	if (gp->phy_mii.advertising != 0)
		advertise &= gp->phy_mii.advertising;

	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	if (ep->autoneg == AUTONEG_ENABLE) {
		advertise = ep->advertising;
		autoneg = 1;
	} else {
		autoneg = 0;
		speed = ep->speed;
		duplex = ep->duplex;
	}

start_aneg:
	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		autoneg = 0;
	if (speed == SPEED_1000 &&
	    !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
		speed = SPEED_100;
	if (speed == SPEED_100 &&
	    !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
		speed = SPEED_10;
	if (duplex == DUPLEX_FULL &&
	    !(features & (SUPPORTED_1000baseT_Full |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Full)))
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	/* If we are asleep, we don't try to actually setup the PHY, we
	 * just store the settings
	 */
	if (gp->asleep) {
		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
		gp->phy_mii.speed = speed;
		gp->phy_mii.duplex = duplex;
		return;
	}

	/* Configure PHY & start aneg */
	gp->want_autoneg = autoneg;
	if (autoneg) {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
		gp->lstate = link_aneg;
	} else {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
		gp->lstate = link_force_ok;
	}

non_mii:
	gp->timer_ticks = 0;
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 *
 * Must be invoked under gp->lock and gp->tx_lock.
 */
static int gem_set_link_modes(struct gem *gp)
{
	u32 val;
	int full_duplex, speed, pause;

	full_duplex = 0;
	speed = SPEED_10;
	pause = 0;

	if (found_mii_phy(gp)) {
		if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
			return 1;
		full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
		speed = gp->phy_mii.speed;
		pause = gp->phy_mii.pause;
	} else if (gp->phy_type == phy_serialink ||
		   gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
			full_duplex = 1;
		speed = SPEED_1000;
	}

	if (netif_msg_link(gp))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
			gp->dev->name, speed, (full_duplex ? "full" : "half"));

	if (!gp->running)
		return 0;

	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
	if (full_duplex) {
		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
	} else {
		/* MAC_TXCFG_NBO must be zero. */
	}
	writel(val, gp->regs + MAC_TXCFG);

	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
	if (!full_duplex &&
	    (gp->phy_type == phy_mii_mdio0 ||
	     gp->phy_type == phy_mii_mdio1)) {
		val |= MAC_XIFCFG_DISE;
	} else if (full_duplex) {
		val |= MAC_XIFCFG_FLED;
	}

	if (speed == SPEED_1000)
		val |= (MAC_XIFCFG_GMII);

	writel(val, gp->regs + MAC_XIFCFG);

	/* If gigabit and half-duplex, enable carrier extension
	 * mode. Else, disable it.
	 */
	if (speed == SPEED_1000 && !full_duplex) {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	} else {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	}

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
			pause = 1;
	}

	if (netif_msg_link(gp)) {
		if (pause) {
			printk(KERN_INFO "%s: Pause is enabled "
			       "(rxfifo: %d off: %d on: %d)\n",
			       gp->dev->name,
			       gp->rx_fifo_sz,
			       gp->rx_pause_off,
			       gp->rx_pause_on);
		} else {
			printk(KERN_INFO "%s: Pause is disabled\n",
			       gp->dev->name);
		}
	}

	if (!full_duplex)
		writel(512, gp->regs + MAC_STIME);
	else
		writel(64, gp->regs + MAC_STIME);
	val = readl(gp->regs + MAC_MCCFG);
	if (pause)
		val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	else
		val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	writel(val, gp->regs + MAC_MCCFG);

	gem_start_dma(gp);

	return 0;
}
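/* Editorial note on the fallback state machine below: when autoneg
 * fails on a PHY without the "magic_aneg" capability, the driver
 * forces 100Mbit/half first and, if the link still does not come up,
 * drops to 10Mbit/half, re-checking the link every few ticks of the
 * link timer.
 */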
/* Must be invoked under gp->lock and gp->tx_lock. */
static int gem_mdio_link_not_up(struct gem *gp)
{
	switch (gp->lstate) {
	case link_force_ret:
		if (netif_msg_link(gp))
			printk(KERN_INFO "%s: Autoneg failed again, keeping"
				" forced mode\n", gp->dev->name);
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
			gp->last_forced_speed, DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_ok;
		return 0;
	case link_aneg:
		/* We try forced modes after a failed aneg only on PHYs
		 * that don't have the "magic_aneg" bit set; PHYs that do
		 * set it handle the forced-mode fallback internally, so
		 * on those we just restart aneg.
		 */
		if (gp->phy_mii.def->magic_aneg)
			return 1;
		if (netif_msg_link(gp))
			printk(KERN_INFO "%s: switching to forced 100bt\n",
				gp->dev->name);
		/* Try forced modes. */
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
			DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_try;
		return 0;
	case link_force_try:
		/* Downgrade from 100 to 10 Mbps if necessary.
		 * If already at 10Mbps, warn user about the
		 * situation every 10 ticks.
		 */
		if (gp->phy_mii.speed == SPEED_100) {
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
				DUPLEX_HALF);
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: switching to forced 10bt\n",
					gp->dev->name);
			return 0;
		} else
			return 1;
	default:
		return 0;
	}
}
static void gem_link_timer(unsigned long data)
{
	struct gem *gp = (struct gem *) data;
	int restart_aneg = 0;

	if (gp->asleep)
		return;

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);
	gem_get_cell(gp);

	/* If the reset task is still pending, we just
	 * reschedule the link timer
	 */
	if (gp->reset_task_pending)
		goto restart;

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);

		if (!(val & PCS_MIISTAT_LS))
			val = readl(gp->regs + PCS_MIISTAT);

		if ((val & PCS_MIISTAT_LS) != 0) {
			if (gp->lstate == link_up)
				goto restart;

			gp->lstate = link_up;
			netif_carrier_on(gp->dev);
			(void)gem_set_link_modes(gp);
		}
		goto restart;
	}
	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we do
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if (gp->lstate == link_force_try && gp->want_autoneg) {
			gp->lstate = link_force_ret;
			gp->last_forced_speed = gp->phy_mii.speed;
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: Got link after fallback, retrying"
					" autoneg once...\n", gp->dev->name);
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
		} else if (gp->lstate != link_up) {
			gp->lstate = link_up;
			netif_carrier_on(gp->dev);
			if (gem_set_link_modes(gp))
				restart_aneg = 1;
		}
	} else {
		/* If the link was previously up, we restart the
		 * whole process
		 */
		if (gp->lstate == link_up) {
			gp->lstate = link_down;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: Link down\n",
					gp->dev->name);
			netif_carrier_off(gp->dev);
			gp->reset_task_pending = 1;
			schedule_work(&gp->reset_task);
			restart_aneg = 1;
		} else if (++gp->timer_ticks > 10) {
			if (found_mii_phy(gp))
				restart_aneg = gem_mdio_link_not_up(gp);
			else
				restart_aneg = 1;
		}
	}
	if (restart_aneg) {
		gem_begin_auto_negotiation(gp, NULL);
		goto out_unlock;
	}
restart:
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
out_unlock:
	gem_put_cell(gp);
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_clean_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct sk_buff *skb;
	int i;
	dma_addr_t dma_addr;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd;

		rxd = &gb->rxd[i];
		if (gp->rx_skbs[i] != NULL) {
			skb = gp->rx_skbs[i];
			dma_addr = le64_to_cpu(rxd->buffer);
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			gp->rx_skbs[i] = NULL;
		}
		rxd->status_word = 0;
		wmb();
		rxd->buffer = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbs[i] != NULL) {
			struct gem_txd *txd;
			int frag;

			skb = gp->tx_skbs[i];
			gp->tx_skbs[i] = NULL;

			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				int ent = i & (TX_RING_SIZE - 1);

				txd = &gb->txd[ent];
				dma_addr = le64_to_cpu(txd->buffer);
				pci_unmap_page(gp->pdev, dma_addr,
					       le64_to_cpu(txd->control_word) &
					       TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}
			dev_kfree_skb_any(skb);
		}
	}
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct net_device *dev = gp->dev;
	int i;
	dma_addr_t dma_addr;

	gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

	gem_clean_rings(gp);

	gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
			    (unsigned)VLAN_ETH_FRAME_LEN);

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		struct gem_rxd *rxd = &gb->rxd[i];

		skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
		if (!skb) {
			rxd->buffer = 0;
			rxd->status_word = 0;
			continue;
		}

		gp->rx_skbs[i] = skb;
		skb->dev = dev;
		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
		dma_addr = pci_map_page(gp->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					RX_BUF_ALLOC_SIZE(gp),
					PCI_DMA_FROMDEVICE);
		rxd->buffer = cpu_to_le64(dma_addr);
		wmb();
		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
		skb_reserve(skb, RX_OFFSET);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct gem_txd *txd = &gb->txd[i];

		txd->control_word = 0;
		wmb();
		txd->buffer = 0;
	}
	wmb();
}

/* Init PHY interface and start link poll state machine */
static void gem_init_phy(struct gem *gp)
{
	u32 mifcfg;

	/* Revert MIF CFG setting done on stop_phy */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_BBMODE;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
		int i;

		/* Those delays suck, but the HW seems to love them, so
		 * I'll seriously consider breaking some locks here to
		 * be able to schedule instead
		 */
		for (i = 0; i < 3; i++) {
#ifdef CONFIG_PPC_PMAC
			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
			msleep(20);
#endif
			/* Some PHYs used by Apple have a problem getting back
			 * to us, so we do an additional reset here
			 */
			phy_write(gp, MII_BMCR, BMCR_RESET);
			msleep(20);
			if (phy_read(gp, MII_BMCR) != 0xffff)
				break;
			if (i == 2)
				printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
				       gp->dev->name);
		}
	}

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		u32 val;

		/* Init datapath mode register. */
		if (gp->phy_type == phy_mii_mdio0 ||
		    gp->phy_type == phy_mii_mdio1) {
			val = PCS_DMODE_MGM;
		} else if (gp->phy_type == phy_serialink) {
			val = PCS_DMODE_SM | PCS_DMODE_GMOE;
		} else {
			val = PCS_DMODE_ESM;
		}

		writel(val, gp->regs + PCS_DMODE);
	}

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		// XXX check for errors
		mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);

		/* Init PHY */
		if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
			gp->phy_mii.def->ops->init(&gp->phy_mii);
	} else {
		gem_pcs_reset(gp);
		gem_pcs_reinit_adv(gp);
	}

	/* Default aneg parameters */
	gp->timer_ticks = 0;
	gp->lstate = link_down;
	netif_carrier_off(gp->dev);

	/* Can I advertise gigabit here ? I'd need BCM PHY docs... */
	spin_lock_irq(&gp->lock);
	gem_begin_auto_negotiation(gp, NULL);
	spin_unlock_irq(&gp->lock);
}
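/* Editorial note on the DMA setup below: the init block is a single
 * DMA-coherent allocation holding the TX ring followed by the RX
 * ring, so the RX descriptor base programmed here is just the block
 * address plus INIT_BLOCK_TX_RING_SIZE TX descriptors.
 */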
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_dma(struct gem *gp)
{
	u64 desc_dma = (u64) gp->gblock_dvma;
	u32 val;

	val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
	writel(val, gp->regs + TXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));

	writel(0, gp->regs + TXDMA_KICK);

	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);

	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);

	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
}
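/* Editorial note on the multicast filter below: with too many
 * addresses (mc_count > 256) or IFF_ALLMULTI, the 256-bit hash table
 * is simply filled with ones. Otherwise each multicast address is
 * hashed with the top 8 bits of its little-endian CRC-32, and the
 * matching bit is set in the sixteen 16-bit MAC_HASH registers.
 */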
/* Must be invoked under gp->lock and gp->tx_lock. */
static u32 gem_setup_multicast(struct gem *gp)
{
	u32 rxcfg = 0;
	int i;

	if ((gp->dev->flags & IFF_ALLMULTI) ||
	    (gp->dev->mc_count > 256)) {
		for (i = 0; i < 16; i++)
			writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	} else if (gp->dev->flags & IFF_PROMISC) {
		rxcfg |= MAC_RXCFG_PROM;
	} else {
		u16 hash_table[16];
		u32 crc;
		struct dev_mc_list *dmi = gp->dev->mc_list;
		int i;

		for (i = 0; i < 16; i++)
			hash_table[i] = 0;

		for (i = 0; i < gp->dev->mc_count; i++) {
			char *addrs = dmi->dmi_addr;

			dmi = dmi->next;

			if (!(*addrs & 1))
				continue;

			crc = ether_crc_le(6, addrs);
			crc >>= 24;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
		for (i = 0; i < 16; i++)
			writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	}

	return rxcfg;
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_mac(struct gem *gp)
{
	unsigned char *e = &gp->dev->dev_addr[0];

	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);

	writel(0x00, gp->regs + MAC_IPG0);
	writel(0x08, gp->regs + MAC_IPG1);
	writel(0x04, gp->regs + MAC_IPG2);
	writel(0x40, gp->regs + MAC_STIME);
	writel(0x40, gp->regs + MAC_MINFSZ);

	/* Ethernet payload + header + FCS + optional VLAN tag. */
	writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);

	writel(0x07, gp->regs + MAC_PASIZE);
	writel(0x04, gp->regs + MAC_JAMSIZE);
	writel(0x10, gp->regs + MAC_ATTLIM);
	writel(0x8808, gp->regs + MAC_MCTYPE);

	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	writel(0, gp->regs + MAC_ADDR3);
	writel(0, gp->regs + MAC_ADDR4);
	writel(0, gp->regs + MAC_ADDR5);

	writel(0x0001, gp->regs + MAC_ADDR6);
	writel(0xc200, gp->regs + MAC_ADDR7);
	writel(0x0180, gp->regs + MAC_ADDR8);

	writel(0, gp->regs + MAC_AFILT0);
	writel(0, gp->regs + MAC_AFILT1);
	writel(0, gp->regs + MAC_AFILT2);
	writel(0, gp->regs + MAC_AF21MSK);
	writel(0, gp->regs + MAC_AF0MSK);

	gp->mac_rx_cfg = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
#endif
	writel(0, gp->regs + MAC_NCOLL);
	writel(0, gp->regs + MAC_FASUCC);
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
	writel(0, gp->regs + MAC_DTIMER);
	writel(0, gp->regs + MAC_PATMPS);
	writel(0, gp->regs + MAC_RFCTR);
	writel(0, gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_RXCVERR);

	/* Clear RX/TX/MAC/XIF config, we will set these up and enable
	 * them once a link is established.
	 */
	writel(0, gp->regs + MAC_TXCFG);
	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
	writel(0, gp->regs + MAC_MCCFG);
	writel(0, gp->regs + MAC_XIFCFG);

	/* Setup MAC interrupts. We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, gp->regs + MAC_MCMASK);

	/* Don't enable GEM's WOL in normal operations
	 */
	if (gp->has_wol)
		writel(0, gp->regs + WOL_WAKECSR);
}
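/* Editorial worked example for the threshold math below: with the
 * standard 1500-byte MTU, rx_buf_sz is 1522, so
 * max_frame = (1522 + 4 + 64) & ~63 = 1536. On a 20kB-fifo GEM that
 * gives pause-off = 20480 - 2 * 1536 = 17408 and
 * pause-on = 17408 - 1536 = 15872 bytes, programmed into
 * RXDMA_PTHRESH in 64-byte units.
 */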
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_pause_thresholds(struct gem *gp)
{
	u32 cfg;

	/* Calculate pause thresholds. Setting the OFF threshold to the
	 * full RX fifo size effectively disables PAUSE generation which
	 * is what we do for 10/100 only GEMs which have FIFOs too small
	 * to make real gains from PAUSE.
	 */
	if (gp->rx_fifo_sz <= (2 * 1024)) {
		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
	} else {
		int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
		int off = (gp->rx_fifo_sz - (max_frame * 2));
		int on = off - max_frame;

		gp->rx_pause_off = off;
		gp->rx_pause_on = on;
	}


	/* Configure the chip "burst" DMA mode & enable some
	 * HW bug fixes on Apple version
	 */
	cfg = 0;
	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
		cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	cfg |= GREG_CFG_IBURST;
#endif
	cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
	cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
	writel(cfg, gp->regs + GREG_CFG);

	/* If Infinite Burst didn't stick, then use different
	 * thresholds (and Apple bug fixes don't exist)
	 */
	if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
		cfg  = ((2 << 1) & GREG_CFG_TXDMALIM);
		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
		writel(cfg, gp->regs + GREG_CFG);
	}
}
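/* Editorial note on the probe-time sanity checks below: the
 * TXDMA_FSZ/RXDMA_FSZ registers report fifo sizes in 64-byte units.
 * The expected values are 9kB TX / 20kB RX for the full GEM and
 * 2kB / 2kB for the 10/100-only RIO GEM variants.
 */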

static int gem_check_invariants(struct gem *gp)
{
	struct pci_dev *pdev = gp->pdev;
	u32 mif_cfg;

	/* On Apple's sungem, we can't rely on registers as the chip
	 * has been powered down by the firmware. The PHY is looked
	 * up later on.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
		gp->phy_type = phy_mii_mdio0;
		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
		gp->swrst_base = 0;

		mif_cfg = readl(gp->regs + MIF_CFG);
		mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
		mif_cfg |= MIF_CFG_MDI0;
		writel(mif_cfg, gp->regs + MIF_CFG);
		writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
		writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);

		/* We hard-code the PHY address so we can properly bring it out of
		 * reset later on, we can't really probe it at this point, though
		 * that isn't an issue.
		 */
		if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
			gp->mii_phy_addr = 1;
		else
			gp->mii_phy_addr = 0;

		return 0;
	}

	mif_cfg = readl(gp->regs + MIF_CFG);

	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
		/* One of the MII PHYs _must_ be present
		 * as this chip has no gigabit PHY.
		 */
		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
			printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
			       mif_cfg);
			return -1;
		}
	}

	/* Determine initial PHY interface type guess. MDIO1 is the
	 * external PHY and thus takes precedence over MDIO0.
	 */
	if (mif_cfg & MIF_CFG_MDI1) {
		gp->phy_type = phy_mii_mdio1;
		mif_cfg |= MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else if (mif_cfg & MIF_CFG_MDI0) {
		gp->phy_type = phy_mii_mdio0;
		mif_cfg &= ~MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else {
		gp->phy_type = phy_serialink;
	}
	if (gp->phy_type == phy_mii_mdio1 ||
	    gp->phy_type == phy_mii_mdio0) {
		int i;

		for (i = 0; i < 32; i++) {
			gp->mii_phy_addr = i;
			if (phy_read(gp, MII_BMCR) != 0xffff)
				break;
		}
		if (i == 32) {
			if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
				printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
				return -1;
			}
			gp->phy_type = phy_serdes;
		}
	}

	/* Fetch the FIFO configurations now too. */
	gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
	gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;

	if (pdev->vendor == PCI_VENDOR_ID_SUN) {
		if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
			if (gp->tx_fifo_sz != (9 * 1024) ||
			    gp->rx_fifo_sz != (20 * 1024)) {
				printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = 0;
		} else {
			if (gp->tx_fifo_sz != (2 * 1024) ||
			    gp->rx_fifo_sz != (2 * 1024)) {
				printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
		}
	}

	return 0;
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_reinit_chip(struct gem *gp)
{
	/* Reset the chip */
	gem_reset(gp);

	/* Make sure ints are disabled */
	gem_disable_ints(gp);

	/* Allocate & setup ring buffers */
	gem_init_rings(gp);

	/* Configure pause thresholds */
	gem_init_pause_thresholds(gp);

	/* Init DMA & MAC engines */
	gem_init_dma(gp);
	gem_init_mac(gp);
}
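
/* Editor's sketch (not part of the original driver): the PHY probe loop
 * in gem_check_invariants() above is the classic MDIO bus scan -- a read
 * from an unpopulated address floats the bus and comes back as all ones.
 * The same idea in stand-alone form, using phy_read()/MII_BMCR exactly
 * as above; the helper name is ours:
 */
static inline int gem_scan_mdio_sketch(struct gem *gp)
{
	int i;

	for (i = 0; i < 32; i++) {
		gp->mii_phy_addr = i;		/* phy_read() uses this */
		if (phy_read(gp, MII_BMCR) != 0xffff)
			return i;		/* a PHY responded */
	}
	return -1;	/* nothing answered; fall back to serdes/serialink */
}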

/* Must be invoked with no lock held. */
static void gem_stop_phy(struct gem *gp, int wol)
{
	u32 mifcfg;
	unsigned long flags;

	/* Let the chip settle down a bit, it seems that helps
	 * for sleep mode on some models
	 */
	msleep(10);

	/* Make sure we aren't polling PHY status change. We
	 * don't currently use that feature though
	 */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_POLL;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (wol && gp->has_wol) {
		unsigned char *e = &gp->dev->dev_addr[0];
		u32 csr;

		/* Setup wake-on-lan for MAGIC packet */
		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
		       gp->regs + MAC_RXCFG);
		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);

		writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
		csr = WOL_WAKECSR_ENABLE;
		if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
			csr |= WOL_WAKECSR_MII;
		writel(csr, gp->regs + WOL_WAKECSR);
	} else {
		writel(0, gp->regs + MAC_RXCFG);
		(void)readl(gp->regs + MAC_RXCFG);
		/* Machine sleep will die in strange ways if we
		 * don't wait a bit here, looks like the chip takes
		 * some time to really shut down
		 */
		msleep(10);
	}

	writel(0, gp->regs + MAC_TXCFG);
	writel(0, gp->regs + MAC_XIFCFG);
	writel(0, gp->regs + TXDMA_CFG);
	writel(0, gp->regs + RXDMA_CFG);

	if (!wol) {
		spin_lock_irqsave(&gp->lock, flags);
		spin_lock(&gp->tx_lock);
		gem_reset(gp);
		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
		spin_unlock(&gp->tx_lock);
		spin_unlock_irqrestore(&gp->lock, flags);

		/* No need to take the lock here */

		if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
			gp->phy_mii.def->ops->suspend(&gp->phy_mii);

		/* According to Apple, we must set the MDIO pins to this benign
		 * state or we may 1) eat more current, 2) damage some PHYs
		 */
		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
		writel(0, gp->regs + MIF_BBCLK);
		writel(0, gp->regs + MIF_BBDATA);
		writel(0, gp->regs + MIF_BBOENAB);
		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
		(void) readl(gp->regs + MAC_XIFCFG);
	}
}
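
/* Editor's note (not part of the original driver): the WOL arming above
 * only ever runs on the suspend path.  Userland opts in through the
 * ethtool WOL ops further down (gem_set_wol() stores the request in
 * gp->wake_on_lan); gem_suspend() latches that into gp->asleep_wol and
 * passes it here, so in practice something like
 *
 *	ethtool -s eth0 wol g
 *
 * is what makes this branch arm WOL_WAKECSR at the next sleep.  Only
 * magic-packet wake (WAKE_MAGIC) is supported, and only on the Apple
 * variants that set gp->has_wol.
 */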

static int gem_do_start(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);

	/* Enable the cell */
	gem_get_cell(gp);

	/* Init & setup chip hardware */
	gem_reinit_chip(gp);

	gp->running = 1;

	napi_enable(&gp->napi);

	if (gp->lstate == link_up) {
		netif_carrier_on(gp->dev);
		gem_set_link_modes(gp);
	}

	netif_wake_queue(gp->dev);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	if (request_irq(gp->pdev->irq, gem_interrupt,
			IRQF_SHARED, dev->name, (void *)dev)) {
		printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);

		spin_lock_irqsave(&gp->lock, flags);
		spin_lock(&gp->tx_lock);

		napi_disable(&gp->napi);

		gp->running = 0;
		gem_reset(gp);
		gem_clean_rings(gp);
		gem_put_cell(gp);

		spin_unlock(&gp->tx_lock);
		spin_unlock_irqrestore(&gp->lock, flags);

		return -EAGAIN;
	}

	return 0;
}

static void gem_do_stop(struct net_device *dev, int wol)
{
	struct gem *gp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);

	gp->running = 0;

	/* Stop netif queue */
	netif_stop_queue(dev);

	/* Make sure ints are disabled */
	gem_disable_ints(gp);

	/* We can drop the lock now */
	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	/* Stop DMA; skip the full reset if we are going to sleep with WOL */
	gem_stop_dma(gp);
	msleep(10);
	if (!wol)
		gem_reset(gp);
	msleep(10);

	/* Get rid of rings */
	gem_clean_rings(gp);

	/* No irq needed anymore */
	free_irq(gp->pdev->irq, (void *) dev);

	/* The cell isn't needed either, unless we are sleeping with WOL */
	if (!wol) {
		spin_lock_irqsave(&gp->lock, flags);
		gem_put_cell(gp);
		spin_unlock_irqrestore(&gp->lock, flags);
	}
}

static void gem_reset_task(struct work_struct *work)
{
	struct gem *gp = container_of(work, struct gem, reset_task);

	mutex_lock(&gp->pm_mutex);

	if (gp->opened)
		napi_disable(&gp->napi);

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	if (gp->running) {
		netif_stop_queue(gp->dev);

		/* Reset the chip & rings */
		gem_reinit_chip(gp);
		if (gp->lstate == link_up)
			gem_set_link_modes(gp);
		netif_wake_queue(gp->dev);
	}

	gp->reset_task_pending = 0;

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);

	if (gp->opened)
		napi_enable(&gp->napi);

	mutex_unlock(&gp->pm_mutex);
}
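
/* Editor's sketch (not part of the original driver): gem_reset_task()
 * above is the workqueue side of the deferred-reset machinery set up by
 * INIT_WORK() in gem_init_one() below.  The producer side lives in the
 * error/timeout paths earlier in this file; it amounts to roughly the
 * hypothetical helper below.  reset_task_pending is the flag that
 * gem_suspend() and gem_remove_one() poll on before tearing things down.
 */
static inline void gem_schedule_reset_sketch(struct gem *gp)
{
	gp->reset_task_pending = 1;	/* visible to the PM/remove paths */
	schedule_work(&gp->reset_task);	/* runs gem_reset_task() later */
}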

static int gem_open(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int rc = 0;

	mutex_lock(&gp->pm_mutex);

	/* We need the cell enabled */
	if (!gp->asleep)
		rc = gem_do_start(dev);
	gp->opened = (rc == 0);

	mutex_unlock(&gp->pm_mutex);

	return rc;
}

static int gem_close(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	mutex_lock(&gp->pm_mutex);

	napi_disable(&gp->napi);

	gp->opened = 0;
	if (!gp->asleep)
		gem_do_stop(dev, 0);

	mutex_unlock(&gp->pm_mutex);

	return 0;
}

#ifdef CONFIG_PM
static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);
	unsigned long flags;

	mutex_lock(&gp->pm_mutex);

	printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
	       dev->name,
	       (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");

	/* Keep the cell enabled during the entire operation */
	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);
	gem_get_cell(gp);
	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	/* If the driver is opened, we stop the MAC */
	if (gp->opened) {
		napi_disable(&gp->napi);

		/* Stop traffic, mark us closed */
		netif_device_detach(dev);

		/* Switch off MAC, remember WOL setting */
		gp->asleep_wol = gp->wake_on_lan;
		gem_do_stop(dev, gp->asleep_wol);
	} else
		gp->asleep_wol = 0;

	/* Mark us asleep */
	gp->asleep = 1;
	wmb();

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* Now we release the mutex so we don't block the reset task, which
	 * can take it too. We are marked asleep, so there will be no
	 * conflict here
	 */
	mutex_unlock(&gp->pm_mutex);

	/* Wait for a pending reset task to complete */
	while (gp->reset_task_pending)
		yield();
	flush_scheduled_work();

	/* Shut the PHY down eventually and setup WOL */
	gem_stop_phy(gp, gp->asleep_wol);

	/* Make sure bus master is disabled */
	pci_disable_device(gp->pdev);

	/* Release the cell, no need to take a lock at this point since
	 * nothing else can happen now
	 */
	gem_put_cell(gp);

	return 0;
}
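
/* Editor's note (not part of the original driver) on cell refcounting
 * across suspend: gem_suspend() above takes one cell reference up front
 * and drops it at the end, so its own count is balanced -- except when
 * WOL is armed, where gem_do_stop(dev, 1) skips its gem_put_cell() so
 * the cell clock keeps running while asleep.  gem_resume() below
 * rebalances that case with the extra put under gp->asleep_wol.
 */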

static int gem_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);
	unsigned long flags;

	printk(KERN_INFO "%s: resuming\n", dev->name);

	mutex_lock(&gp->pm_mutex);

	/* Keep the cell enabled during the entire operation, no need to
	 * take a lock here though since nothing else can happen while we
	 * are marked asleep
	 */
	gem_get_cell(gp);

	/* Make sure PCI access and bus master are enabled */
	if (pci_enable_device(gp->pdev)) {
		printk(KERN_ERR "%s: Can't re-enable chip !\n",
		       dev->name);
		/* Put cell and forget it for now, it will be considered as
		 * still asleep, a new sleep cycle may bring it back
		 */
		gem_put_cell(gp);
		mutex_unlock(&gp->pm_mutex);
		return 0;
	}
	pci_set_master(gp->pdev);

	/* Reset everything */
	gem_reset(gp);

	/* Mark us woken up */
	gp->asleep = 0;
	wmb();

	/* Bring the PHY back. Again, lock is useless at this point as
	 * nothing can be happening until we restart the whole thing
	 */
	gem_init_phy(gp);

	/* If we were opened, bring everything back */
	if (gp->opened) {
		/* Restart MAC */
		gem_do_start(dev);

		/* Re-attach net device */
		netif_device_attach(dev);
	}

	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);

	/* If we had WOL enabled, the cell clock was never turned off during
	 * sleep, so we end up being unbalanced. Fix that here
	 */
	if (gp->asleep_wol)
		gem_put_cell(gp);

	/* This function doesn't need to hold the cell, it will be held if the
	 * driver is open by gem_do_start().
	 */
	gem_put_cell(gp);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	mutex_unlock(&gp->pm_mutex);

	return 0;
}
#endif /* CONFIG_PM */

static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	struct net_device_stats *stats = &gp->net_stats;

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	/* I have seen this being called while the PM was in progress,
	 * so we shield against this
	 */
	if (gp->running) {
		stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
		writel(0, gp->regs + MAC_FCSERR);

		stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
		writel(0, gp->regs + MAC_AERR);

		stats->rx_length_errors += readl(gp->regs + MAC_LERR);
		writel(0, gp->regs + MAC_LERR);

		stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
		stats->collisions +=
			(readl(gp->regs + MAC_ECOLL) +
			 readl(gp->regs + MAC_LCOLL));
		writel(0, gp->regs + MAC_ECOLL);
		writel(0, gp->regs + MAC_LCOLL);
	}

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);

	return &gp->net_stats;
}

static int gem_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *macaddr = (struct sockaddr *) addr;
	struct gem *gp = netdev_priv(dev);
	unsigned char *e = &dev->dev_addr[0];

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	if (!netif_running(dev) || !netif_device_present(dev)) {
		/* We'll just catch it later when the
		 * device is up'd or resumed.
		 */
		memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
		return 0;
	}

	mutex_lock(&gp->pm_mutex);
	memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
	if (gp->running) {
		writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
		writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
		writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
	}
	mutex_unlock(&gp->pm_mutex);

	return 0;
}

static void gem_set_multicast(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	int limit = 10000;

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	if (!gp->running)
		goto bail;

	netif_stop_queue(dev);

	rxcfg = readl(gp->regs + MAC_RXCFG);
	rxcfg_new = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	rxcfg_new |= MAC_RXCFG_SFCS;
#endif
	gp->mac_rx_cfg = rxcfg_new;

	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
		if (!limit--)
			break;
		udelay(10);
	}

	rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
	rxcfg |= rxcfg_new;

	writel(rxcfg, gp->regs + MAC_RXCFG);

	netif_wake_queue(dev);

 bail:
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}
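
/* Editor's sketch (not part of the original driver): gem_setup_multicast()
 * called above lives earlier in this file.  The conventional GEM hash
 * filter takes the top 8 bits of the little-endian CRC32 of each
 * multicast address and sets one bit in a 256-bit table spread over
 * sixteen 16-bit MAC_HASH registers.  A sketch of that computation
 * against the 2.6.32-era mc_list API, assuming ether_crc_le() from
 * <linux/crc32.h> (already included above); the helper name is ours:
 */
static inline void gem_hash_mc_list_sketch(struct gem *gp)
{
	u16 hash_table[16] = { 0 };
	struct dev_mc_list *dmi;
	u32 crc;
	int i;

	for (dmi = gp->dev->mc_list; dmi; dmi = dmi->next) {
		crc = ether_crc_le(6, dmi->dmi_addr);
		crc >>= 24;				/* keep top 8 bits */
		hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
	}
	for (i = 0; i < 16; i++)
		writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
}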

/* Jumbo-grams don't seem to work :-( */
#define GEM_MIN_MTU	68
#if 1
#define GEM_MAX_MTU	1500
#else
#define GEM_MAX_MTU	9000
#endif

static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gem *gp = netdev_priv(dev);

	if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev) || !netif_device_present(dev)) {
		/* We'll just catch it later when the
		 * device is up'd or resumed.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	mutex_lock(&gp->pm_mutex);
	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);
	dev->mtu = new_mtu;
	if (gp->running) {
		gem_reinit_chip(gp);
		if (gp->lstate == link_up)
			gem_set_link_modes(gp);
	}
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
	mutex_unlock(&gp->pm_mutex);

	return 0;
}
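
/* Editor's note (not part of the original driver): there is no dedicated
 * "resize" register write here -- gem_reinit_chip() re-runs the whole
 * init sequence, and it is gem_init_mac() above that reprograms
 * MAC_MAXFSZ from rx_buf_sz, which appears to be derived from dev->mtu
 * elsewhere in this file.  GEM_MAX_MTU stays pinned to 1500 because, per
 * the comment above, jumbo frames never worked reliably on this
 * hardware.
 */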

static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct gem *gp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(gp->pdev));
}

static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gem *gp = netdev_priv(dev);

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		if (gp->phy_mii.def)
			cmd->supported = gp->phy_mii.def->features;
		else
			cmd->supported = (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full);

		/* XXX hardcoded stuff for now */
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->phy_address = 0; /* XXX fixed PHYAD */

		/* Return current PHY settings */
		spin_lock_irq(&gp->lock);
		cmd->autoneg = gp->want_autoneg;
		cmd->speed = gp->phy_mii.speed;
		cmd->duplex = gp->phy_mii.duplex;
		cmd->advertising = gp->phy_mii.advertising;

		/* If we started with a forced mode, we don't have a default
		 * advertise set, we need to return something sensible so
		 * userland can re-enable autoneg properly.
		 */
		if (cmd->advertising == 0)
			cmd->advertising = cmd->supported;
		spin_unlock_irq(&gp->lock);
	} else { // XXX PCS ?
		cmd->supported =
			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_Autoneg);
		cmd->advertising = cmd->supported;
		cmd->speed = 0;
		cmd->duplex = cmd->port = cmd->phy_address =
			cmd->transceiver = cmd->autoneg = 0;

		/* serdes usually means a fibre connector, with most
		 * parameters fixed
		 */
		if (gp->phy_type == phy_serdes) {
			cmd->port = PORT_FIBRE;
			cmd->supported = (SUPPORTED_1000baseT_Half |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_FIBRE | SUPPORTED_Autoneg |
					  SUPPORTED_Pause | SUPPORTED_Asym_Pause);
			cmd->advertising = cmd->supported;
			cmd->transceiver = XCVR_INTERNAL;
			if (gp->lstate == link_up)
				cmd->speed = SPEED_1000;
			cmd->duplex = DUPLEX_FULL;
			cmd->autoneg = 1;
		}
	}
	cmd->maxtxpkt = cmd->maxrxpkt = 0;

	return 0;
}

static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gem *gp = netdev_priv(dev);

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE &&
	    cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((cmd->speed != SPEED_1000 &&
	      cmd->speed != SPEED_100 &&
	      cmd->speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Apply settings and restart link process. */
	spin_lock_irq(&gp->lock);
	gem_get_cell(gp);
	gem_begin_auto_negotiation(gp, cmd);
	gem_put_cell(gp);
	spin_unlock_irq(&gp->lock);

	return 0;
}

static int gem_nway_reset(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->want_autoneg)
		return -EINVAL;

	/* Restart link process. */
	spin_lock_irq(&gp->lock);
	gem_get_cell(gp);
	gem_begin_auto_negotiation(gp, NULL);
	gem_put_cell(gp);
	spin_unlock_irq(&gp->lock);

	return 0;
}

static u32 gem_get_msglevel(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	return gp->msg_enable;
}

static void gem_set_msglevel(struct net_device *dev, u32 value)
{
	struct gem *gp = netdev_priv(dev);
	gp->msg_enable = value;
}

/* Add more when I understand how to program the chip,
 * like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST.
 */
#define WOL_SUPPORTED_MASK	(WAKE_MAGIC)

static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	/* Add more when I understand how to program the chip */
	if (gp->has_wol) {
		wol->supported = WOL_SUPPORTED_MASK;
		wol->wolopts = gp->wake_on_lan;
	} else {
		wol->supported = 0;
		wol->wolopts = 0;
	}
}

static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->has_wol)
		return -EOPNOTSUPP;
	gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
	return 0;
}

static const struct ethtool_ops gem_ethtool_ops = {
	.get_drvinfo		= gem_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_settings		= gem_get_settings,
	.set_settings		= gem_set_settings,
	.nway_reset		= gem_nway_reset,
	.get_msglevel		= gem_get_msglevel,
	.set_msglevel		= gem_set_msglevel,
	.get_wol		= gem_get_wol,
	.set_wol		= gem_set_wol,
};

static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct gem *gp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int rc = -EOPNOTSUPP;
	unsigned long flags;

	/* Hold the PM mutex while doing ioctl's or we may collide
	 * with power management.
	 */
	mutex_lock(&gp->pm_mutex);

	spin_lock_irqsave(&gp->lock, flags);
	gem_get_cell(gp);
	spin_unlock_irqrestore(&gp->lock, flags);

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = gp->mii_phy_addr;
		/* Fallthrough... */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		if (!gp->running)
			rc = -EAGAIN;
		else {
			data->val_out = __phy_read(gp, data->phy_id & 0x1f,
						   data->reg_num & 0x1f);
			rc = 0;
		}
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (!gp->running)
			rc = -EAGAIN;
		else {
			__phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
				    data->val_in);
			rc = 0;
		}
		break;
	}

	spin_lock_irqsave(&gp->lock, flags);
	gem_put_cell(gp);
	spin_unlock_irqrestore(&gp->lock, flags);

	mutex_unlock(&gp->pm_mutex);

	return rc;
}
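
/* Editor's note (not part of the original driver): the MII cases above
 * implement the standard SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG interface,
 * so PHY registers can be inspected from userland while the interface
 * is up.  Hypothetical userspace sketch, error handling omitted:
 *
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = 1;		// MII_BMSR, basic status/link
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// result lands in mii->val_out
 */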

#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
/* Fetch MAC address from vital product data of PCI ROM. */
static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
{
	int this_offset;

	for (this_offset = 0x20; this_offset < len; this_offset++) {
		void __iomem *p = rom_base + this_offset;
		int i;

		if (readb(p + 0) != 0x90 ||
		    readb(p + 1) != 0x00 ||
		    readb(p + 2) != 0x09 ||
		    readb(p + 3) != 0x4e ||
		    readb(p + 4) != 0x41 ||
		    readb(p + 5) != 0x06)
			continue;

		this_offset += 6;
		p += 6;

		for (i = 0; i < 6; i++)
			dev_addr[i] = readb(p + i);
		return 1;
	}
	return 0;
}

static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
{
	size_t size;
	void __iomem *p = pci_map_rom(pdev, &size);

	if (p) {
		int found;

		found = readb(p) == 0x55 &&
			readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
		pci_unmap_rom(pdev, p);
		if (found)
			return;
	}

	/* Sun MAC prefix then 3 random bytes. */
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);
}
#endif /* not Sparc and not PPC */
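
/* Editor's note (not part of the original driver): the 6-byte signature
 * matched in find_eth_addr_in_vpd() above is the fixed pattern Sun ROMs
 * use for this record: a PCI VPD-R large-resource tag (0x90) with its
 * length bytes, then the keyword "NA" (0x4e 0x41, "network address")
 * with a data length of 0x06 -- the six MAC octets the loop copies out.
 * get_gem_mac_nonobp() first sanity-checks the 0x55 0xaa expansion-ROM
 * header; if no ROM carries the record, it falls back to Sun's 08:00:20
 * OUI plus random bytes, which at least yields a usable address.
 */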

static int __devinit gem_get_device_address(struct gem *gp)
{
#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
	struct net_device *dev = gp->dev;
	const unsigned char *addr;

	addr = of_get_property(gp->of_node, "local-mac-address", NULL);
	if (addr == NULL) {
#ifdef CONFIG_SPARC
		addr = idprom->id_ethaddr;
#else
		printk("\n");
		printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
		return -1;
#endif
	}
	memcpy(dev->dev_addr, addr, 6);
#else
	get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
#endif
	return 0;
}

static void gem_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct gem *gp = netdev_priv(dev);

		unregister_netdev(dev);

		/* Stop the link timer */
		del_timer_sync(&gp->link_timer);

		/* We shouldn't need any locking here */
		gem_get_cell(gp);

		/* Wait for a pending reset task to complete */
		while (gp->reset_task_pending)
			yield();
		flush_scheduled_work();

		/* Shut the PHY down */
		gem_stop_phy(gp, 0);

		gem_put_cell(gp);

		/* Make sure bus master is disabled */
		pci_disable_device(gp->pdev);

		/* Free resources */
		pci_free_consistent(pdev,
				    sizeof(struct gem_init_block),
				    gp->init_block,
				    gp->gblock_dvma);
		iounmap(gp->regs);
		pci_release_regions(pdev);
		free_netdev(dev);

		pci_set_drvdata(pdev, NULL);
	}
}

static const struct net_device_ops gem_netdev_ops = {
	.ndo_open		= gem_open,
	.ndo_stop		= gem_close,
	.ndo_start_xmit		= gem_start_xmit,
	.ndo_get_stats		= gem_get_stats,
	.ndo_set_multicast_list	= gem_set_multicast,
	.ndo_do_ioctl		= gem_ioctl,
	.ndo_tx_timeout		= gem_tx_timeout,
	.ndo_change_mtu		= gem_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= gem_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= gem_poll_controller,
#endif
};
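
/* Editor's note (not part of the original driver): gem_start_xmit,
 * gem_tx_timeout, gem_poll and gem_poll_controller wired up in the ops
 * table above are defined earlier in this file, alongside the interrupt
 * and NAPI receive paths.
 */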

static int __devinit gem_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int gem_version_printed = 0;
	unsigned long gemreg_base, gemreg_len;
	struct net_device *dev;
	struct gem *gp;
	int err, pci_using_dac;

	if (gem_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* Apple gmac note: during probe, the chip is powered up by
	 * the arch code to allow the code below to work (and to let
	 * the chip be probed via the config space). It won't stay powered
	 * up until the interface is brought up however, so we can't rely
	 * on register configuration done at this point.
	 */
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable MMIO operation, "
		       "aborting.\n");
		return err;
	}
	pci_set_master(pdev);

	/* Configure DMA attributes.
	 *
	 * All of the GEM documentation states that 64-bit DMA addressing
	 * is fully supported and should work just fine. However the
	 * front end for RIO based GEMs is different and only supports
	 * 32-bit addressing.
	 *
	 * For now we assume the various PPC GEMs are 32-bit only as well.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_GEM &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_disable_device;
		}
		pci_using_dac = 0;
	}

	gemreg_base = pci_resource_start(pdev, 0);
	gemreg_len = pci_resource_len(pdev, 0);

	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_disable_device;
	}

	dev = alloc_etherdev(sizeof(*gp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_disable_device;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	gp = netdev_priv(dev);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_free_netdev;
	}

	gp->pdev = pdev;
	dev->base_addr = (long) pdev;
	gp->dev = dev;

	gp->msg_enable = DEFAULT_MSG;

	spin_lock_init(&gp->lock);
	spin_lock_init(&gp->tx_lock);
	mutex_init(&gp->pm_mutex);

	init_timer(&gp->link_timer);
	gp->link_timer.function = gem_link_timer;
	gp->link_timer.data = (unsigned long) gp;

	INIT_WORK(&gp->reset_task, gem_reset_task);

	gp->lstate = link_down;
	gp->timer_ticks = 0;
	netif_carrier_off(dev);

	gp->regs = ioremap(gemreg_base, gemreg_len);
	if (!gp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* On Apple, we want a reference to the Open Firmware device-tree
	 * node. We use it for clock control.
	 */
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
	gp->of_node = pci_device_to_OF_node(pdev);
#endif

	/* As far as we know, only the Apple version supports WOL */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE)
		gp->has_wol = 1;

	/* Make sure cell is enabled */
	gem_get_cell(gp);

	/* Make sure everything is stopped and in init state */
	gem_reset(gp);

	/* Fill up the mii_phy structure (even if we won't use it) */
	gp->phy_mii.dev = dev;
	gp->phy_mii.mdio_read = _phy_read;
	gp->phy_mii.mdio_write = _phy_write;
#ifdef CONFIG_PPC_PMAC
	gp->phy_mii.platform_data = gp->of_node;
#endif
	/* By default, we start with autoneg */
	gp->want_autoneg = 1;

	/* Check fifo sizes, PHY type, etc... */
	if (gem_check_invariants(gp)) {
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* It is guaranteed that the returned buffer will be at least
	 * PAGE_SIZE aligned.
	 */
	gp->init_block = (struct gem_init_block *)
		pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
				     &gp->gblock_dvma);
	if (!gp->init_block) {
		printk(KERN_ERR PFX "Cannot allocate init block, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	if (gem_get_device_address(gp))
		goto err_out_free_consistent;

	dev->netdev_ops = &gem_netdev_ops;
	netif_napi_add(dev, &gp->napi, gem_poll, 64);
	dev->ethtool_ops = &gem_ethtool_ops;
	dev->watchdog_timeo = 5 * HZ;
	dev->irq = pdev->irq;
	dev->dma = 0;

	/* Set that now, in case PM kicks in now */
	pci_set_drvdata(pdev, dev);

	/* Detect & init PHY, start autoneg, we release the cell now
	 * too, it will be managed by whoever needs it
	 */
	gem_init_phy(gp);

	spin_lock_irq(&gp->lock);
	gem_put_cell(gp);
	spin_unlock_irq(&gp->lock);

	/* Register with kernel */
	if (register_netdev(dev)) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_consistent;
	}

	printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
	       dev->name, dev->dev_addr);

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1)
		printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
		       gp->phy_mii.def ? gp->phy_mii.def->name : "no");

	/* GEM can do it all... */
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	return 0;

err_out_free_consistent:
	gem_remove_one(pdev);
err_out_iounmap:
	gem_put_cell(gp);
	iounmap(gp->regs);

err_out_free_res:
	pci_release_regions(pdev);

err_out_free_netdev:
	free_netdev(dev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static struct pci_driver gem_driver = {
	.name		= GEM_MODULE_NAME,
	.id_table	= gem_pci_tbl,
	.probe		= gem_init_one,
	.remove		= gem_remove_one,
#ifdef CONFIG_PM
	.suspend	= gem_suspend,
	.resume		= gem_resume,
#endif /* CONFIG_PM */
};

static int __init gem_init(void)
{
	return pci_register_driver(&gem_driver);
}

static void __exit gem_cleanup(void)
{
	pci_unregister_driver(&gem_driver);
}

module_init(gem_init);
module_exit(gem_cleanup);