Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.22-rc2 3216 lines 81 kB view raw
1/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $ 2 * sungem.c: Sun GEM ethernet driver. 3 * 4 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com) 5 * 6 * Support for Apple GMAC and assorted PHYs, WOL, Power Management 7 * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org) 8 * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp. 9 * 10 * NAPI and NETPOLL support 11 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com) 12 * 13 * TODO: 14 * - Now that the driver was significantly simplified, I need to rework 15 * the locking. I'm sure we don't need _2_ spinlocks, and we probably 16 * can avoid taking most of them for so long period of time (and schedule 17 * instead). The main issues at this point are caused by the netdev layer 18 * though: 19 * 20 * gem_change_mtu() and gem_set_multicast() are called with a read_lock() 21 * help by net/core/dev.c, thus they can't schedule. That means they can't 22 * call netif_poll_disable() neither, thus force gem_poll() to keep a spinlock 23 * where it could have been dropped. change_mtu especially would love also to 24 * be able to msleep instead of horrid locked delays when resetting the HW, 25 * but that read_lock() makes it impossible, unless I defer it's action to 26 * the reset task, which means it'll be asynchronous (won't take effect until 27 * the system schedules a bit). 28 * 29 * Also, it would probably be possible to also remove most of the long-life 30 * locking in open/resume code path (gem_reinit_chip) by beeing more careful 31 * about when we can start taking interrupts or get xmit() called... 32 */ 33 34#include <linux/module.h> 35#include <linux/kernel.h> 36#include <linux/types.h> 37#include <linux/fcntl.h> 38#include <linux/interrupt.h> 39#include <linux/ioport.h> 40#include <linux/in.h> 41#include <linux/slab.h> 42#include <linux/string.h> 43#include <linux/delay.h> 44#include <linux/init.h> 45#include <linux/errno.h> 46#include <linux/pci.h> 47#include <linux/dma-mapping.h> 48#include <linux/netdevice.h> 49#include <linux/etherdevice.h> 50#include <linux/skbuff.h> 51#include <linux/mii.h> 52#include <linux/ethtool.h> 53#include <linux/crc32.h> 54#include <linux/random.h> 55#include <linux/workqueue.h> 56#include <linux/if_vlan.h> 57#include <linux/bitops.h> 58#include <linux/mutex.h> 59#include <linux/mm.h> 60 61#include <asm/system.h> 62#include <asm/io.h> 63#include <asm/byteorder.h> 64#include <asm/uaccess.h> 65#include <asm/irq.h> 66 67#ifdef CONFIG_SPARC 68#include <asm/idprom.h> 69#include <asm/prom.h> 70#endif 71 72#ifdef CONFIG_PPC_PMAC 73#include <asm/pci-bridge.h> 74#include <asm/prom.h> 75#include <asm/machdep.h> 76#include <asm/pmac_feature.h> 77#endif 78 79#include "sungem_phy.h" 80#include "sungem.h" 81 82/* Stripping FCS is causing problems, disabled for now */ 83#undef STRIP_FCS 84 85#define DEFAULT_MSG (NETIF_MSG_DRV | \ 86 NETIF_MSG_PROBE | \ 87 NETIF_MSG_LINK) 88 89#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ 90 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ 91 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \ 92 SUPPORTED_Pause | SUPPORTED_Autoneg) 93 94#define DRV_NAME "sungem" 95#define DRV_VERSION "0.98" 96#define DRV_RELDATE "8/24/03" 97#define DRV_AUTHOR "David S. Miller (davem@redhat.com)" 98 99static char version[] __devinitdata = 100 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; 101 102MODULE_AUTHOR(DRV_AUTHOR); 103MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); 104MODULE_LICENSE("GPL"); 105 106#define GEM_MODULE_NAME "gem" 107#define PFX GEM_MODULE_NAME ": " 108 109static struct pci_device_id gem_pci_tbl[] = { 110 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, 111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 112 113 /* These models only differ from the original GEM in 114 * that their tx/rx fifos are of a different size and 115 * they only support 10/100 speeds. -DaveM 116 * 117 * Apple's GMAC does support gigabit on machines with 118 * the BCM54xx PHYs. -BenH 119 */ 120 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM, 121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 122 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC, 123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 124 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP, 125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 126 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2, 127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 128 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC, 129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 130 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM, 131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 132 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC, 133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 134 {0, } 135}; 136 137MODULE_DEVICE_TABLE(pci, gem_pci_tbl); 138 139static u16 __phy_read(struct gem *gp, int phy_addr, int reg) 140{ 141 u32 cmd; 142 int limit = 10000; 143 144 cmd = (1 << 30); 145 cmd |= (2 << 28); 146 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; 147 cmd |= (reg << 18) & MIF_FRAME_REGAD; 148 cmd |= (MIF_FRAME_TAMSB); 149 writel(cmd, gp->regs + MIF_FRAME); 150 151 while (limit--) { 152 cmd = readl(gp->regs + MIF_FRAME); 153 if (cmd & MIF_FRAME_TALSB) 154 break; 155 156 udelay(10); 157 } 158 159 if (!limit) 160 cmd = 0xffff; 161 162 return cmd & MIF_FRAME_DATA; 163} 164 165static inline int _phy_read(struct net_device *dev, int mii_id, int reg) 166{ 167 struct gem *gp = dev->priv; 168 return __phy_read(gp, mii_id, reg); 169} 170 171static inline u16 phy_read(struct gem *gp, int reg) 172{ 173 return __phy_read(gp, gp->mii_phy_addr, reg); 174} 175 176static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val) 177{ 178 u32 cmd; 179 int limit = 10000; 180 181 cmd = (1 << 30); 182 cmd |= (1 << 28); 183 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; 184 cmd |= (reg << 18) & MIF_FRAME_REGAD; 185 cmd |= (MIF_FRAME_TAMSB); 186 cmd |= (val & MIF_FRAME_DATA); 187 writel(cmd, gp->regs + MIF_FRAME); 188 189 while (limit--) { 190 cmd = readl(gp->regs + MIF_FRAME); 191 if (cmd & MIF_FRAME_TALSB) 192 break; 193 194 udelay(10); 195 } 196} 197 198static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val) 199{ 200 struct gem *gp = dev->priv; 201 __phy_write(gp, mii_id, reg, val & 0xffff); 202} 203 204static inline void phy_write(struct gem *gp, int reg, u16 val) 205{ 206 __phy_write(gp, gp->mii_phy_addr, reg, val); 207} 208 209static inline void gem_enable_ints(struct gem *gp) 210{ 211 /* Enable all interrupts but TXDONE */ 212 writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK); 213} 214 215static inline void gem_disable_ints(struct gem *gp) 216{ 217 /* Disable all interrupts, including TXDONE */ 218 writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK); 219} 220 221static void gem_get_cell(struct gem *gp) 222{ 223 BUG_ON(gp->cell_enabled < 0); 224 gp->cell_enabled++; 225#ifdef CONFIG_PPC_PMAC 226 if (gp->cell_enabled == 1) { 227 mb(); 228 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1); 229 udelay(10); 230 } 231#endif /* CONFIG_PPC_PMAC */ 232} 233 234/* Turn off the chip's clock */ 235static void gem_put_cell(struct gem *gp) 236{ 237 BUG_ON(gp->cell_enabled <= 0); 238 gp->cell_enabled--; 239#ifdef CONFIG_PPC_PMAC 240 if (gp->cell_enabled == 0) { 241 mb(); 242 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0); 243 udelay(10); 244 } 245#endif /* CONFIG_PPC_PMAC */ 246} 247 248static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) 249{ 250 if (netif_msg_intr(gp)) 251 printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name); 252} 253 254static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 255{ 256 u32 pcs_istat = readl(gp->regs + PCS_ISTAT); 257 u32 pcs_miistat; 258 259 if (netif_msg_intr(gp)) 260 printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n", 261 gp->dev->name, pcs_istat); 262 263 if (!(pcs_istat & PCS_ISTAT_LSC)) { 264 printk(KERN_ERR "%s: PCS irq but no link status change???\n", 265 dev->name); 266 return 0; 267 } 268 269 /* The link status bit latches on zero, so you must 270 * read it twice in such a case to see a transition 271 * to the link being up. 272 */ 273 pcs_miistat = readl(gp->regs + PCS_MIISTAT); 274 if (!(pcs_miistat & PCS_MIISTAT_LS)) 275 pcs_miistat |= 276 (readl(gp->regs + PCS_MIISTAT) & 277 PCS_MIISTAT_LS); 278 279 if (pcs_miistat & PCS_MIISTAT_ANC) { 280 /* The remote-fault indication is only valid 281 * when autoneg has completed. 282 */ 283 if (pcs_miistat & PCS_MIISTAT_RF) 284 printk(KERN_INFO "%s: PCS AutoNEG complete, " 285 "RemoteFault\n", dev->name); 286 else 287 printk(KERN_INFO "%s: PCS AutoNEG complete.\n", 288 dev->name); 289 } 290 291 if (pcs_miistat & PCS_MIISTAT_LS) { 292 printk(KERN_INFO "%s: PCS link is now up.\n", 293 dev->name); 294 netif_carrier_on(gp->dev); 295 } else { 296 printk(KERN_INFO "%s: PCS link is now down.\n", 297 dev->name); 298 netif_carrier_off(gp->dev); 299 /* If this happens and the link timer is not running, 300 * reset so we re-negotiate. 301 */ 302 if (!timer_pending(&gp->link_timer)) 303 return 1; 304 } 305 306 return 0; 307} 308 309static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 310{ 311 u32 txmac_stat = readl(gp->regs + MAC_TXSTAT); 312 313 if (netif_msg_intr(gp)) 314 printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", 315 gp->dev->name, txmac_stat); 316 317 /* Defer timer expiration is quite normal, 318 * don't even log the event. 319 */ 320 if ((txmac_stat & MAC_TXSTAT_DTE) && 321 !(txmac_stat & ~MAC_TXSTAT_DTE)) 322 return 0; 323 324 if (txmac_stat & MAC_TXSTAT_URUN) { 325 printk(KERN_ERR "%s: TX MAC xmit underrun.\n", 326 dev->name); 327 gp->net_stats.tx_fifo_errors++; 328 } 329 330 if (txmac_stat & MAC_TXSTAT_MPE) { 331 printk(KERN_ERR "%s: TX MAC max packet size error.\n", 332 dev->name); 333 gp->net_stats.tx_errors++; 334 } 335 336 /* The rest are all cases of one of the 16-bit TX 337 * counters expiring. 338 */ 339 if (txmac_stat & MAC_TXSTAT_NCE) 340 gp->net_stats.collisions += 0x10000; 341 342 if (txmac_stat & MAC_TXSTAT_ECE) { 343 gp->net_stats.tx_aborted_errors += 0x10000; 344 gp->net_stats.collisions += 0x10000; 345 } 346 347 if (txmac_stat & MAC_TXSTAT_LCE) { 348 gp->net_stats.tx_aborted_errors += 0x10000; 349 gp->net_stats.collisions += 0x10000; 350 } 351 352 /* We do not keep track of MAC_TXSTAT_FCE and 353 * MAC_TXSTAT_PCE events. 354 */ 355 return 0; 356} 357 358/* When we get a RX fifo overflow, the RX unit in GEM is probably hung 359 * so we do the following. 360 * 361 * If any part of the reset goes wrong, we return 1 and that causes the 362 * whole chip to be reset. 363 */ 364static int gem_rxmac_reset(struct gem *gp) 365{ 366 struct net_device *dev = gp->dev; 367 int limit, i; 368 u64 desc_dma; 369 u32 val; 370 371 /* First, reset & disable MAC RX. */ 372 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); 373 for (limit = 0; limit < 5000; limit++) { 374 if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD)) 375 break; 376 udelay(10); 377 } 378 if (limit == 5000) { 379 printk(KERN_ERR "%s: RX MAC will not reset, resetting whole " 380 "chip.\n", dev->name); 381 return 1; 382 } 383 384 writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB, 385 gp->regs + MAC_RXCFG); 386 for (limit = 0; limit < 5000; limit++) { 387 if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB)) 388 break; 389 udelay(10); 390 } 391 if (limit == 5000) { 392 printk(KERN_ERR "%s: RX MAC will not disable, resetting whole " 393 "chip.\n", dev->name); 394 return 1; 395 } 396 397 /* Second, disable RX DMA. */ 398 writel(0, gp->regs + RXDMA_CFG); 399 for (limit = 0; limit < 5000; limit++) { 400 if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE)) 401 break; 402 udelay(10); 403 } 404 if (limit == 5000) { 405 printk(KERN_ERR "%s: RX DMA will not disable, resetting whole " 406 "chip.\n", dev->name); 407 return 1; 408 } 409 410 udelay(5000); 411 412 /* Execute RX reset command. */ 413 writel(gp->swrst_base | GREG_SWRST_RXRST, 414 gp->regs + GREG_SWRST); 415 for (limit = 0; limit < 5000; limit++) { 416 if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST)) 417 break; 418 udelay(10); 419 } 420 if (limit == 5000) { 421 printk(KERN_ERR "%s: RX reset command will not execute, resetting " 422 "whole chip.\n", dev->name); 423 return 1; 424 } 425 426 /* Refresh the RX ring. */ 427 for (i = 0; i < RX_RING_SIZE; i++) { 428 struct gem_rxd *rxd = &gp->init_block->rxd[i]; 429 430 if (gp->rx_skbs[i] == NULL) { 431 printk(KERN_ERR "%s: Parts of RX ring empty, resetting " 432 "whole chip.\n", dev->name); 433 return 1; 434 } 435 436 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 437 } 438 gp->rx_new = gp->rx_old = 0; 439 440 /* Now we must reprogram the rest of RX unit. */ 441 desc_dma = (u64) gp->gblock_dvma; 442 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); 443 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); 444 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); 445 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 446 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | 447 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); 448 writel(val, gp->regs + RXDMA_CFG); 449 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) 450 writel(((5 & RXDMA_BLANK_IPKTS) | 451 ((8 << 12) & RXDMA_BLANK_ITIME)), 452 gp->regs + RXDMA_BLANK); 453 else 454 writel(((5 & RXDMA_BLANK_IPKTS) | 455 ((4 << 12) & RXDMA_BLANK_ITIME)), 456 gp->regs + RXDMA_BLANK); 457 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); 458 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); 459 writel(val, gp->regs + RXDMA_PTHRESH); 460 val = readl(gp->regs + RXDMA_CFG); 461 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 462 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); 463 val = readl(gp->regs + MAC_RXCFG); 464 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 465 466 return 0; 467} 468 469static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 470{ 471 u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT); 472 int ret = 0; 473 474 if (netif_msg_intr(gp)) 475 printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n", 476 gp->dev->name, rxmac_stat); 477 478 if (rxmac_stat & MAC_RXSTAT_OFLW) { 479 u32 smac = readl(gp->regs + MAC_SMACHINE); 480 481 printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n", 482 dev->name, smac); 483 gp->net_stats.rx_over_errors++; 484 gp->net_stats.rx_fifo_errors++; 485 486 ret = gem_rxmac_reset(gp); 487 } 488 489 if (rxmac_stat & MAC_RXSTAT_ACE) 490 gp->net_stats.rx_frame_errors += 0x10000; 491 492 if (rxmac_stat & MAC_RXSTAT_CCE) 493 gp->net_stats.rx_crc_errors += 0x10000; 494 495 if (rxmac_stat & MAC_RXSTAT_LCE) 496 gp->net_stats.rx_length_errors += 0x10000; 497 498 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE 499 * events. 500 */ 501 return ret; 502} 503 504static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 505{ 506 u32 mac_cstat = readl(gp->regs + MAC_CSTAT); 507 508 if (netif_msg_intr(gp)) 509 printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n", 510 gp->dev->name, mac_cstat); 511 512 /* This interrupt is just for pause frame and pause 513 * tracking. It is useful for diagnostics and debug 514 * but probably by default we will mask these events. 515 */ 516 if (mac_cstat & MAC_CSTAT_PS) 517 gp->pause_entered++; 518 519 if (mac_cstat & MAC_CSTAT_PRCV) 520 gp->pause_last_time_recvd = (mac_cstat >> 16); 521 522 return 0; 523} 524 525static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 526{ 527 u32 mif_status = readl(gp->regs + MIF_STATUS); 528 u32 reg_val, changed_bits; 529 530 reg_val = (mif_status & MIF_STATUS_DATA) >> 16; 531 changed_bits = (mif_status & MIF_STATUS_STAT); 532 533 gem_handle_mif_event(gp, reg_val, changed_bits); 534 535 return 0; 536} 537 538static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 539{ 540 u32 pci_estat = readl(gp->regs + GREG_PCIESTAT); 541 542 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && 543 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { 544 printk(KERN_ERR "%s: PCI error [%04x] ", 545 dev->name, pci_estat); 546 547 if (pci_estat & GREG_PCIESTAT_BADACK) 548 printk("<No ACK64# during ABS64 cycle> "); 549 if (pci_estat & GREG_PCIESTAT_DTRTO) 550 printk("<Delayed transaction timeout> "); 551 if (pci_estat & GREG_PCIESTAT_OTHER) 552 printk("<other>"); 553 printk("\n"); 554 } else { 555 pci_estat |= GREG_PCIESTAT_OTHER; 556 printk(KERN_ERR "%s: PCI error\n", dev->name); 557 } 558 559 if (pci_estat & GREG_PCIESTAT_OTHER) { 560 u16 pci_cfg_stat; 561 562 /* Interrogate PCI config space for the 563 * true cause. 564 */ 565 pci_read_config_word(gp->pdev, PCI_STATUS, 566 &pci_cfg_stat); 567 printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n", 568 dev->name, pci_cfg_stat); 569 if (pci_cfg_stat & PCI_STATUS_PARITY) 570 printk(KERN_ERR "%s: PCI parity error detected.\n", 571 dev->name); 572 if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT) 573 printk(KERN_ERR "%s: PCI target abort.\n", 574 dev->name); 575 if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT) 576 printk(KERN_ERR "%s: PCI master acks target abort.\n", 577 dev->name); 578 if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT) 579 printk(KERN_ERR "%s: PCI master abort.\n", 580 dev->name); 581 if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR) 582 printk(KERN_ERR "%s: PCI system error SERR#.\n", 583 dev->name); 584 if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY) 585 printk(KERN_ERR "%s: PCI parity error.\n", 586 dev->name); 587 588 /* Write the error bits back to clear them. */ 589 pci_cfg_stat &= (PCI_STATUS_PARITY | 590 PCI_STATUS_SIG_TARGET_ABORT | 591 PCI_STATUS_REC_TARGET_ABORT | 592 PCI_STATUS_REC_MASTER_ABORT | 593 PCI_STATUS_SIG_SYSTEM_ERROR | 594 PCI_STATUS_DETECTED_PARITY); 595 pci_write_config_word(gp->pdev, 596 PCI_STATUS, pci_cfg_stat); 597 } 598 599 /* For all PCI errors, we should reset the chip. */ 600 return 1; 601} 602 603/* All non-normal interrupt conditions get serviced here. 604 * Returns non-zero if we should just exit the interrupt 605 * handler right now (ie. if we reset the card which invalidates 606 * all of the other original irq status bits). 607 */ 608static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status) 609{ 610 if (gem_status & GREG_STAT_RXNOBUF) { 611 /* Frame arrived, no free RX buffers available. */ 612 if (netif_msg_rx_err(gp)) 613 printk(KERN_DEBUG "%s: no buffer for rx frame\n", 614 gp->dev->name); 615 gp->net_stats.rx_dropped++; 616 } 617 618 if (gem_status & GREG_STAT_RXTAGERR) { 619 /* corrupt RX tag framing */ 620 if (netif_msg_rx_err(gp)) 621 printk(KERN_DEBUG "%s: corrupt rx tag framing\n", 622 gp->dev->name); 623 gp->net_stats.rx_errors++; 624 625 goto do_reset; 626 } 627 628 if (gem_status & GREG_STAT_PCS) { 629 if (gem_pcs_interrupt(dev, gp, gem_status)) 630 goto do_reset; 631 } 632 633 if (gem_status & GREG_STAT_TXMAC) { 634 if (gem_txmac_interrupt(dev, gp, gem_status)) 635 goto do_reset; 636 } 637 638 if (gem_status & GREG_STAT_RXMAC) { 639 if (gem_rxmac_interrupt(dev, gp, gem_status)) 640 goto do_reset; 641 } 642 643 if (gem_status & GREG_STAT_MAC) { 644 if (gem_mac_interrupt(dev, gp, gem_status)) 645 goto do_reset; 646 } 647 648 if (gem_status & GREG_STAT_MIF) { 649 if (gem_mif_interrupt(dev, gp, gem_status)) 650 goto do_reset; 651 } 652 653 if (gem_status & GREG_STAT_PCIERR) { 654 if (gem_pci_interrupt(dev, gp, gem_status)) 655 goto do_reset; 656 } 657 658 return 0; 659 660do_reset: 661 gp->reset_task_pending = 1; 662 schedule_work(&gp->reset_task); 663 664 return 1; 665} 666 667static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status) 668{ 669 int entry, limit; 670 671 if (netif_msg_intr(gp)) 672 printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n", 673 gp->dev->name, gem_status); 674 675 entry = gp->tx_old; 676 limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT); 677 while (entry != limit) { 678 struct sk_buff *skb; 679 struct gem_txd *txd; 680 dma_addr_t dma_addr; 681 u32 dma_len; 682 int frag; 683 684 if (netif_msg_tx_done(gp)) 685 printk(KERN_DEBUG "%s: tx done, slot %d\n", 686 gp->dev->name, entry); 687 skb = gp->tx_skbs[entry]; 688 if (skb_shinfo(skb)->nr_frags) { 689 int last = entry + skb_shinfo(skb)->nr_frags; 690 int walk = entry; 691 int incomplete = 0; 692 693 last &= (TX_RING_SIZE - 1); 694 for (;;) { 695 walk = NEXT_TX(walk); 696 if (walk == limit) 697 incomplete = 1; 698 if (walk == last) 699 break; 700 } 701 if (incomplete) 702 break; 703 } 704 gp->tx_skbs[entry] = NULL; 705 gp->net_stats.tx_bytes += skb->len; 706 707 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 708 txd = &gp->init_block->txd[entry]; 709 710 dma_addr = le64_to_cpu(txd->buffer); 711 dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ; 712 713 pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); 714 entry = NEXT_TX(entry); 715 } 716 717 gp->net_stats.tx_packets++; 718 dev_kfree_skb_irq(skb); 719 } 720 gp->tx_old = entry; 721 722 if (netif_queue_stopped(dev) && 723 TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) 724 netif_wake_queue(dev); 725} 726 727static __inline__ void gem_post_rxds(struct gem *gp, int limit) 728{ 729 int cluster_start, curr, count, kick; 730 731 cluster_start = curr = (gp->rx_new & ~(4 - 1)); 732 count = 0; 733 kick = -1; 734 wmb(); 735 while (curr != limit) { 736 curr = NEXT_RX(curr); 737 if (++count == 4) { 738 struct gem_rxd *rxd = 739 &gp->init_block->rxd[cluster_start]; 740 for (;;) { 741 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 742 rxd++; 743 cluster_start = NEXT_RX(cluster_start); 744 if (cluster_start == curr) 745 break; 746 } 747 kick = curr; 748 count = 0; 749 } 750 } 751 if (kick >= 0) { 752 mb(); 753 writel(kick, gp->regs + RXDMA_KICK); 754 } 755} 756 757static int gem_rx(struct gem *gp, int work_to_do) 758{ 759 int entry, drops, work_done = 0; 760 u32 done; 761 762 if (netif_msg_rx_status(gp)) 763 printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", 764 gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new); 765 766 entry = gp->rx_new; 767 drops = 0; 768 done = readl(gp->regs + RXDMA_DONE); 769 for (;;) { 770 struct gem_rxd *rxd = &gp->init_block->rxd[entry]; 771 struct sk_buff *skb; 772 u64 status = cpu_to_le64(rxd->status_word); 773 dma_addr_t dma_addr; 774 int len; 775 776 if ((status & RXDCTRL_OWN) != 0) 777 break; 778 779 if (work_done >= RX_RING_SIZE || work_done >= work_to_do) 780 break; 781 782 /* When writing back RX descriptor, GEM writes status 783 * then buffer address, possibly in seperate transactions. 784 * If we don't wait for the chip to write both, we could 785 * post a new buffer to this descriptor then have GEM spam 786 * on the buffer address. We sync on the RX completion 787 * register to prevent this from happening. 788 */ 789 if (entry == done) { 790 done = readl(gp->regs + RXDMA_DONE); 791 if (entry == done) 792 break; 793 } 794 795 /* We can now account for the work we're about to do */ 796 work_done++; 797 798 skb = gp->rx_skbs[entry]; 799 800 len = (status & RXDCTRL_BUFSZ) >> 16; 801 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { 802 gp->net_stats.rx_errors++; 803 if (len < ETH_ZLEN) 804 gp->net_stats.rx_length_errors++; 805 if (len & RXDCTRL_BAD) 806 gp->net_stats.rx_crc_errors++; 807 808 /* We'll just return it to GEM. */ 809 drop_it: 810 gp->net_stats.rx_dropped++; 811 goto next; 812 } 813 814 dma_addr = cpu_to_le64(rxd->buffer); 815 if (len > RX_COPY_THRESHOLD) { 816 struct sk_buff *new_skb; 817 818 new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); 819 if (new_skb == NULL) { 820 drops++; 821 goto drop_it; 822 } 823 pci_unmap_page(gp->pdev, dma_addr, 824 RX_BUF_ALLOC_SIZE(gp), 825 PCI_DMA_FROMDEVICE); 826 gp->rx_skbs[entry] = new_skb; 827 new_skb->dev = gp->dev; 828 skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); 829 rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev, 830 virt_to_page(new_skb->data), 831 offset_in_page(new_skb->data), 832 RX_BUF_ALLOC_SIZE(gp), 833 PCI_DMA_FROMDEVICE)); 834 skb_reserve(new_skb, RX_OFFSET); 835 836 /* Trim the original skb for the netif. */ 837 skb_trim(skb, len); 838 } else { 839 struct sk_buff *copy_skb = dev_alloc_skb(len + 2); 840 841 if (copy_skb == NULL) { 842 drops++; 843 goto drop_it; 844 } 845 846 skb_reserve(copy_skb, 2); 847 skb_put(copy_skb, len); 848 pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 849 skb_copy_from_linear_data(skb, copy_skb->data, len); 850 pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 851 852 /* We'll reuse the original ring buffer. */ 853 skb = copy_skb; 854 } 855 856 skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff); 857 skb->ip_summed = CHECKSUM_COMPLETE; 858 skb->protocol = eth_type_trans(skb, gp->dev); 859 860 netif_receive_skb(skb); 861 862 gp->net_stats.rx_packets++; 863 gp->net_stats.rx_bytes += len; 864 gp->dev->last_rx = jiffies; 865 866 next: 867 entry = NEXT_RX(entry); 868 } 869 870 gem_post_rxds(gp, entry); 871 872 gp->rx_new = entry; 873 874 if (drops) 875 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", 876 gp->dev->name); 877 878 return work_done; 879} 880 881static int gem_poll(struct net_device *dev, int *budget) 882{ 883 struct gem *gp = dev->priv; 884 unsigned long flags; 885 886 /* 887 * NAPI locking nightmare: See comment at head of driver 888 */ 889 spin_lock_irqsave(&gp->lock, flags); 890 891 do { 892 int work_to_do, work_done; 893 894 /* Handle anomalies */ 895 if (gp->status & GREG_STAT_ABNORMAL) { 896 if (gem_abnormal_irq(dev, gp, gp->status)) 897 break; 898 } 899 900 /* Run TX completion thread */ 901 spin_lock(&gp->tx_lock); 902 gem_tx(dev, gp, gp->status); 903 spin_unlock(&gp->tx_lock); 904 905 spin_unlock_irqrestore(&gp->lock, flags); 906 907 /* Run RX thread. We don't use any locking here, 908 * code willing to do bad things - like cleaning the 909 * rx ring - must call netif_poll_disable(), which 910 * schedule_timeout()'s if polling is already disabled. 911 */ 912 work_to_do = min(*budget, dev->quota); 913 914 work_done = gem_rx(gp, work_to_do); 915 916 *budget -= work_done; 917 dev->quota -= work_done; 918 919 if (work_done >= work_to_do) 920 return 1; 921 922 spin_lock_irqsave(&gp->lock, flags); 923 924 gp->status = readl(gp->regs + GREG_STAT); 925 } while (gp->status & GREG_STAT_NAPI); 926 927 __netif_rx_complete(dev); 928 gem_enable_ints(gp); 929 930 spin_unlock_irqrestore(&gp->lock, flags); 931 return 0; 932} 933 934static irqreturn_t gem_interrupt(int irq, void *dev_id) 935{ 936 struct net_device *dev = dev_id; 937 struct gem *gp = dev->priv; 938 unsigned long flags; 939 940 /* Swallow interrupts when shutting the chip down, though 941 * that shouldn't happen, we should have done free_irq() at 942 * this point... 943 */ 944 if (!gp->running) 945 return IRQ_HANDLED; 946 947 spin_lock_irqsave(&gp->lock, flags); 948 949 if (netif_rx_schedule_prep(dev)) { 950 u32 gem_status = readl(gp->regs + GREG_STAT); 951 952 if (gem_status == 0) { 953 netif_poll_enable(dev); 954 spin_unlock_irqrestore(&gp->lock, flags); 955 return IRQ_NONE; 956 } 957 gp->status = gem_status; 958 gem_disable_ints(gp); 959 __netif_rx_schedule(dev); 960 } 961 962 spin_unlock_irqrestore(&gp->lock, flags); 963 964 /* If polling was disabled at the time we received that 965 * interrupt, we may return IRQ_HANDLED here while we 966 * should return IRQ_NONE. No big deal... 967 */ 968 return IRQ_HANDLED; 969} 970 971#ifdef CONFIG_NET_POLL_CONTROLLER 972static void gem_poll_controller(struct net_device *dev) 973{ 974 /* gem_interrupt is safe to reentrance so no need 975 * to disable_irq here. 976 */ 977 gem_interrupt(dev->irq, dev); 978} 979#endif 980 981static void gem_tx_timeout(struct net_device *dev) 982{ 983 struct gem *gp = dev->priv; 984 985 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 986 if (!gp->running) { 987 printk("%s: hrm.. hw not running !\n", dev->name); 988 return; 989 } 990 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n", 991 dev->name, 992 readl(gp->regs + TXDMA_CFG), 993 readl(gp->regs + MAC_TXSTAT), 994 readl(gp->regs + MAC_TXCFG)); 995 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n", 996 dev->name, 997 readl(gp->regs + RXDMA_CFG), 998 readl(gp->regs + MAC_RXSTAT), 999 readl(gp->regs + MAC_RXCFG)); 1000 1001 spin_lock_irq(&gp->lock); 1002 spin_lock(&gp->tx_lock); 1003 1004 gp->reset_task_pending = 1; 1005 schedule_work(&gp->reset_task); 1006 1007 spin_unlock(&gp->tx_lock); 1008 spin_unlock_irq(&gp->lock); 1009} 1010 1011static __inline__ int gem_intme(int entry) 1012{ 1013 /* Algorithm: IRQ every 1/2 of descriptors. */ 1014 if (!(entry & ((TX_RING_SIZE>>1)-1))) 1015 return 1; 1016 1017 return 0; 1018} 1019 1020static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) 1021{ 1022 struct gem *gp = dev->priv; 1023 int entry; 1024 u64 ctrl; 1025 unsigned long flags; 1026 1027 ctrl = 0; 1028 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1029 const u64 csum_start_off = skb_transport_offset(skb); 1030 const u64 csum_stuff_off = csum_start_off + skb->csum_offset; 1031 1032 ctrl = (TXDCTRL_CENAB | 1033 (csum_start_off << 15) | 1034 (csum_stuff_off << 21)); 1035 } 1036 1037 local_irq_save(flags); 1038 if (!spin_trylock(&gp->tx_lock)) { 1039 /* Tell upper layer to requeue */ 1040 local_irq_restore(flags); 1041 return NETDEV_TX_LOCKED; 1042 } 1043 /* We raced with gem_do_stop() */ 1044 if (!gp->running) { 1045 spin_unlock_irqrestore(&gp->tx_lock, flags); 1046 return NETDEV_TX_BUSY; 1047 } 1048 1049 /* This is a hard error, log it. */ 1050 if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) { 1051 netif_stop_queue(dev); 1052 spin_unlock_irqrestore(&gp->tx_lock, flags); 1053 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", 1054 dev->name); 1055 return NETDEV_TX_BUSY; 1056 } 1057 1058 entry = gp->tx_new; 1059 gp->tx_skbs[entry] = skb; 1060 1061 if (skb_shinfo(skb)->nr_frags == 0) { 1062 struct gem_txd *txd = &gp->init_block->txd[entry]; 1063 dma_addr_t mapping; 1064 u32 len; 1065 1066 len = skb->len; 1067 mapping = pci_map_page(gp->pdev, 1068 virt_to_page(skb->data), 1069 offset_in_page(skb->data), 1070 len, PCI_DMA_TODEVICE); 1071 ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len; 1072 if (gem_intme(entry)) 1073 ctrl |= TXDCTRL_INTME; 1074 txd->buffer = cpu_to_le64(mapping); 1075 wmb(); 1076 txd->control_word = cpu_to_le64(ctrl); 1077 entry = NEXT_TX(entry); 1078 } else { 1079 struct gem_txd *txd; 1080 u32 first_len; 1081 u64 intme; 1082 dma_addr_t first_mapping; 1083 int frag, first_entry = entry; 1084 1085 intme = 0; 1086 if (gem_intme(entry)) 1087 intme |= TXDCTRL_INTME; 1088 1089 /* We must give this initial chunk to the device last. 1090 * Otherwise we could race with the device. 1091 */ 1092 first_len = skb_headlen(skb); 1093 first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data), 1094 offset_in_page(skb->data), 1095 first_len, PCI_DMA_TODEVICE); 1096 entry = NEXT_TX(entry); 1097 1098 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 1099 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 1100 u32 len; 1101 dma_addr_t mapping; 1102 u64 this_ctrl; 1103 1104 len = this_frag->size; 1105 mapping = pci_map_page(gp->pdev, 1106 this_frag->page, 1107 this_frag->page_offset, 1108 len, PCI_DMA_TODEVICE); 1109 this_ctrl = ctrl; 1110 if (frag == skb_shinfo(skb)->nr_frags - 1) 1111 this_ctrl |= TXDCTRL_EOF; 1112 1113 txd = &gp->init_block->txd[entry]; 1114 txd->buffer = cpu_to_le64(mapping); 1115 wmb(); 1116 txd->control_word = cpu_to_le64(this_ctrl | len); 1117 1118 if (gem_intme(entry)) 1119 intme |= TXDCTRL_INTME; 1120 1121 entry = NEXT_TX(entry); 1122 } 1123 txd = &gp->init_block->txd[first_entry]; 1124 txd->buffer = cpu_to_le64(first_mapping); 1125 wmb(); 1126 txd->control_word = 1127 cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len); 1128 } 1129 1130 gp->tx_new = entry; 1131 if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1)) 1132 netif_stop_queue(dev); 1133 1134 if (netif_msg_tx_queued(gp)) 1135 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", 1136 dev->name, entry, skb->len); 1137 mb(); 1138 writel(gp->tx_new, gp->regs + TXDMA_KICK); 1139 spin_unlock_irqrestore(&gp->tx_lock, flags); 1140 1141 dev->trans_start = jiffies; 1142 1143 return NETDEV_TX_OK; 1144} 1145 1146#define STOP_TRIES 32 1147 1148/* Must be invoked under gp->lock and gp->tx_lock. */ 1149static void gem_reset(struct gem *gp) 1150{ 1151 int limit; 1152 u32 val; 1153 1154 /* Make sure we won't get any more interrupts */ 1155 writel(0xffffffff, gp->regs + GREG_IMASK); 1156 1157 /* Reset the chip */ 1158 writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST, 1159 gp->regs + GREG_SWRST); 1160 1161 limit = STOP_TRIES; 1162 1163 do { 1164 udelay(20); 1165 val = readl(gp->regs + GREG_SWRST); 1166 if (limit-- <= 0) 1167 break; 1168 } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)); 1169 1170 if (limit <= 0) 1171 printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name); 1172} 1173 1174/* Must be invoked under gp->lock and gp->tx_lock. */ 1175static void gem_start_dma(struct gem *gp) 1176{ 1177 u32 val; 1178 1179 /* We are ready to rock, turn everything on. */ 1180 val = readl(gp->regs + TXDMA_CFG); 1181 writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); 1182 val = readl(gp->regs + RXDMA_CFG); 1183 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 1184 val = readl(gp->regs + MAC_TXCFG); 1185 writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); 1186 val = readl(gp->regs + MAC_RXCFG); 1187 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 1188 1189 (void) readl(gp->regs + MAC_RXCFG); 1190 udelay(100); 1191 1192 gem_enable_ints(gp); 1193 1194 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 1195} 1196 1197/* Must be invoked under gp->lock and gp->tx_lock. DMA won't be 1198 * actually stopped before about 4ms tho ... 1199 */ 1200static void gem_stop_dma(struct gem *gp) 1201{ 1202 u32 val; 1203 1204 /* We are done rocking, turn everything off. */ 1205 val = readl(gp->regs + TXDMA_CFG); 1206 writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); 1207 val = readl(gp->regs + RXDMA_CFG); 1208 writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 1209 val = readl(gp->regs + MAC_TXCFG); 1210 writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); 1211 val = readl(gp->regs + MAC_RXCFG); 1212 writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 1213 1214 (void) readl(gp->regs + MAC_RXCFG); 1215 1216 /* Need to wait a bit ... done by the caller */ 1217} 1218 1219 1220/* Must be invoked under gp->lock and gp->tx_lock. */ 1221// XXX dbl check what that function should do when called on PCS PHY 1222static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) 1223{ 1224 u32 advertise, features; 1225 int autoneg; 1226 int speed; 1227 int duplex; 1228 1229 if (gp->phy_type != phy_mii_mdio0 && 1230 gp->phy_type != phy_mii_mdio1) 1231 goto non_mii; 1232 1233 /* Setup advertise */ 1234 if (found_mii_phy(gp)) 1235 features = gp->phy_mii.def->features; 1236 else 1237 features = 0; 1238 1239 advertise = features & ADVERTISE_MASK; 1240 if (gp->phy_mii.advertising != 0) 1241 advertise &= gp->phy_mii.advertising; 1242 1243 autoneg = gp->want_autoneg; 1244 speed = gp->phy_mii.speed; 1245 duplex = gp->phy_mii.duplex; 1246 1247 /* Setup link parameters */ 1248 if (!ep) 1249 goto start_aneg; 1250 if (ep->autoneg == AUTONEG_ENABLE) { 1251 advertise = ep->advertising; 1252 autoneg = 1; 1253 } else { 1254 autoneg = 0; 1255 speed = ep->speed; 1256 duplex = ep->duplex; 1257 } 1258 1259start_aneg: 1260 /* Sanitize settings based on PHY capabilities */ 1261 if ((features & SUPPORTED_Autoneg) == 0) 1262 autoneg = 0; 1263 if (speed == SPEED_1000 && 1264 !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full))) 1265 speed = SPEED_100; 1266 if (speed == SPEED_100 && 1267 !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full))) 1268 speed = SPEED_10; 1269 if (duplex == DUPLEX_FULL && 1270 !(features & (SUPPORTED_1000baseT_Full | 1271 SUPPORTED_100baseT_Full | 1272 SUPPORTED_10baseT_Full))) 1273 duplex = DUPLEX_HALF; 1274 if (speed == 0) 1275 speed = SPEED_10; 1276 1277 /* If we are asleep, we don't try to actually setup the PHY, we 1278 * just store the settings 1279 */ 1280 if (gp->asleep) { 1281 gp->phy_mii.autoneg = gp->want_autoneg = autoneg; 1282 gp->phy_mii.speed = speed; 1283 gp->phy_mii.duplex = duplex; 1284 return; 1285 } 1286 1287 /* Configure PHY & start aneg */ 1288 gp->want_autoneg = autoneg; 1289 if (autoneg) { 1290 if (found_mii_phy(gp)) 1291 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise); 1292 gp->lstate = link_aneg; 1293 } else { 1294 if (found_mii_phy(gp)) 1295 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex); 1296 gp->lstate = link_force_ok; 1297 } 1298 1299non_mii: 1300 gp->timer_ticks = 0; 1301 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); 1302} 1303 1304/* A link-up condition has occurred, initialize and enable the 1305 * rest of the chip. 1306 * 1307 * Must be invoked under gp->lock and gp->tx_lock. 1308 */ 1309static int gem_set_link_modes(struct gem *gp) 1310{ 1311 u32 val; 1312 int full_duplex, speed, pause; 1313 1314 full_duplex = 0; 1315 speed = SPEED_10; 1316 pause = 0; 1317 1318 if (found_mii_phy(gp)) { 1319 if (gp->phy_mii.def->ops->read_link(&gp->phy_mii)) 1320 return 1; 1321 full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL); 1322 speed = gp->phy_mii.speed; 1323 pause = gp->phy_mii.pause; 1324 } else if (gp->phy_type == phy_serialink || 1325 gp->phy_type == phy_serdes) { 1326 u32 pcs_lpa = readl(gp->regs + PCS_MIILP); 1327 1328 if (pcs_lpa & PCS_MIIADV_FD) 1329 full_duplex = 1; 1330 speed = SPEED_1000; 1331 } 1332 1333 if (netif_msg_link(gp)) 1334 printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n", 1335 gp->dev->name, speed, (full_duplex ? "full" : "half")); 1336 1337 if (!gp->running) 1338 return 0; 1339 1340 val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU); 1341 if (full_duplex) { 1342 val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL); 1343 } else { 1344 /* MAC_TXCFG_NBO must be zero. */ 1345 } 1346 writel(val, gp->regs + MAC_TXCFG); 1347 1348 val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED); 1349 if (!full_duplex && 1350 (gp->phy_type == phy_mii_mdio0 || 1351 gp->phy_type == phy_mii_mdio1)) { 1352 val |= MAC_XIFCFG_DISE; 1353 } else if (full_duplex) { 1354 val |= MAC_XIFCFG_FLED; 1355 } 1356 1357 if (speed == SPEED_1000) 1358 val |= (MAC_XIFCFG_GMII); 1359 1360 writel(val, gp->regs + MAC_XIFCFG); 1361 1362 /* If gigabit and half-duplex, enable carrier extension 1363 * mode. Else, disable it. 1364 */ 1365 if (speed == SPEED_1000 && !full_duplex) { 1366 val = readl(gp->regs + MAC_TXCFG); 1367 writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); 1368 1369 val = readl(gp->regs + MAC_RXCFG); 1370 writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); 1371 } else { 1372 val = readl(gp->regs + MAC_TXCFG); 1373 writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); 1374 1375 val = readl(gp->regs + MAC_RXCFG); 1376 writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); 1377 } 1378 1379 if (gp->phy_type == phy_serialink || 1380 gp->phy_type == phy_serdes) { 1381 u32 pcs_lpa = readl(gp->regs + PCS_MIILP); 1382 1383 if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP)) 1384 pause = 1; 1385 } 1386 1387 if (netif_msg_link(gp)) { 1388 if (pause) { 1389 printk(KERN_INFO "%s: Pause is enabled " 1390 "(rxfifo: %d off: %d on: %d)\n", 1391 gp->dev->name, 1392 gp->rx_fifo_sz, 1393 gp->rx_pause_off, 1394 gp->rx_pause_on); 1395 } else { 1396 printk(KERN_INFO "%s: Pause is disabled\n", 1397 gp->dev->name); 1398 } 1399 } 1400 1401 if (!full_duplex) 1402 writel(512, gp->regs + MAC_STIME); 1403 else 1404 writel(64, gp->regs + MAC_STIME); 1405 val = readl(gp->regs + MAC_MCCFG); 1406 if (pause) 1407 val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE); 1408 else 1409 val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE); 1410 writel(val, gp->regs + MAC_MCCFG); 1411 1412 gem_start_dma(gp); 1413 1414 return 0; 1415} 1416 1417/* Must be invoked under gp->lock and gp->tx_lock. */ 1418static int gem_mdio_link_not_up(struct gem *gp) 1419{ 1420 switch (gp->lstate) { 1421 case link_force_ret: 1422 if (netif_msg_link(gp)) 1423 printk(KERN_INFO "%s: Autoneg failed again, keeping" 1424 " forced mode\n", gp->dev->name); 1425 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, 1426 gp->last_forced_speed, DUPLEX_HALF); 1427 gp->timer_ticks = 5; 1428 gp->lstate = link_force_ok; 1429 return 0; 1430 case link_aneg: 1431 /* We try forced modes after a failed aneg only on PHYs that don't 1432 * have "magic_aneg" bit set, which means they internally do the 1433 * while forced-mode thingy. On these, we just restart aneg 1434 */ 1435 if (gp->phy_mii.def->magic_aneg) 1436 return 1; 1437 if (netif_msg_link(gp)) 1438 printk(KERN_INFO "%s: switching to forced 100bt\n", 1439 gp->dev->name); 1440 /* Try forced modes. */ 1441 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100, 1442 DUPLEX_HALF); 1443 gp->timer_ticks = 5; 1444 gp->lstate = link_force_try; 1445 return 0; 1446 case link_force_try: 1447 /* Downgrade from 100 to 10 Mbps if necessary. 1448 * If already at 10Mbps, warn user about the 1449 * situation every 10 ticks. 1450 */ 1451 if (gp->phy_mii.speed == SPEED_100) { 1452 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10, 1453 DUPLEX_HALF); 1454 gp->timer_ticks = 5; 1455 if (netif_msg_link(gp)) 1456 printk(KERN_INFO "%s: switching to forced 10bt\n", 1457 gp->dev->name); 1458 return 0; 1459 } else 1460 return 1; 1461 default: 1462 return 0; 1463 } 1464} 1465 1466static void gem_link_timer(unsigned long data) 1467{ 1468 struct gem *gp = (struct gem *) data; 1469 int restart_aneg = 0; 1470 1471 if (gp->asleep) 1472 return; 1473 1474 spin_lock_irq(&gp->lock); 1475 spin_lock(&gp->tx_lock); 1476 gem_get_cell(gp); 1477 1478 /* If the reset task is still pending, we just 1479 * reschedule the link timer 1480 */ 1481 if (gp->reset_task_pending) 1482 goto restart; 1483 1484 if (gp->phy_type == phy_serialink || 1485 gp->phy_type == phy_serdes) { 1486 u32 val = readl(gp->regs + PCS_MIISTAT); 1487 1488 if (!(val & PCS_MIISTAT_LS)) 1489 val = readl(gp->regs + PCS_MIISTAT); 1490 1491 if ((val & PCS_MIISTAT_LS) != 0) { 1492 gp->lstate = link_up; 1493 netif_carrier_on(gp->dev); 1494 (void)gem_set_link_modes(gp); 1495 } 1496 goto restart; 1497 } 1498 if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) { 1499 /* Ok, here we got a link. If we had it due to a forced 1500 * fallback, and we were configured for autoneg, we do 1501 * retry a short autoneg pass. If you know your hub is 1502 * broken, use ethtool ;) 1503 */ 1504 if (gp->lstate == link_force_try && gp->want_autoneg) { 1505 gp->lstate = link_force_ret; 1506 gp->last_forced_speed = gp->phy_mii.speed; 1507 gp->timer_ticks = 5; 1508 if (netif_msg_link(gp)) 1509 printk(KERN_INFO "%s: Got link after fallback, retrying" 1510 " autoneg once...\n", gp->dev->name); 1511 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); 1512 } else if (gp->lstate != link_up) { 1513 gp->lstate = link_up; 1514 netif_carrier_on(gp->dev); 1515 if (gem_set_link_modes(gp)) 1516 restart_aneg = 1; 1517 } 1518 } else { 1519 /* If the link was previously up, we restart the 1520 * whole process 1521 */ 1522 if (gp->lstate == link_up) { 1523 gp->lstate = link_down; 1524 if (netif_msg_link(gp)) 1525 printk(KERN_INFO "%s: Link down\n", 1526 gp->dev->name); 1527 netif_carrier_off(gp->dev); 1528 gp->reset_task_pending = 1; 1529 schedule_work(&gp->reset_task); 1530 restart_aneg = 1; 1531 } else if (++gp->timer_ticks > 10) { 1532 if (found_mii_phy(gp)) 1533 restart_aneg = gem_mdio_link_not_up(gp); 1534 else 1535 restart_aneg = 1; 1536 } 1537 } 1538 if (restart_aneg) { 1539 gem_begin_auto_negotiation(gp, NULL); 1540 goto out_unlock; 1541 } 1542restart: 1543 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); 1544out_unlock: 1545 gem_put_cell(gp); 1546 spin_unlock(&gp->tx_lock); 1547 spin_unlock_irq(&gp->lock); 1548} 1549 1550/* Must be invoked under gp->lock and gp->tx_lock. */ 1551static void gem_clean_rings(struct gem *gp) 1552{ 1553 struct gem_init_block *gb = gp->init_block; 1554 struct sk_buff *skb; 1555 int i; 1556 dma_addr_t dma_addr; 1557 1558 for (i = 0; i < RX_RING_SIZE; i++) { 1559 struct gem_rxd *rxd; 1560 1561 rxd = &gb->rxd[i]; 1562 if (gp->rx_skbs[i] != NULL) { 1563 skb = gp->rx_skbs[i]; 1564 dma_addr = le64_to_cpu(rxd->buffer); 1565 pci_unmap_page(gp->pdev, dma_addr, 1566 RX_BUF_ALLOC_SIZE(gp), 1567 PCI_DMA_FROMDEVICE); 1568 dev_kfree_skb_any(skb); 1569 gp->rx_skbs[i] = NULL; 1570 } 1571 rxd->status_word = 0; 1572 wmb(); 1573 rxd->buffer = 0; 1574 } 1575 1576 for (i = 0; i < TX_RING_SIZE; i++) { 1577 if (gp->tx_skbs[i] != NULL) { 1578 struct gem_txd *txd; 1579 int frag; 1580 1581 skb = gp->tx_skbs[i]; 1582 gp->tx_skbs[i] = NULL; 1583 1584 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 1585 int ent = i & (TX_RING_SIZE - 1); 1586 1587 txd = &gb->txd[ent]; 1588 dma_addr = le64_to_cpu(txd->buffer); 1589 pci_unmap_page(gp->pdev, dma_addr, 1590 le64_to_cpu(txd->control_word) & 1591 TXDCTRL_BUFSZ, PCI_DMA_TODEVICE); 1592 1593 if (frag != skb_shinfo(skb)->nr_frags) 1594 i++; 1595 } 1596 dev_kfree_skb_any(skb); 1597 } 1598 } 1599} 1600 1601/* Must be invoked under gp->lock and gp->tx_lock. */ 1602static void gem_init_rings(struct gem *gp) 1603{ 1604 struct gem_init_block *gb = gp->init_block; 1605 struct net_device *dev = gp->dev; 1606 int i; 1607 dma_addr_t dma_addr; 1608 1609 gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0; 1610 1611 gem_clean_rings(gp); 1612 1613 gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN, 1614 (unsigned)VLAN_ETH_FRAME_LEN); 1615 1616 for (i = 0; i < RX_RING_SIZE; i++) { 1617 struct sk_buff *skb; 1618 struct gem_rxd *rxd = &gb->rxd[i]; 1619 1620 skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); 1621 if (!skb) { 1622 rxd->buffer = 0; 1623 rxd->status_word = 0; 1624 continue; 1625 } 1626 1627 gp->rx_skbs[i] = skb; 1628 skb->dev = dev; 1629 skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); 1630 dma_addr = pci_map_page(gp->pdev, 1631 virt_to_page(skb->data), 1632 offset_in_page(skb->data), 1633 RX_BUF_ALLOC_SIZE(gp), 1634 PCI_DMA_FROMDEVICE); 1635 rxd->buffer = cpu_to_le64(dma_addr); 1636 wmb(); 1637 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 1638 skb_reserve(skb, RX_OFFSET); 1639 } 1640 1641 for (i = 0; i < TX_RING_SIZE; i++) { 1642 struct gem_txd *txd = &gb->txd[i]; 1643 1644 txd->control_word = 0; 1645 wmb(); 1646 txd->buffer = 0; 1647 } 1648 wmb(); 1649} 1650 1651/* Init PHY interface and start link poll state machine */ 1652static void gem_init_phy(struct gem *gp) 1653{ 1654 u32 mifcfg; 1655 1656 /* Revert MIF CFG setting done on stop_phy */ 1657 mifcfg = readl(gp->regs + MIF_CFG); 1658 mifcfg &= ~MIF_CFG_BBMODE; 1659 writel(mifcfg, gp->regs + MIF_CFG); 1660 1661 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { 1662 int i; 1663 1664 /* Those delay sucks, the HW seem to love them though, I'll 1665 * serisouly consider breaking some locks here to be able 1666 * to schedule instead 1667 */ 1668 for (i = 0; i < 3; i++) { 1669#ifdef CONFIG_PPC_PMAC 1670 pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0); 1671 msleep(20); 1672#endif 1673 /* Some PHYs used by apple have problem getting back to us, 1674 * we do an additional reset here 1675 */ 1676 phy_write(gp, MII_BMCR, BMCR_RESET); 1677 msleep(20); 1678 if (phy_read(gp, MII_BMCR) != 0xffff) 1679 break; 1680 if (i == 2) 1681 printk(KERN_WARNING "%s: GMAC PHY not responding !\n", 1682 gp->dev->name); 1683 } 1684 } 1685 1686 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && 1687 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { 1688 u32 val; 1689 1690 /* Init datapath mode register. */ 1691 if (gp->phy_type == phy_mii_mdio0 || 1692 gp->phy_type == phy_mii_mdio1) { 1693 val = PCS_DMODE_MGM; 1694 } else if (gp->phy_type == phy_serialink) { 1695 val = PCS_DMODE_SM | PCS_DMODE_GMOE; 1696 } else { 1697 val = PCS_DMODE_ESM; 1698 } 1699 1700 writel(val, gp->regs + PCS_DMODE); 1701 } 1702 1703 if (gp->phy_type == phy_mii_mdio0 || 1704 gp->phy_type == phy_mii_mdio1) { 1705 // XXX check for errors 1706 mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr); 1707 1708 /* Init PHY */ 1709 if (gp->phy_mii.def && gp->phy_mii.def->ops->init) 1710 gp->phy_mii.def->ops->init(&gp->phy_mii); 1711 } else { 1712 u32 val; 1713 int limit; 1714 1715 /* Reset PCS unit. */ 1716 val = readl(gp->regs + PCS_MIICTRL); 1717 val |= PCS_MIICTRL_RST; 1718 writeb(val, gp->regs + PCS_MIICTRL); 1719 1720 limit = 32; 1721 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) { 1722 udelay(100); 1723 if (limit-- <= 0) 1724 break; 1725 } 1726 if (limit <= 0) 1727 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n", 1728 gp->dev->name); 1729 1730 /* Make sure PCS is disabled while changing advertisement 1731 * configuration. 1732 */ 1733 val = readl(gp->regs + PCS_CFG); 1734 val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO); 1735 writel(val, gp->regs + PCS_CFG); 1736 1737 /* Advertise all capabilities except assymetric 1738 * pause. 1739 */ 1740 val = readl(gp->regs + PCS_MIIADV); 1741 val |= (PCS_MIIADV_FD | PCS_MIIADV_HD | 1742 PCS_MIIADV_SP | PCS_MIIADV_AP); 1743 writel(val, gp->regs + PCS_MIIADV); 1744 1745 /* Enable and restart auto-negotiation, disable wrapback/loopback, 1746 * and re-enable PCS. 1747 */ 1748 val = readl(gp->regs + PCS_MIICTRL); 1749 val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE); 1750 val &= ~PCS_MIICTRL_WB; 1751 writel(val, gp->regs + PCS_MIICTRL); 1752 1753 val = readl(gp->regs + PCS_CFG); 1754 val |= PCS_CFG_ENABLE; 1755 writel(val, gp->regs + PCS_CFG); 1756 1757 /* Make sure serialink loopback is off. The meaning 1758 * of this bit is logically inverted based upon whether 1759 * you are in Serialink or SERDES mode. 1760 */ 1761 val = readl(gp->regs + PCS_SCTRL); 1762 if (gp->phy_type == phy_serialink) 1763 val &= ~PCS_SCTRL_LOOP; 1764 else 1765 val |= PCS_SCTRL_LOOP; 1766 writel(val, gp->regs + PCS_SCTRL); 1767 } 1768 1769 /* Default aneg parameters */ 1770 gp->timer_ticks = 0; 1771 gp->lstate = link_down; 1772 netif_carrier_off(gp->dev); 1773 1774 /* Can I advertise gigabit here ? I'd need BCM PHY docs... */ 1775 spin_lock_irq(&gp->lock); 1776 gem_begin_auto_negotiation(gp, NULL); 1777 spin_unlock_irq(&gp->lock); 1778} 1779 1780/* Must be invoked under gp->lock and gp->tx_lock. */ 1781static void gem_init_dma(struct gem *gp) 1782{ 1783 u64 desc_dma = (u64) gp->gblock_dvma; 1784 u32 val; 1785 1786 val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE); 1787 writel(val, gp->regs + TXDMA_CFG); 1788 1789 writel(desc_dma >> 32, gp->regs + TXDMA_DBHI); 1790 writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW); 1791 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); 1792 1793 writel(0, gp->regs + TXDMA_KICK); 1794 1795 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | 1796 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); 1797 writel(val, gp->regs + RXDMA_CFG); 1798 1799 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); 1800 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); 1801 1802 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 1803 1804 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); 1805 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); 1806 writel(val, gp->regs + RXDMA_PTHRESH); 1807 1808 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) 1809 writel(((5 & RXDMA_BLANK_IPKTS) | 1810 ((8 << 12) & RXDMA_BLANK_ITIME)), 1811 gp->regs + RXDMA_BLANK); 1812 else 1813 writel(((5 & RXDMA_BLANK_IPKTS) | 1814 ((4 << 12) & RXDMA_BLANK_ITIME)), 1815 gp->regs + RXDMA_BLANK); 1816} 1817 1818/* Must be invoked under gp->lock and gp->tx_lock. */ 1819static u32 gem_setup_multicast(struct gem *gp) 1820{ 1821 u32 rxcfg = 0; 1822 int i; 1823 1824 if ((gp->dev->flags & IFF_ALLMULTI) || 1825 (gp->dev->mc_count > 256)) { 1826 for (i=0; i<16; i++) 1827 writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); 1828 rxcfg |= MAC_RXCFG_HFE; 1829 } else if (gp->dev->flags & IFF_PROMISC) { 1830 rxcfg |= MAC_RXCFG_PROM; 1831 } else { 1832 u16 hash_table[16]; 1833 u32 crc; 1834 struct dev_mc_list *dmi = gp->dev->mc_list; 1835 int i; 1836 1837 for (i = 0; i < 16; i++) 1838 hash_table[i] = 0; 1839 1840 for (i = 0; i < gp->dev->mc_count; i++) { 1841 char *addrs = dmi->dmi_addr; 1842 1843 dmi = dmi->next; 1844 1845 if (!(*addrs & 1)) 1846 continue; 1847 1848 crc = ether_crc_le(6, addrs); 1849 crc >>= 24; 1850 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 1851 } 1852 for (i=0; i<16; i++) 1853 writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2)); 1854 rxcfg |= MAC_RXCFG_HFE; 1855 } 1856 1857 return rxcfg; 1858} 1859 1860/* Must be invoked under gp->lock and gp->tx_lock. */ 1861static void gem_init_mac(struct gem *gp) 1862{ 1863 unsigned char *e = &gp->dev->dev_addr[0]; 1864 1865 writel(0x1bf0, gp->regs + MAC_SNDPAUSE); 1866 1867 writel(0x00, gp->regs + MAC_IPG0); 1868 writel(0x08, gp->regs + MAC_IPG1); 1869 writel(0x04, gp->regs + MAC_IPG2); 1870 writel(0x40, gp->regs + MAC_STIME); 1871 writel(0x40, gp->regs + MAC_MINFSZ); 1872 1873 /* Ethernet payload + header + FCS + optional VLAN tag. */ 1874 writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ); 1875 1876 writel(0x07, gp->regs + MAC_PASIZE); 1877 writel(0x04, gp->regs + MAC_JAMSIZE); 1878 writel(0x10, gp->regs + MAC_ATTLIM); 1879 writel(0x8808, gp->regs + MAC_MCTYPE); 1880 1881 writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED); 1882 1883 writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); 1884 writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); 1885 writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); 1886 1887 writel(0, gp->regs + MAC_ADDR3); 1888 writel(0, gp->regs + MAC_ADDR4); 1889 writel(0, gp->regs + MAC_ADDR5); 1890 1891 writel(0x0001, gp->regs + MAC_ADDR6); 1892 writel(0xc200, gp->regs + MAC_ADDR7); 1893 writel(0x0180, gp->regs + MAC_ADDR8); 1894 1895 writel(0, gp->regs + MAC_AFILT0); 1896 writel(0, gp->regs + MAC_AFILT1); 1897 writel(0, gp->regs + MAC_AFILT2); 1898 writel(0, gp->regs + MAC_AF21MSK); 1899 writel(0, gp->regs + MAC_AF0MSK); 1900 1901 gp->mac_rx_cfg = gem_setup_multicast(gp); 1902#ifdef STRIP_FCS 1903 gp->mac_rx_cfg |= MAC_RXCFG_SFCS; 1904#endif 1905 writel(0, gp->regs + MAC_NCOLL); 1906 writel(0, gp->regs + MAC_FASUCC); 1907 writel(0, gp->regs + MAC_ECOLL); 1908 writel(0, gp->regs + MAC_LCOLL); 1909 writel(0, gp->regs + MAC_DTIMER); 1910 writel(0, gp->regs + MAC_PATMPS); 1911 writel(0, gp->regs + MAC_RFCTR); 1912 writel(0, gp->regs + MAC_LERR); 1913 writel(0, gp->regs + MAC_AERR); 1914 writel(0, gp->regs + MAC_FCSERR); 1915 writel(0, gp->regs + MAC_RXCVERR); 1916 1917 /* Clear RX/TX/MAC/XIF config, we will set these up and enable 1918 * them once a link is established. 1919 */ 1920 writel(0, gp->regs + MAC_TXCFG); 1921 writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG); 1922 writel(0, gp->regs + MAC_MCCFG); 1923 writel(0, gp->regs + MAC_XIFCFG); 1924 1925 /* Setup MAC interrupts. We want to get all of the interesting 1926 * counter expiration events, but we do not want to hear about 1927 * normal rx/tx as the DMA engine tells us that. 1928 */ 1929 writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK); 1930 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); 1931 1932 /* Don't enable even the PAUSE interrupts for now, we 1933 * make no use of those events other than to record them. 1934 */ 1935 writel(0xffffffff, gp->regs + MAC_MCMASK); 1936 1937 /* Don't enable GEM's WOL in normal operations 1938 */ 1939 if (gp->has_wol) 1940 writel(0, gp->regs + WOL_WAKECSR); 1941} 1942 1943/* Must be invoked under gp->lock and gp->tx_lock. */ 1944static void gem_init_pause_thresholds(struct gem *gp) 1945{ 1946 u32 cfg; 1947 1948 /* Calculate pause thresholds. Setting the OFF threshold to the 1949 * full RX fifo size effectively disables PAUSE generation which 1950 * is what we do for 10/100 only GEMs which have FIFOs too small 1951 * to make real gains from PAUSE. 1952 */ 1953 if (gp->rx_fifo_sz <= (2 * 1024)) { 1954 gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz; 1955 } else { 1956 int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63; 1957 int off = (gp->rx_fifo_sz - (max_frame * 2)); 1958 int on = off - max_frame; 1959 1960 gp->rx_pause_off = off; 1961 gp->rx_pause_on = on; 1962 } 1963 1964 1965 /* Configure the chip "burst" DMA mode & enable some 1966 * HW bug fixes on Apple version 1967 */ 1968 cfg = 0; 1969 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) 1970 cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX; 1971#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) 1972 cfg |= GREG_CFG_IBURST; 1973#endif 1974 cfg |= ((31 << 1) & GREG_CFG_TXDMALIM); 1975 cfg |= ((31 << 6) & GREG_CFG_RXDMALIM); 1976 writel(cfg, gp->regs + GREG_CFG); 1977 1978 /* If Infinite Burst didn't stick, then use different 1979 * thresholds (and Apple bug fixes don't exist) 1980 */ 1981 if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) { 1982 cfg = ((2 << 1) & GREG_CFG_TXDMALIM); 1983 cfg |= ((8 << 6) & GREG_CFG_RXDMALIM); 1984 writel(cfg, gp->regs + GREG_CFG); 1985 } 1986} 1987 1988static int gem_check_invariants(struct gem *gp) 1989{ 1990 struct pci_dev *pdev = gp->pdev; 1991 u32 mif_cfg; 1992 1993 /* On Apple's sungem, we can't rely on registers as the chip 1994 * was been powered down by the firmware. The PHY is looked 1995 * up later on. 1996 */ 1997 if (pdev->vendor == PCI_VENDOR_ID_APPLE) { 1998 gp->phy_type = phy_mii_mdio0; 1999 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; 2000 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; 2001 gp->swrst_base = 0; 2002 2003 mif_cfg = readl(gp->regs + MIF_CFG); 2004 mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1); 2005 mif_cfg |= MIF_CFG_MDI0; 2006 writel(mif_cfg, gp->regs + MIF_CFG); 2007 writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE); 2008 writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG); 2009 2010 /* We hard-code the PHY address so we can properly bring it out of 2011 * reset later on, we can't really probe it at this point, though 2012 * that isn't an issue. 2013 */ 2014 if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC) 2015 gp->mii_phy_addr = 1; 2016 else 2017 gp->mii_phy_addr = 0; 2018 2019 return 0; 2020 } 2021 2022 mif_cfg = readl(gp->regs + MIF_CFG); 2023 2024 if (pdev->vendor == PCI_VENDOR_ID_SUN && 2025 pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) { 2026 /* One of the MII PHYs _must_ be present 2027 * as this chip has no gigabit PHY. 2028 */ 2029 if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { 2030 printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n", 2031 mif_cfg); 2032 return -1; 2033 } 2034 } 2035 2036 /* Determine initial PHY interface type guess. MDIO1 is the 2037 * external PHY and thus takes precedence over MDIO0. 2038 */ 2039 2040 if (mif_cfg & MIF_CFG_MDI1) { 2041 gp->phy_type = phy_mii_mdio1; 2042 mif_cfg |= MIF_CFG_PSELECT; 2043 writel(mif_cfg, gp->regs + MIF_CFG); 2044 } else if (mif_cfg & MIF_CFG_MDI0) { 2045 gp->phy_type = phy_mii_mdio0; 2046 mif_cfg &= ~MIF_CFG_PSELECT; 2047 writel(mif_cfg, gp->regs + MIF_CFG); 2048 } else { 2049 gp->phy_type = phy_serialink; 2050 } 2051 if (gp->phy_type == phy_mii_mdio1 || 2052 gp->phy_type == phy_mii_mdio0) { 2053 int i; 2054 2055 for (i = 0; i < 32; i++) { 2056 gp->mii_phy_addr = i; 2057 if (phy_read(gp, MII_BMCR) != 0xffff) 2058 break; 2059 } 2060 if (i == 32) { 2061 if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { 2062 printk(KERN_ERR PFX "RIO MII phy will not respond.\n"); 2063 return -1; 2064 } 2065 gp->phy_type = phy_serdes; 2066 } 2067 } 2068 2069 /* Fetch the FIFO configurations now too. */ 2070 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; 2071 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; 2072 2073 if (pdev->vendor == PCI_VENDOR_ID_SUN) { 2074 if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { 2075 if (gp->tx_fifo_sz != (9 * 1024) || 2076 gp->rx_fifo_sz != (20 * 1024)) { 2077 printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2078 gp->tx_fifo_sz, gp->rx_fifo_sz); 2079 return -1; 2080 } 2081 gp->swrst_base = 0; 2082 } else { 2083 if (gp->tx_fifo_sz != (2 * 1024) || 2084 gp->rx_fifo_sz != (2 * 1024)) { 2085 printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2086 gp->tx_fifo_sz, gp->rx_fifo_sz); 2087 return -1; 2088 } 2089 gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT; 2090 } 2091 } 2092 2093 return 0; 2094} 2095 2096/* Must be invoked under gp->lock and gp->tx_lock. */ 2097static void gem_reinit_chip(struct gem *gp) 2098{ 2099 /* Reset the chip */ 2100 gem_reset(gp); 2101 2102 /* Make sure ints are disabled */ 2103 gem_disable_ints(gp); 2104 2105 /* Allocate & setup ring buffers */ 2106 gem_init_rings(gp); 2107 2108 /* Configure pause thresholds */ 2109 gem_init_pause_thresholds(gp); 2110 2111 /* Init DMA & MAC engines */ 2112 gem_init_dma(gp); 2113 gem_init_mac(gp); 2114} 2115 2116 2117/* Must be invoked with no lock held. */ 2118static void gem_stop_phy(struct gem *gp, int wol) 2119{ 2120 u32 mifcfg; 2121 unsigned long flags; 2122 2123 /* Let the chip settle down a bit, it seems that helps 2124 * for sleep mode on some models 2125 */ 2126 msleep(10); 2127 2128 /* Make sure we aren't polling PHY status change. We 2129 * don't currently use that feature though 2130 */ 2131 mifcfg = readl(gp->regs + MIF_CFG); 2132 mifcfg &= ~MIF_CFG_POLL; 2133 writel(mifcfg, gp->regs + MIF_CFG); 2134 2135 if (wol && gp->has_wol) { 2136 unsigned char *e = &gp->dev->dev_addr[0]; 2137 u32 csr; 2138 2139 /* Setup wake-on-lan for MAGIC packet */ 2140 writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB, 2141 gp->regs + MAC_RXCFG); 2142 writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0); 2143 writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1); 2144 writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2); 2145 2146 writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT); 2147 csr = WOL_WAKECSR_ENABLE; 2148 if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0) 2149 csr |= WOL_WAKECSR_MII; 2150 writel(csr, gp->regs + WOL_WAKECSR); 2151 } else { 2152 writel(0, gp->regs + MAC_RXCFG); 2153 (void)readl(gp->regs + MAC_RXCFG); 2154 /* Machine sleep will die in strange ways if we 2155 * dont wait a bit here, looks like the chip takes 2156 * some time to really shut down 2157 */ 2158 msleep(10); 2159 } 2160 2161 writel(0, gp->regs + MAC_TXCFG); 2162 writel(0, gp->regs + MAC_XIFCFG); 2163 writel(0, gp->regs + TXDMA_CFG); 2164 writel(0, gp->regs + RXDMA_CFG); 2165 2166 if (!wol) { 2167 spin_lock_irqsave(&gp->lock, flags); 2168 spin_lock(&gp->tx_lock); 2169 gem_reset(gp); 2170 writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); 2171 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); 2172 spin_unlock(&gp->tx_lock); 2173 spin_unlock_irqrestore(&gp->lock, flags); 2174 2175 /* No need to take the lock here */ 2176 2177 if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) 2178 gp->phy_mii.def->ops->suspend(&gp->phy_mii); 2179 2180 /* According to Apple, we must set the MDIO pins to this begnign 2181 * state or we may 1) eat more current, 2) damage some PHYs 2182 */ 2183 writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG); 2184 writel(0, gp->regs + MIF_BBCLK); 2185 writel(0, gp->regs + MIF_BBDATA); 2186 writel(0, gp->regs + MIF_BBOENAB); 2187 writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG); 2188 (void) readl(gp->regs + MAC_XIFCFG); 2189 } 2190} 2191 2192 2193static int gem_do_start(struct net_device *dev) 2194{ 2195 struct gem *gp = dev->priv; 2196 unsigned long flags; 2197 2198 spin_lock_irqsave(&gp->lock, flags); 2199 spin_lock(&gp->tx_lock); 2200 2201 /* Enable the cell */ 2202 gem_get_cell(gp); 2203 2204 /* Init & setup chip hardware */ 2205 gem_reinit_chip(gp); 2206 2207 gp->running = 1; 2208 2209 if (gp->lstate == link_up) { 2210 netif_carrier_on(gp->dev); 2211 gem_set_link_modes(gp); 2212 } 2213 2214 netif_wake_queue(gp->dev); 2215 2216 spin_unlock(&gp->tx_lock); 2217 spin_unlock_irqrestore(&gp->lock, flags); 2218 2219 if (request_irq(gp->pdev->irq, gem_interrupt, 2220 IRQF_SHARED, dev->name, (void *)dev)) { 2221 printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name); 2222 2223 spin_lock_irqsave(&gp->lock, flags); 2224 spin_lock(&gp->tx_lock); 2225 2226 gp->running = 0; 2227 gem_reset(gp); 2228 gem_clean_rings(gp); 2229 gem_put_cell(gp); 2230 2231 spin_unlock(&gp->tx_lock); 2232 spin_unlock_irqrestore(&gp->lock, flags); 2233 2234 return -EAGAIN; 2235 } 2236 2237 return 0; 2238} 2239 2240static void gem_do_stop(struct net_device *dev, int wol) 2241{ 2242 struct gem *gp = dev->priv; 2243 unsigned long flags; 2244 2245 spin_lock_irqsave(&gp->lock, flags); 2246 spin_lock(&gp->tx_lock); 2247 2248 gp->running = 0; 2249 2250 /* Stop netif queue */ 2251 netif_stop_queue(dev); 2252 2253 /* Make sure ints are disabled */ 2254 gem_disable_ints(gp); 2255 2256 /* We can drop the lock now */ 2257 spin_unlock(&gp->tx_lock); 2258 spin_unlock_irqrestore(&gp->lock, flags); 2259 2260 /* If we are going to sleep with WOL */ 2261 gem_stop_dma(gp); 2262 msleep(10); 2263 if (!wol) 2264 gem_reset(gp); 2265 msleep(10); 2266 2267 /* Get rid of rings */ 2268 gem_clean_rings(gp); 2269 2270 /* No irq needed anymore */ 2271 free_irq(gp->pdev->irq, (void *) dev); 2272 2273 /* Cell not needed neither if no WOL */ 2274 if (!wol) { 2275 spin_lock_irqsave(&gp->lock, flags); 2276 gem_put_cell(gp); 2277 spin_unlock_irqrestore(&gp->lock, flags); 2278 } 2279} 2280 2281static void gem_reset_task(struct work_struct *work) 2282{ 2283 struct gem *gp = container_of(work, struct gem, reset_task); 2284 2285 mutex_lock(&gp->pm_mutex); 2286 2287 netif_poll_disable(gp->dev); 2288 2289 spin_lock_irq(&gp->lock); 2290 spin_lock(&gp->tx_lock); 2291 2292 if (gp->running == 0) 2293 goto not_running; 2294 2295 if (gp->running) { 2296 netif_stop_queue(gp->dev); 2297 2298 /* Reset the chip & rings */ 2299 gem_reinit_chip(gp); 2300 if (gp->lstate == link_up) 2301 gem_set_link_modes(gp); 2302 netif_wake_queue(gp->dev); 2303 } 2304 not_running: 2305 gp->reset_task_pending = 0; 2306 2307 spin_unlock(&gp->tx_lock); 2308 spin_unlock_irq(&gp->lock); 2309 2310 netif_poll_enable(gp->dev); 2311 2312 mutex_unlock(&gp->pm_mutex); 2313} 2314 2315 2316static int gem_open(struct net_device *dev) 2317{ 2318 struct gem *gp = dev->priv; 2319 int rc = 0; 2320 2321 mutex_lock(&gp->pm_mutex); 2322 2323 /* We need the cell enabled */ 2324 if (!gp->asleep) 2325 rc = gem_do_start(dev); 2326 gp->opened = (rc == 0); 2327 2328 mutex_unlock(&gp->pm_mutex); 2329 2330 return rc; 2331} 2332 2333static int gem_close(struct net_device *dev) 2334{ 2335 struct gem *gp = dev->priv; 2336 2337 /* Note: we don't need to call netif_poll_disable() here because 2338 * our caller (dev_close) already did it for us 2339 */ 2340 2341 mutex_lock(&gp->pm_mutex); 2342 2343 gp->opened = 0; 2344 if (!gp->asleep) 2345 gem_do_stop(dev, 0); 2346 2347 mutex_unlock(&gp->pm_mutex); 2348 2349 return 0; 2350} 2351 2352#ifdef CONFIG_PM 2353static int gem_suspend(struct pci_dev *pdev, pm_message_t state) 2354{ 2355 struct net_device *dev = pci_get_drvdata(pdev); 2356 struct gem *gp = dev->priv; 2357 unsigned long flags; 2358 2359 mutex_lock(&gp->pm_mutex); 2360 2361 netif_poll_disable(dev); 2362 2363 printk(KERN_INFO "%s: suspending, WakeOnLan %s\n", 2364 dev->name, 2365 (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled"); 2366 2367 /* Keep the cell enabled during the entire operation */ 2368 spin_lock_irqsave(&gp->lock, flags); 2369 spin_lock(&gp->tx_lock); 2370 gem_get_cell(gp); 2371 spin_unlock(&gp->tx_lock); 2372 spin_unlock_irqrestore(&gp->lock, flags); 2373 2374 /* If the driver is opened, we stop the MAC */ 2375 if (gp->opened) { 2376 /* Stop traffic, mark us closed */ 2377 netif_device_detach(dev); 2378 2379 /* Switch off MAC, remember WOL setting */ 2380 gp->asleep_wol = gp->wake_on_lan; 2381 gem_do_stop(dev, gp->asleep_wol); 2382 } else 2383 gp->asleep_wol = 0; 2384 2385 /* Mark us asleep */ 2386 gp->asleep = 1; 2387 wmb(); 2388 2389 /* Stop the link timer */ 2390 del_timer_sync(&gp->link_timer); 2391 2392 /* Now we release the mutex to not block the reset task who 2393 * can take it too. We are marked asleep, so there will be no 2394 * conflict here 2395 */ 2396 mutex_unlock(&gp->pm_mutex); 2397 2398 /* Wait for a pending reset task to complete */ 2399 while (gp->reset_task_pending) 2400 yield(); 2401 flush_scheduled_work(); 2402 2403 /* Shut the PHY down eventually and setup WOL */ 2404 gem_stop_phy(gp, gp->asleep_wol); 2405 2406 /* Make sure bus master is disabled */ 2407 pci_disable_device(gp->pdev); 2408 2409 /* Release the cell, no need to take a lock at this point since 2410 * nothing else can happen now 2411 */ 2412 gem_put_cell(gp); 2413 2414 return 0; 2415} 2416 2417static int gem_resume(struct pci_dev *pdev) 2418{ 2419 struct net_device *dev = pci_get_drvdata(pdev); 2420 struct gem *gp = dev->priv; 2421 unsigned long flags; 2422 2423 printk(KERN_INFO "%s: resuming\n", dev->name); 2424 2425 mutex_lock(&gp->pm_mutex); 2426 2427 /* Keep the cell enabled during the entire operation, no need to 2428 * take a lock here tho since nothing else can happen while we are 2429 * marked asleep 2430 */ 2431 gem_get_cell(gp); 2432 2433 /* Make sure PCI access and bus master are enabled */ 2434 if (pci_enable_device(gp->pdev)) { 2435 printk(KERN_ERR "%s: Can't re-enable chip !\n", 2436 dev->name); 2437 /* Put cell and forget it for now, it will be considered as 2438 * still asleep, a new sleep cycle may bring it back 2439 */ 2440 gem_put_cell(gp); 2441 mutex_unlock(&gp->pm_mutex); 2442 return 0; 2443 } 2444 pci_set_master(gp->pdev); 2445 2446 /* Reset everything */ 2447 gem_reset(gp); 2448 2449 /* Mark us woken up */ 2450 gp->asleep = 0; 2451 wmb(); 2452 2453 /* Bring the PHY back. Again, lock is useless at this point as 2454 * nothing can be happening until we restart the whole thing 2455 */ 2456 gem_init_phy(gp); 2457 2458 /* If we were opened, bring everything back */ 2459 if (gp->opened) { 2460 /* Restart MAC */ 2461 gem_do_start(dev); 2462 2463 /* Re-attach net device */ 2464 netif_device_attach(dev); 2465 2466 } 2467 2468 spin_lock_irqsave(&gp->lock, flags); 2469 spin_lock(&gp->tx_lock); 2470 2471 /* If we had WOL enabled, the cell clock was never turned off during 2472 * sleep, so we end up beeing unbalanced. Fix that here 2473 */ 2474 if (gp->asleep_wol) 2475 gem_put_cell(gp); 2476 2477 /* This function doesn't need to hold the cell, it will be held if the 2478 * driver is open by gem_do_start(). 2479 */ 2480 gem_put_cell(gp); 2481 2482 spin_unlock(&gp->tx_lock); 2483 spin_unlock_irqrestore(&gp->lock, flags); 2484 2485 netif_poll_enable(dev); 2486 2487 mutex_unlock(&gp->pm_mutex); 2488 2489 return 0; 2490} 2491#endif /* CONFIG_PM */ 2492 2493static struct net_device_stats *gem_get_stats(struct net_device *dev) 2494{ 2495 struct gem *gp = dev->priv; 2496 struct net_device_stats *stats = &gp->net_stats; 2497 2498 spin_lock_irq(&gp->lock); 2499 spin_lock(&gp->tx_lock); 2500 2501 /* I have seen this being called while the PM was in progress, 2502 * so we shield against this 2503 */ 2504 if (gp->running) { 2505 stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR); 2506 writel(0, gp->regs + MAC_FCSERR); 2507 2508 stats->rx_frame_errors += readl(gp->regs + MAC_AERR); 2509 writel(0, gp->regs + MAC_AERR); 2510 2511 stats->rx_length_errors += readl(gp->regs + MAC_LERR); 2512 writel(0, gp->regs + MAC_LERR); 2513 2514 stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL); 2515 stats->collisions += 2516 (readl(gp->regs + MAC_ECOLL) + 2517 readl(gp->regs + MAC_LCOLL)); 2518 writel(0, gp->regs + MAC_ECOLL); 2519 writel(0, gp->regs + MAC_LCOLL); 2520 } 2521 2522 spin_unlock(&gp->tx_lock); 2523 spin_unlock_irq(&gp->lock); 2524 2525 return &gp->net_stats; 2526} 2527 2528static int gem_set_mac_address(struct net_device *dev, void *addr) 2529{ 2530 struct sockaddr *macaddr = (struct sockaddr *) addr; 2531 struct gem *gp = dev->priv; 2532 unsigned char *e = &dev->dev_addr[0]; 2533 2534 if (!is_valid_ether_addr(macaddr->sa_data)) 2535 return -EADDRNOTAVAIL; 2536 2537 if (!netif_running(dev) || !netif_device_present(dev)) { 2538 /* We'll just catch it later when the 2539 * device is up'd or resumed. 2540 */ 2541 memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); 2542 return 0; 2543 } 2544 2545 mutex_lock(&gp->pm_mutex); 2546 memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); 2547 if (gp->running) { 2548 writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); 2549 writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); 2550 writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); 2551 } 2552 mutex_unlock(&gp->pm_mutex); 2553 2554 return 0; 2555} 2556 2557static void gem_set_multicast(struct net_device *dev) 2558{ 2559 struct gem *gp = dev->priv; 2560 u32 rxcfg, rxcfg_new; 2561 int limit = 10000; 2562 2563 2564 spin_lock_irq(&gp->lock); 2565 spin_lock(&gp->tx_lock); 2566 2567 if (!gp->running) 2568 goto bail; 2569 2570 netif_stop_queue(dev); 2571 2572 rxcfg = readl(gp->regs + MAC_RXCFG); 2573 rxcfg_new = gem_setup_multicast(gp); 2574#ifdef STRIP_FCS 2575 rxcfg_new |= MAC_RXCFG_SFCS; 2576#endif 2577 gp->mac_rx_cfg = rxcfg_new; 2578 2579 writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 2580 while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) { 2581 if (!limit--) 2582 break; 2583 udelay(10); 2584 } 2585 2586 rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE); 2587 rxcfg |= rxcfg_new; 2588 2589 writel(rxcfg, gp->regs + MAC_RXCFG); 2590 2591 netif_wake_queue(dev); 2592 2593 bail: 2594 spin_unlock(&gp->tx_lock); 2595 spin_unlock_irq(&gp->lock); 2596} 2597 2598/* Jumbo-grams don't seem to work :-( */ 2599#define GEM_MIN_MTU 68 2600#if 1 2601#define GEM_MAX_MTU 1500 2602#else 2603#define GEM_MAX_MTU 9000 2604#endif 2605 2606static int gem_change_mtu(struct net_device *dev, int new_mtu) 2607{ 2608 struct gem *gp = dev->priv; 2609 2610 if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) 2611 return -EINVAL; 2612 2613 if (!netif_running(dev) || !netif_device_present(dev)) { 2614 /* We'll just catch it later when the 2615 * device is up'd or resumed. 2616 */ 2617 dev->mtu = new_mtu; 2618 return 0; 2619 } 2620 2621 mutex_lock(&gp->pm_mutex); 2622 spin_lock_irq(&gp->lock); 2623 spin_lock(&gp->tx_lock); 2624 dev->mtu = new_mtu; 2625 if (gp->running) { 2626 gem_reinit_chip(gp); 2627 if (gp->lstate == link_up) 2628 gem_set_link_modes(gp); 2629 } 2630 spin_unlock(&gp->tx_lock); 2631 spin_unlock_irq(&gp->lock); 2632 mutex_unlock(&gp->pm_mutex); 2633 2634 return 0; 2635} 2636 2637static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2638{ 2639 struct gem *gp = dev->priv; 2640 2641 strcpy(info->driver, DRV_NAME); 2642 strcpy(info->version, DRV_VERSION); 2643 strcpy(info->bus_info, pci_name(gp->pdev)); 2644} 2645 2646static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2647{ 2648 struct gem *gp = dev->priv; 2649 2650 if (gp->phy_type == phy_mii_mdio0 || 2651 gp->phy_type == phy_mii_mdio1) { 2652 if (gp->phy_mii.def) 2653 cmd->supported = gp->phy_mii.def->features; 2654 else 2655 cmd->supported = (SUPPORTED_10baseT_Half | 2656 SUPPORTED_10baseT_Full); 2657 2658 /* XXX hardcoded stuff for now */ 2659 cmd->port = PORT_MII; 2660 cmd->transceiver = XCVR_EXTERNAL; 2661 cmd->phy_address = 0; /* XXX fixed PHYAD */ 2662 2663 /* Return current PHY settings */ 2664 spin_lock_irq(&gp->lock); 2665 cmd->autoneg = gp->want_autoneg; 2666 cmd->speed = gp->phy_mii.speed; 2667 cmd->duplex = gp->phy_mii.duplex; 2668 cmd->advertising = gp->phy_mii.advertising; 2669 2670 /* If we started with a forced mode, we don't have a default 2671 * advertise set, we need to return something sensible so 2672 * userland can re-enable autoneg properly. 2673 */ 2674 if (cmd->advertising == 0) 2675 cmd->advertising = cmd->supported; 2676 spin_unlock_irq(&gp->lock); 2677 } else { // XXX PCS ? 2678 cmd->supported = 2679 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 2680 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 2681 SUPPORTED_Autoneg); 2682 cmd->advertising = cmd->supported; 2683 cmd->speed = 0; 2684 cmd->duplex = cmd->port = cmd->phy_address = 2685 cmd->transceiver = cmd->autoneg = 0; 2686 } 2687 cmd->maxtxpkt = cmd->maxrxpkt = 0; 2688 2689 return 0; 2690} 2691 2692static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2693{ 2694 struct gem *gp = dev->priv; 2695 2696 /* Verify the settings we care about. */ 2697 if (cmd->autoneg != AUTONEG_ENABLE && 2698 cmd->autoneg != AUTONEG_DISABLE) 2699 return -EINVAL; 2700 2701 if (cmd->autoneg == AUTONEG_ENABLE && 2702 cmd->advertising == 0) 2703 return -EINVAL; 2704 2705 if (cmd->autoneg == AUTONEG_DISABLE && 2706 ((cmd->speed != SPEED_1000 && 2707 cmd->speed != SPEED_100 && 2708 cmd->speed != SPEED_10) || 2709 (cmd->duplex != DUPLEX_HALF && 2710 cmd->duplex != DUPLEX_FULL))) 2711 return -EINVAL; 2712 2713 /* Apply settings and restart link process. */ 2714 spin_lock_irq(&gp->lock); 2715 gem_get_cell(gp); 2716 gem_begin_auto_negotiation(gp, cmd); 2717 gem_put_cell(gp); 2718 spin_unlock_irq(&gp->lock); 2719 2720 return 0; 2721} 2722 2723static int gem_nway_reset(struct net_device *dev) 2724{ 2725 struct gem *gp = dev->priv; 2726 2727 if (!gp->want_autoneg) 2728 return -EINVAL; 2729 2730 /* Restart link process. */ 2731 spin_lock_irq(&gp->lock); 2732 gem_get_cell(gp); 2733 gem_begin_auto_negotiation(gp, NULL); 2734 gem_put_cell(gp); 2735 spin_unlock_irq(&gp->lock); 2736 2737 return 0; 2738} 2739 2740static u32 gem_get_msglevel(struct net_device *dev) 2741{ 2742 struct gem *gp = dev->priv; 2743 return gp->msg_enable; 2744} 2745 2746static void gem_set_msglevel(struct net_device *dev, u32 value) 2747{ 2748 struct gem *gp = dev->priv; 2749 gp->msg_enable = value; 2750} 2751 2752 2753/* Add more when I understand how to program the chip */ 2754/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */ 2755 2756#define WOL_SUPPORTED_MASK (WAKE_MAGIC) 2757 2758static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2759{ 2760 struct gem *gp = dev->priv; 2761 2762 /* Add more when I understand how to program the chip */ 2763 if (gp->has_wol) { 2764 wol->supported = WOL_SUPPORTED_MASK; 2765 wol->wolopts = gp->wake_on_lan; 2766 } else { 2767 wol->supported = 0; 2768 wol->wolopts = 0; 2769 } 2770} 2771 2772static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2773{ 2774 struct gem *gp = dev->priv; 2775 2776 if (!gp->has_wol) 2777 return -EOPNOTSUPP; 2778 gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK; 2779 return 0; 2780} 2781 2782static const struct ethtool_ops gem_ethtool_ops = { 2783 .get_drvinfo = gem_get_drvinfo, 2784 .get_link = ethtool_op_get_link, 2785 .get_settings = gem_get_settings, 2786 .set_settings = gem_set_settings, 2787 .nway_reset = gem_nway_reset, 2788 .get_msglevel = gem_get_msglevel, 2789 .set_msglevel = gem_set_msglevel, 2790 .get_wol = gem_get_wol, 2791 .set_wol = gem_set_wol, 2792}; 2793 2794static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2795{ 2796 struct gem *gp = dev->priv; 2797 struct mii_ioctl_data *data = if_mii(ifr); 2798 int rc = -EOPNOTSUPP; 2799 unsigned long flags; 2800 2801 /* Hold the PM mutex while doing ioctl's or we may collide 2802 * with power management. 2803 */ 2804 mutex_lock(&gp->pm_mutex); 2805 2806 spin_lock_irqsave(&gp->lock, flags); 2807 gem_get_cell(gp); 2808 spin_unlock_irqrestore(&gp->lock, flags); 2809 2810 switch (cmd) { 2811 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 2812 data->phy_id = gp->mii_phy_addr; 2813 /* Fallthrough... */ 2814 2815 case SIOCGMIIREG: /* Read MII PHY register. */ 2816 if (!gp->running) 2817 rc = -EAGAIN; 2818 else { 2819 data->val_out = __phy_read(gp, data->phy_id & 0x1f, 2820 data->reg_num & 0x1f); 2821 rc = 0; 2822 } 2823 break; 2824 2825 case SIOCSMIIREG: /* Write MII PHY register. */ 2826 if (!capable(CAP_NET_ADMIN)) 2827 rc = -EPERM; 2828 else if (!gp->running) 2829 rc = -EAGAIN; 2830 else { 2831 __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, 2832 data->val_in); 2833 rc = 0; 2834 } 2835 break; 2836 }; 2837 2838 spin_lock_irqsave(&gp->lock, flags); 2839 gem_put_cell(gp); 2840 spin_unlock_irqrestore(&gp->lock, flags); 2841 2842 mutex_unlock(&gp->pm_mutex); 2843 2844 return rc; 2845} 2846 2847#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC)) 2848/* Fetch MAC address from vital product data of PCI ROM. */ 2849static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) 2850{ 2851 int this_offset; 2852 2853 for (this_offset = 0x20; this_offset < len; this_offset++) { 2854 void __iomem *p = rom_base + this_offset; 2855 int i; 2856 2857 if (readb(p + 0) != 0x90 || 2858 readb(p + 1) != 0x00 || 2859 readb(p + 2) != 0x09 || 2860 readb(p + 3) != 0x4e || 2861 readb(p + 4) != 0x41 || 2862 readb(p + 5) != 0x06) 2863 continue; 2864 2865 this_offset += 6; 2866 p += 6; 2867 2868 for (i = 0; i < 6; i++) 2869 dev_addr[i] = readb(p + i); 2870 return 1; 2871 } 2872 return 0; 2873} 2874 2875static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) 2876{ 2877 size_t size; 2878 void __iomem *p = pci_map_rom(pdev, &size); 2879 2880 if (p) { 2881 int found; 2882 2883 found = readb(p) == 0x55 && 2884 readb(p + 1) == 0xaa && 2885 find_eth_addr_in_vpd(p, (64 * 1024), dev_addr); 2886 pci_unmap_rom(pdev, p); 2887 if (found) 2888 return; 2889 } 2890 2891 /* Sun MAC prefix then 3 random bytes. */ 2892 dev_addr[0] = 0x08; 2893 dev_addr[1] = 0x00; 2894 dev_addr[2] = 0x20; 2895 get_random_bytes(dev_addr + 3, 3); 2896 return; 2897} 2898#endif /* not Sparc and not PPC */ 2899 2900static int __devinit gem_get_device_address(struct gem *gp) 2901{ 2902#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC) 2903 struct net_device *dev = gp->dev; 2904 const unsigned char *addr; 2905 2906 addr = of_get_property(gp->of_node, "local-mac-address", NULL); 2907 if (addr == NULL) { 2908#ifdef CONFIG_SPARC 2909 addr = idprom->id_ethaddr; 2910#else 2911 printk("\n"); 2912 printk(KERN_ERR "%s: can't get mac-address\n", dev->name); 2913 return -1; 2914#endif 2915 } 2916 memcpy(dev->dev_addr, addr, 6); 2917#else 2918 get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); 2919#endif 2920 return 0; 2921} 2922 2923static void gem_remove_one(struct pci_dev *pdev) 2924{ 2925 struct net_device *dev = pci_get_drvdata(pdev); 2926 2927 if (dev) { 2928 struct gem *gp = dev->priv; 2929 2930 unregister_netdev(dev); 2931 2932 /* Stop the link timer */ 2933 del_timer_sync(&gp->link_timer); 2934 2935 /* We shouldn't need any locking here */ 2936 gem_get_cell(gp); 2937 2938 /* Wait for a pending reset task to complete */ 2939 while (gp->reset_task_pending) 2940 yield(); 2941 flush_scheduled_work(); 2942 2943 /* Shut the PHY down */ 2944 gem_stop_phy(gp, 0); 2945 2946 gem_put_cell(gp); 2947 2948 /* Make sure bus master is disabled */ 2949 pci_disable_device(gp->pdev); 2950 2951 /* Free resources */ 2952 pci_free_consistent(pdev, 2953 sizeof(struct gem_init_block), 2954 gp->init_block, 2955 gp->gblock_dvma); 2956 iounmap(gp->regs); 2957 pci_release_regions(pdev); 2958 free_netdev(dev); 2959 2960 pci_set_drvdata(pdev, NULL); 2961 } 2962} 2963 2964static int __devinit gem_init_one(struct pci_dev *pdev, 2965 const struct pci_device_id *ent) 2966{ 2967 static int gem_version_printed = 0; 2968 unsigned long gemreg_base, gemreg_len; 2969 struct net_device *dev; 2970 struct gem *gp; 2971 int i, err, pci_using_dac; 2972 2973 if (gem_version_printed++ == 0) 2974 printk(KERN_INFO "%s", version); 2975 2976 /* Apple gmac note: during probe, the chip is powered up by 2977 * the arch code to allow the code below to work (and to let 2978 * the chip be probed on the config space. It won't stay powered 2979 * up until the interface is brought up however, so we can't rely 2980 * on register configuration done at this point. 2981 */ 2982 err = pci_enable_device(pdev); 2983 if (err) { 2984 printk(KERN_ERR PFX "Cannot enable MMIO operation, " 2985 "aborting.\n"); 2986 return err; 2987 } 2988 pci_set_master(pdev); 2989 2990 /* Configure DMA attributes. */ 2991 2992 /* All of the GEM documentation states that 64-bit DMA addressing 2993 * is fully supported and should work just fine. However the 2994 * front end for RIO based GEMs is different and only supports 2995 * 32-bit addressing. 2996 * 2997 * For now we assume the various PPC GEMs are 32-bit only as well. 2998 */ 2999 if (pdev->vendor == PCI_VENDOR_ID_SUN && 3000 pdev->device == PCI_DEVICE_ID_SUN_GEM && 3001 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 3002 pci_using_dac = 1; 3003 } else { 3004 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3005 if (err) { 3006 printk(KERN_ERR PFX "No usable DMA configuration, " 3007 "aborting.\n"); 3008 goto err_disable_device; 3009 } 3010 pci_using_dac = 0; 3011 } 3012 3013 gemreg_base = pci_resource_start(pdev, 0); 3014 gemreg_len = pci_resource_len(pdev, 0); 3015 3016 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { 3017 printk(KERN_ERR PFX "Cannot find proper PCI device " 3018 "base address, aborting.\n"); 3019 err = -ENODEV; 3020 goto err_disable_device; 3021 } 3022 3023 dev = alloc_etherdev(sizeof(*gp)); 3024 if (!dev) { 3025 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); 3026 err = -ENOMEM; 3027 goto err_disable_device; 3028 } 3029 SET_MODULE_OWNER(dev); 3030 SET_NETDEV_DEV(dev, &pdev->dev); 3031 3032 gp = dev->priv; 3033 3034 err = pci_request_regions(pdev, DRV_NAME); 3035 if (err) { 3036 printk(KERN_ERR PFX "Cannot obtain PCI resources, " 3037 "aborting.\n"); 3038 goto err_out_free_netdev; 3039 } 3040 3041 gp->pdev = pdev; 3042 dev->base_addr = (long) pdev; 3043 gp->dev = dev; 3044 3045 gp->msg_enable = DEFAULT_MSG; 3046 3047 spin_lock_init(&gp->lock); 3048 spin_lock_init(&gp->tx_lock); 3049 mutex_init(&gp->pm_mutex); 3050 3051 init_timer(&gp->link_timer); 3052 gp->link_timer.function = gem_link_timer; 3053 gp->link_timer.data = (unsigned long) gp; 3054 3055 INIT_WORK(&gp->reset_task, gem_reset_task); 3056 3057 gp->lstate = link_down; 3058 gp->timer_ticks = 0; 3059 netif_carrier_off(dev); 3060 3061 gp->regs = ioremap(gemreg_base, gemreg_len); 3062 if (gp->regs == 0UL) { 3063 printk(KERN_ERR PFX "Cannot map device registers, " 3064 "aborting.\n"); 3065 err = -EIO; 3066 goto err_out_free_res; 3067 } 3068 3069 /* On Apple, we want a reference to the Open Firmware device-tree 3070 * node. We use it for clock control. 3071 */ 3072#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) 3073 gp->of_node = pci_device_to_OF_node(pdev); 3074#endif 3075 3076 /* Only Apple version supports WOL afaik */ 3077 if (pdev->vendor == PCI_VENDOR_ID_APPLE) 3078 gp->has_wol = 1; 3079 3080 /* Make sure cell is enabled */ 3081 gem_get_cell(gp); 3082 3083 /* Make sure everything is stopped and in init state */ 3084 gem_reset(gp); 3085 3086 /* Fill up the mii_phy structure (even if we won't use it) */ 3087 gp->phy_mii.dev = dev; 3088 gp->phy_mii.mdio_read = _phy_read; 3089 gp->phy_mii.mdio_write = _phy_write; 3090#ifdef CONFIG_PPC_PMAC 3091 gp->phy_mii.platform_data = gp->of_node; 3092#endif 3093 /* By default, we start with autoneg */ 3094 gp->want_autoneg = 1; 3095 3096 /* Check fifo sizes, PHY type, etc... */ 3097 if (gem_check_invariants(gp)) { 3098 err = -ENODEV; 3099 goto err_out_iounmap; 3100 } 3101 3102 /* It is guaranteed that the returned buffer will be at least 3103 * PAGE_SIZE aligned. 3104 */ 3105 gp->init_block = (struct gem_init_block *) 3106 pci_alloc_consistent(pdev, sizeof(struct gem_init_block), 3107 &gp->gblock_dvma); 3108 if (!gp->init_block) { 3109 printk(KERN_ERR PFX "Cannot allocate init block, " 3110 "aborting.\n"); 3111 err = -ENOMEM; 3112 goto err_out_iounmap; 3113 } 3114 3115 if (gem_get_device_address(gp)) 3116 goto err_out_free_consistent; 3117 3118 dev->open = gem_open; 3119 dev->stop = gem_close; 3120 dev->hard_start_xmit = gem_start_xmit; 3121 dev->get_stats = gem_get_stats; 3122 dev->set_multicast_list = gem_set_multicast; 3123 dev->do_ioctl = gem_ioctl; 3124 dev->poll = gem_poll; 3125 dev->weight = 64; 3126 dev->ethtool_ops = &gem_ethtool_ops; 3127 dev->tx_timeout = gem_tx_timeout; 3128 dev->watchdog_timeo = 5 * HZ; 3129 dev->change_mtu = gem_change_mtu; 3130 dev->irq = pdev->irq; 3131 dev->dma = 0; 3132 dev->set_mac_address = gem_set_mac_address; 3133#ifdef CONFIG_NET_POLL_CONTROLLER 3134 dev->poll_controller = gem_poll_controller; 3135#endif 3136 3137 /* Set that now, in case PM kicks in now */ 3138 pci_set_drvdata(pdev, dev); 3139 3140 /* Detect & init PHY, start autoneg, we release the cell now 3141 * too, it will be managed by whoever needs it 3142 */ 3143 gem_init_phy(gp); 3144 3145 spin_lock_irq(&gp->lock); 3146 gem_put_cell(gp); 3147 spin_unlock_irq(&gp->lock); 3148 3149 /* Register with kernel */ 3150 if (register_netdev(dev)) { 3151 printk(KERN_ERR PFX "Cannot register net device, " 3152 "aborting.\n"); 3153 err = -ENOMEM; 3154 goto err_out_free_consistent; 3155 } 3156 3157 printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ", 3158 dev->name); 3159 for (i = 0; i < 6; i++) 3160 printk("%2.2x%c", dev->dev_addr[i], 3161 i == 5 ? ' ' : ':'); 3162 printk("\n"); 3163 3164 if (gp->phy_type == phy_mii_mdio0 || 3165 gp->phy_type == phy_mii_mdio1) 3166 printk(KERN_INFO "%s: Found %s PHY\n", dev->name, 3167 gp->phy_mii.def ? gp->phy_mii.def->name : "no"); 3168 3169 /* GEM can do it all... */ 3170 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX; 3171 if (pci_using_dac) 3172 dev->features |= NETIF_F_HIGHDMA; 3173 3174 return 0; 3175 3176err_out_free_consistent: 3177 gem_remove_one(pdev); 3178err_out_iounmap: 3179 gem_put_cell(gp); 3180 iounmap(gp->regs); 3181 3182err_out_free_res: 3183 pci_release_regions(pdev); 3184 3185err_out_free_netdev: 3186 free_netdev(dev); 3187err_disable_device: 3188 pci_disable_device(pdev); 3189 return err; 3190 3191} 3192 3193 3194static struct pci_driver gem_driver = { 3195 .name = GEM_MODULE_NAME, 3196 .id_table = gem_pci_tbl, 3197 .probe = gem_init_one, 3198 .remove = gem_remove_one, 3199#ifdef CONFIG_PM 3200 .suspend = gem_suspend, 3201 .resume = gem_resume, 3202#endif /* CONFIG_PM */ 3203}; 3204 3205static int __init gem_init(void) 3206{ 3207 return pci_register_driver(&gem_driver); 3208} 3209 3210static void __exit gem_cleanup(void) 3211{ 3212 pci_unregister_driver(&gem_driver); 3213} 3214 3215module_init(gem_init); 3216module_exit(gem_cleanup);