Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.21-rc5 3208 lines 81 kB view raw
1/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $ 2 * sungem.c: Sun GEM ethernet driver. 3 * 4 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com) 5 * 6 * Support for Apple GMAC and assorted PHYs, WOL, Power Management 7 * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org) 8 * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp. 9 * 10 * NAPI and NETPOLL support 11 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com) 12 * 13 * TODO: 14 * - Now that the driver was significantly simplified, I need to rework 15 * the locking. I'm sure we don't need _2_ spinlocks, and we probably 16 * can avoid taking most of them for so long period of time (and schedule 17 * instead). The main issues at this point are caused by the netdev layer 18 * though: 19 * 20 * gem_change_mtu() and gem_set_multicast() are called with a read_lock() 21 * help by net/core/dev.c, thus they can't schedule. That means they can't 22 * call netif_poll_disable() neither, thus force gem_poll() to keep a spinlock 23 * where it could have been dropped. change_mtu especially would love also to 24 * be able to msleep instead of horrid locked delays when resetting the HW, 25 * but that read_lock() makes it impossible, unless I defer it's action to 26 * the reset task, which means it'll be asynchronous (won't take effect until 27 * the system schedules a bit). 28 * 29 * Also, it would probably be possible to also remove most of the long-life 30 * locking in open/resume code path (gem_reinit_chip) by beeing more careful 31 * about when we can start taking interrupts or get xmit() called... 32 */ 33 34#include <linux/module.h> 35#include <linux/kernel.h> 36#include <linux/types.h> 37#include <linux/fcntl.h> 38#include <linux/interrupt.h> 39#include <linux/ioport.h> 40#include <linux/in.h> 41#include <linux/slab.h> 42#include <linux/string.h> 43#include <linux/delay.h> 44#include <linux/init.h> 45#include <linux/errno.h> 46#include <linux/pci.h> 47#include <linux/dma-mapping.h> 48#include <linux/netdevice.h> 49#include <linux/etherdevice.h> 50#include <linux/skbuff.h> 51#include <linux/mii.h> 52#include <linux/ethtool.h> 53#include <linux/crc32.h> 54#include <linux/random.h> 55#include <linux/workqueue.h> 56#include <linux/if_vlan.h> 57#include <linux/bitops.h> 58#include <linux/mutex.h> 59#include <linux/mm.h> 60 61#include <asm/system.h> 62#include <asm/io.h> 63#include <asm/byteorder.h> 64#include <asm/uaccess.h> 65#include <asm/irq.h> 66 67#ifdef __sparc__ 68#include <asm/idprom.h> 69#include <asm/openprom.h> 70#include <asm/oplib.h> 71#include <asm/pbm.h> 72#endif 73 74#ifdef CONFIG_PPC_PMAC 75#include <asm/pci-bridge.h> 76#include <asm/prom.h> 77#include <asm/machdep.h> 78#include <asm/pmac_feature.h> 79#endif 80 81#include "sungem_phy.h" 82#include "sungem.h" 83 84/* Stripping FCS is causing problems, disabled for now */ 85#undef STRIP_FCS 86 87#define DEFAULT_MSG (NETIF_MSG_DRV | \ 88 NETIF_MSG_PROBE | \ 89 NETIF_MSG_LINK) 90 91#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ 92 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ 93 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \ 94 SUPPORTED_Pause | SUPPORTED_Autoneg) 95 96#define DRV_NAME "sungem" 97#define DRV_VERSION "0.98" 98#define DRV_RELDATE "8/24/03" 99#define DRV_AUTHOR "David S. Miller (davem@redhat.com)" 100 101static char version[] __devinitdata = 102 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; 103 104MODULE_AUTHOR(DRV_AUTHOR); 105MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); 106MODULE_LICENSE("GPL"); 107 108#define GEM_MODULE_NAME "gem" 109#define PFX GEM_MODULE_NAME ": " 110 111static struct pci_device_id gem_pci_tbl[] = { 112 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, 113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 114 115 /* These models only differ from the original GEM in 116 * that their tx/rx fifos are of a different size and 117 * they only support 10/100 speeds. -DaveM 118 * 119 * Apple's GMAC does support gigabit on machines with 120 * the BCM54xx PHYs. -BenH 121 */ 122 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM, 123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 124 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC, 125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 126 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP, 127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 128 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2, 129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 130 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC, 131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 132 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM, 133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 134 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC, 135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 136 {0, } 137}; 138 139MODULE_DEVICE_TABLE(pci, gem_pci_tbl); 140 141static u16 __phy_read(struct gem *gp, int phy_addr, int reg) 142{ 143 u32 cmd; 144 int limit = 10000; 145 146 cmd = (1 << 30); 147 cmd |= (2 << 28); 148 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; 149 cmd |= (reg << 18) & MIF_FRAME_REGAD; 150 cmd |= (MIF_FRAME_TAMSB); 151 writel(cmd, gp->regs + MIF_FRAME); 152 153 while (limit--) { 154 cmd = readl(gp->regs + MIF_FRAME); 155 if (cmd & MIF_FRAME_TALSB) 156 break; 157 158 udelay(10); 159 } 160 161 if (!limit) 162 cmd = 0xffff; 163 164 return cmd & MIF_FRAME_DATA; 165} 166 167static inline int _phy_read(struct net_device *dev, int mii_id, int reg) 168{ 169 struct gem *gp = dev->priv; 170 return __phy_read(gp, mii_id, reg); 171} 172 173static inline u16 phy_read(struct gem *gp, int reg) 174{ 175 return __phy_read(gp, gp->mii_phy_addr, reg); 176} 177 178static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val) 179{ 180 u32 cmd; 181 int limit = 10000; 182 183 cmd = (1 << 30); 184 cmd |= (1 << 28); 185 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; 186 cmd |= (reg << 18) & MIF_FRAME_REGAD; 187 cmd |= (MIF_FRAME_TAMSB); 188 cmd |= (val & MIF_FRAME_DATA); 189 writel(cmd, gp->regs + MIF_FRAME); 190 191 while (limit--) { 192 cmd = readl(gp->regs + MIF_FRAME); 193 if (cmd & MIF_FRAME_TALSB) 194 break; 195 196 udelay(10); 197 } 198} 199 200static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val) 201{ 202 struct gem *gp = dev->priv; 203 __phy_write(gp, mii_id, reg, val & 0xffff); 204} 205 206static inline void phy_write(struct gem *gp, int reg, u16 val) 207{ 208 __phy_write(gp, gp->mii_phy_addr, reg, val); 209} 210 211static inline void gem_enable_ints(struct gem *gp) 212{ 213 /* Enable all interrupts but TXDONE */ 214 writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK); 215} 216 217static inline void gem_disable_ints(struct gem *gp) 218{ 219 /* Disable all interrupts, including TXDONE */ 220 writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK); 221} 222 223static void gem_get_cell(struct gem *gp) 224{ 225 BUG_ON(gp->cell_enabled < 0); 226 gp->cell_enabled++; 227#ifdef CONFIG_PPC_PMAC 228 if (gp->cell_enabled == 1) { 229 mb(); 230 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1); 231 udelay(10); 232 } 233#endif /* CONFIG_PPC_PMAC */ 234} 235 236/* Turn off the chip's clock */ 237static void gem_put_cell(struct gem *gp) 238{ 239 BUG_ON(gp->cell_enabled <= 0); 240 gp->cell_enabled--; 241#ifdef CONFIG_PPC_PMAC 242 if (gp->cell_enabled == 0) { 243 mb(); 244 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0); 245 udelay(10); 246 } 247#endif /* CONFIG_PPC_PMAC */ 248} 249 250static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) 251{ 252 if (netif_msg_intr(gp)) 253 printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name); 254} 255 256static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 257{ 258 u32 pcs_istat = readl(gp->regs + PCS_ISTAT); 259 u32 pcs_miistat; 260 261 if (netif_msg_intr(gp)) 262 printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n", 263 gp->dev->name, pcs_istat); 264 265 if (!(pcs_istat & PCS_ISTAT_LSC)) { 266 printk(KERN_ERR "%s: PCS irq but no link status change???\n", 267 dev->name); 268 return 0; 269 } 270 271 /* The link status bit latches on zero, so you must 272 * read it twice in such a case to see a transition 273 * to the link being up. 274 */ 275 pcs_miistat = readl(gp->regs + PCS_MIISTAT); 276 if (!(pcs_miistat & PCS_MIISTAT_LS)) 277 pcs_miistat |= 278 (readl(gp->regs + PCS_MIISTAT) & 279 PCS_MIISTAT_LS); 280 281 if (pcs_miistat & PCS_MIISTAT_ANC) { 282 /* The remote-fault indication is only valid 283 * when autoneg has completed. 284 */ 285 if (pcs_miistat & PCS_MIISTAT_RF) 286 printk(KERN_INFO "%s: PCS AutoNEG complete, " 287 "RemoteFault\n", dev->name); 288 else 289 printk(KERN_INFO "%s: PCS AutoNEG complete.\n", 290 dev->name); 291 } 292 293 if (pcs_miistat & PCS_MIISTAT_LS) { 294 printk(KERN_INFO "%s: PCS link is now up.\n", 295 dev->name); 296 netif_carrier_on(gp->dev); 297 } else { 298 printk(KERN_INFO "%s: PCS link is now down.\n", 299 dev->name); 300 netif_carrier_off(gp->dev); 301 /* If this happens and the link timer is not running, 302 * reset so we re-negotiate. 303 */ 304 if (!timer_pending(&gp->link_timer)) 305 return 1; 306 } 307 308 return 0; 309} 310 311static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 312{ 313 u32 txmac_stat = readl(gp->regs + MAC_TXSTAT); 314 315 if (netif_msg_intr(gp)) 316 printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", 317 gp->dev->name, txmac_stat); 318 319 /* Defer timer expiration is quite normal, 320 * don't even log the event. 321 */ 322 if ((txmac_stat & MAC_TXSTAT_DTE) && 323 !(txmac_stat & ~MAC_TXSTAT_DTE)) 324 return 0; 325 326 if (txmac_stat & MAC_TXSTAT_URUN) { 327 printk(KERN_ERR "%s: TX MAC xmit underrun.\n", 328 dev->name); 329 gp->net_stats.tx_fifo_errors++; 330 } 331 332 if (txmac_stat & MAC_TXSTAT_MPE) { 333 printk(KERN_ERR "%s: TX MAC max packet size error.\n", 334 dev->name); 335 gp->net_stats.tx_errors++; 336 } 337 338 /* The rest are all cases of one of the 16-bit TX 339 * counters expiring. 340 */ 341 if (txmac_stat & MAC_TXSTAT_NCE) 342 gp->net_stats.collisions += 0x10000; 343 344 if (txmac_stat & MAC_TXSTAT_ECE) { 345 gp->net_stats.tx_aborted_errors += 0x10000; 346 gp->net_stats.collisions += 0x10000; 347 } 348 349 if (txmac_stat & MAC_TXSTAT_LCE) { 350 gp->net_stats.tx_aborted_errors += 0x10000; 351 gp->net_stats.collisions += 0x10000; 352 } 353 354 /* We do not keep track of MAC_TXSTAT_FCE and 355 * MAC_TXSTAT_PCE events. 356 */ 357 return 0; 358} 359 360/* When we get a RX fifo overflow, the RX unit in GEM is probably hung 361 * so we do the following. 362 * 363 * If any part of the reset goes wrong, we return 1 and that causes the 364 * whole chip to be reset. 365 */ 366static int gem_rxmac_reset(struct gem *gp) 367{ 368 struct net_device *dev = gp->dev; 369 int limit, i; 370 u64 desc_dma; 371 u32 val; 372 373 /* First, reset & disable MAC RX. */ 374 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); 375 for (limit = 0; limit < 5000; limit++) { 376 if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD)) 377 break; 378 udelay(10); 379 } 380 if (limit == 5000) { 381 printk(KERN_ERR "%s: RX MAC will not reset, resetting whole " 382 "chip.\n", dev->name); 383 return 1; 384 } 385 386 writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB, 387 gp->regs + MAC_RXCFG); 388 for (limit = 0; limit < 5000; limit++) { 389 if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB)) 390 break; 391 udelay(10); 392 } 393 if (limit == 5000) { 394 printk(KERN_ERR "%s: RX MAC will not disable, resetting whole " 395 "chip.\n", dev->name); 396 return 1; 397 } 398 399 /* Second, disable RX DMA. */ 400 writel(0, gp->regs + RXDMA_CFG); 401 for (limit = 0; limit < 5000; limit++) { 402 if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE)) 403 break; 404 udelay(10); 405 } 406 if (limit == 5000) { 407 printk(KERN_ERR "%s: RX DMA will not disable, resetting whole " 408 "chip.\n", dev->name); 409 return 1; 410 } 411 412 udelay(5000); 413 414 /* Execute RX reset command. */ 415 writel(gp->swrst_base | GREG_SWRST_RXRST, 416 gp->regs + GREG_SWRST); 417 for (limit = 0; limit < 5000; limit++) { 418 if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST)) 419 break; 420 udelay(10); 421 } 422 if (limit == 5000) { 423 printk(KERN_ERR "%s: RX reset command will not execute, resetting " 424 "whole chip.\n", dev->name); 425 return 1; 426 } 427 428 /* Refresh the RX ring. */ 429 for (i = 0; i < RX_RING_SIZE; i++) { 430 struct gem_rxd *rxd = &gp->init_block->rxd[i]; 431 432 if (gp->rx_skbs[i] == NULL) { 433 printk(KERN_ERR "%s: Parts of RX ring empty, resetting " 434 "whole chip.\n", dev->name); 435 return 1; 436 } 437 438 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 439 } 440 gp->rx_new = gp->rx_old = 0; 441 442 /* Now we must reprogram the rest of RX unit. */ 443 desc_dma = (u64) gp->gblock_dvma; 444 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); 445 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); 446 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); 447 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 448 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | 449 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); 450 writel(val, gp->regs + RXDMA_CFG); 451 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) 452 writel(((5 & RXDMA_BLANK_IPKTS) | 453 ((8 << 12) & RXDMA_BLANK_ITIME)), 454 gp->regs + RXDMA_BLANK); 455 else 456 writel(((5 & RXDMA_BLANK_IPKTS) | 457 ((4 << 12) & RXDMA_BLANK_ITIME)), 458 gp->regs + RXDMA_BLANK); 459 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); 460 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); 461 writel(val, gp->regs + RXDMA_PTHRESH); 462 val = readl(gp->regs + RXDMA_CFG); 463 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 464 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); 465 val = readl(gp->regs + MAC_RXCFG); 466 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 467 468 return 0; 469} 470 471static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 472{ 473 u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT); 474 int ret = 0; 475 476 if (netif_msg_intr(gp)) 477 printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n", 478 gp->dev->name, rxmac_stat); 479 480 if (rxmac_stat & MAC_RXSTAT_OFLW) { 481 u32 smac = readl(gp->regs + MAC_SMACHINE); 482 483 printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n", 484 dev->name, smac); 485 gp->net_stats.rx_over_errors++; 486 gp->net_stats.rx_fifo_errors++; 487 488 ret = gem_rxmac_reset(gp); 489 } 490 491 if (rxmac_stat & MAC_RXSTAT_ACE) 492 gp->net_stats.rx_frame_errors += 0x10000; 493 494 if (rxmac_stat & MAC_RXSTAT_CCE) 495 gp->net_stats.rx_crc_errors += 0x10000; 496 497 if (rxmac_stat & MAC_RXSTAT_LCE) 498 gp->net_stats.rx_length_errors += 0x10000; 499 500 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE 501 * events. 502 */ 503 return ret; 504} 505 506static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 507{ 508 u32 mac_cstat = readl(gp->regs + MAC_CSTAT); 509 510 if (netif_msg_intr(gp)) 511 printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n", 512 gp->dev->name, mac_cstat); 513 514 /* This interrupt is just for pause frame and pause 515 * tracking. It is useful for diagnostics and debug 516 * but probably by default we will mask these events. 517 */ 518 if (mac_cstat & MAC_CSTAT_PS) 519 gp->pause_entered++; 520 521 if (mac_cstat & MAC_CSTAT_PRCV) 522 gp->pause_last_time_recvd = (mac_cstat >> 16); 523 524 return 0; 525} 526 527static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 528{ 529 u32 mif_status = readl(gp->regs + MIF_STATUS); 530 u32 reg_val, changed_bits; 531 532 reg_val = (mif_status & MIF_STATUS_DATA) >> 16; 533 changed_bits = (mif_status & MIF_STATUS_STAT); 534 535 gem_handle_mif_event(gp, reg_val, changed_bits); 536 537 return 0; 538} 539 540static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 541{ 542 u32 pci_estat = readl(gp->regs + GREG_PCIESTAT); 543 544 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && 545 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { 546 printk(KERN_ERR "%s: PCI error [%04x] ", 547 dev->name, pci_estat); 548 549 if (pci_estat & GREG_PCIESTAT_BADACK) 550 printk("<No ACK64# during ABS64 cycle> "); 551 if (pci_estat & GREG_PCIESTAT_DTRTO) 552 printk("<Delayed transaction timeout> "); 553 if (pci_estat & GREG_PCIESTAT_OTHER) 554 printk("<other>"); 555 printk("\n"); 556 } else { 557 pci_estat |= GREG_PCIESTAT_OTHER; 558 printk(KERN_ERR "%s: PCI error\n", dev->name); 559 } 560 561 if (pci_estat & GREG_PCIESTAT_OTHER) { 562 u16 pci_cfg_stat; 563 564 /* Interrogate PCI config space for the 565 * true cause. 566 */ 567 pci_read_config_word(gp->pdev, PCI_STATUS, 568 &pci_cfg_stat); 569 printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n", 570 dev->name, pci_cfg_stat); 571 if (pci_cfg_stat & PCI_STATUS_PARITY) 572 printk(KERN_ERR "%s: PCI parity error detected.\n", 573 dev->name); 574 if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT) 575 printk(KERN_ERR "%s: PCI target abort.\n", 576 dev->name); 577 if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT) 578 printk(KERN_ERR "%s: PCI master acks target abort.\n", 579 dev->name); 580 if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT) 581 printk(KERN_ERR "%s: PCI master abort.\n", 582 dev->name); 583 if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR) 584 printk(KERN_ERR "%s: PCI system error SERR#.\n", 585 dev->name); 586 if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY) 587 printk(KERN_ERR "%s: PCI parity error.\n", 588 dev->name); 589 590 /* Write the error bits back to clear them. */ 591 pci_cfg_stat &= (PCI_STATUS_PARITY | 592 PCI_STATUS_SIG_TARGET_ABORT | 593 PCI_STATUS_REC_TARGET_ABORT | 594 PCI_STATUS_REC_MASTER_ABORT | 595 PCI_STATUS_SIG_SYSTEM_ERROR | 596 PCI_STATUS_DETECTED_PARITY); 597 pci_write_config_word(gp->pdev, 598 PCI_STATUS, pci_cfg_stat); 599 } 600 601 /* For all PCI errors, we should reset the chip. */ 602 return 1; 603} 604 605/* All non-normal interrupt conditions get serviced here. 606 * Returns non-zero if we should just exit the interrupt 607 * handler right now (ie. if we reset the card which invalidates 608 * all of the other original irq status bits). 609 */ 610static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status) 611{ 612 if (gem_status & GREG_STAT_RXNOBUF) { 613 /* Frame arrived, no free RX buffers available. */ 614 if (netif_msg_rx_err(gp)) 615 printk(KERN_DEBUG "%s: no buffer for rx frame\n", 616 gp->dev->name); 617 gp->net_stats.rx_dropped++; 618 } 619 620 if (gem_status & GREG_STAT_RXTAGERR) { 621 /* corrupt RX tag framing */ 622 if (netif_msg_rx_err(gp)) 623 printk(KERN_DEBUG "%s: corrupt rx tag framing\n", 624 gp->dev->name); 625 gp->net_stats.rx_errors++; 626 627 goto do_reset; 628 } 629 630 if (gem_status & GREG_STAT_PCS) { 631 if (gem_pcs_interrupt(dev, gp, gem_status)) 632 goto do_reset; 633 } 634 635 if (gem_status & GREG_STAT_TXMAC) { 636 if (gem_txmac_interrupt(dev, gp, gem_status)) 637 goto do_reset; 638 } 639 640 if (gem_status & GREG_STAT_RXMAC) { 641 if (gem_rxmac_interrupt(dev, gp, gem_status)) 642 goto do_reset; 643 } 644 645 if (gem_status & GREG_STAT_MAC) { 646 if (gem_mac_interrupt(dev, gp, gem_status)) 647 goto do_reset; 648 } 649 650 if (gem_status & GREG_STAT_MIF) { 651 if (gem_mif_interrupt(dev, gp, gem_status)) 652 goto do_reset; 653 } 654 655 if (gem_status & GREG_STAT_PCIERR) { 656 if (gem_pci_interrupt(dev, gp, gem_status)) 657 goto do_reset; 658 } 659 660 return 0; 661 662do_reset: 663 gp->reset_task_pending = 1; 664 schedule_work(&gp->reset_task); 665 666 return 1; 667} 668 669static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status) 670{ 671 int entry, limit; 672 673 if (netif_msg_intr(gp)) 674 printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n", 675 gp->dev->name, gem_status); 676 677 entry = gp->tx_old; 678 limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT); 679 while (entry != limit) { 680 struct sk_buff *skb; 681 struct gem_txd *txd; 682 dma_addr_t dma_addr; 683 u32 dma_len; 684 int frag; 685 686 if (netif_msg_tx_done(gp)) 687 printk(KERN_DEBUG "%s: tx done, slot %d\n", 688 gp->dev->name, entry); 689 skb = gp->tx_skbs[entry]; 690 if (skb_shinfo(skb)->nr_frags) { 691 int last = entry + skb_shinfo(skb)->nr_frags; 692 int walk = entry; 693 int incomplete = 0; 694 695 last &= (TX_RING_SIZE - 1); 696 for (;;) { 697 walk = NEXT_TX(walk); 698 if (walk == limit) 699 incomplete = 1; 700 if (walk == last) 701 break; 702 } 703 if (incomplete) 704 break; 705 } 706 gp->tx_skbs[entry] = NULL; 707 gp->net_stats.tx_bytes += skb->len; 708 709 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 710 txd = &gp->init_block->txd[entry]; 711 712 dma_addr = le64_to_cpu(txd->buffer); 713 dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ; 714 715 pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); 716 entry = NEXT_TX(entry); 717 } 718 719 gp->net_stats.tx_packets++; 720 dev_kfree_skb_irq(skb); 721 } 722 gp->tx_old = entry; 723 724 if (netif_queue_stopped(dev) && 725 TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) 726 netif_wake_queue(dev); 727} 728 729static __inline__ void gem_post_rxds(struct gem *gp, int limit) 730{ 731 int cluster_start, curr, count, kick; 732 733 cluster_start = curr = (gp->rx_new & ~(4 - 1)); 734 count = 0; 735 kick = -1; 736 wmb(); 737 while (curr != limit) { 738 curr = NEXT_RX(curr); 739 if (++count == 4) { 740 struct gem_rxd *rxd = 741 &gp->init_block->rxd[cluster_start]; 742 for (;;) { 743 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 744 rxd++; 745 cluster_start = NEXT_RX(cluster_start); 746 if (cluster_start == curr) 747 break; 748 } 749 kick = curr; 750 count = 0; 751 } 752 } 753 if (kick >= 0) { 754 mb(); 755 writel(kick, gp->regs + RXDMA_KICK); 756 } 757} 758 759static int gem_rx(struct gem *gp, int work_to_do) 760{ 761 int entry, drops, work_done = 0; 762 u32 done; 763 764 if (netif_msg_rx_status(gp)) 765 printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", 766 gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new); 767 768 entry = gp->rx_new; 769 drops = 0; 770 done = readl(gp->regs + RXDMA_DONE); 771 for (;;) { 772 struct gem_rxd *rxd = &gp->init_block->rxd[entry]; 773 struct sk_buff *skb; 774 u64 status = cpu_to_le64(rxd->status_word); 775 dma_addr_t dma_addr; 776 int len; 777 778 if ((status & RXDCTRL_OWN) != 0) 779 break; 780 781 if (work_done >= RX_RING_SIZE || work_done >= work_to_do) 782 break; 783 784 /* When writing back RX descriptor, GEM writes status 785 * then buffer address, possibly in seperate transactions. 786 * If we don't wait for the chip to write both, we could 787 * post a new buffer to this descriptor then have GEM spam 788 * on the buffer address. We sync on the RX completion 789 * register to prevent this from happening. 790 */ 791 if (entry == done) { 792 done = readl(gp->regs + RXDMA_DONE); 793 if (entry == done) 794 break; 795 } 796 797 /* We can now account for the work we're about to do */ 798 work_done++; 799 800 skb = gp->rx_skbs[entry]; 801 802 len = (status & RXDCTRL_BUFSZ) >> 16; 803 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { 804 gp->net_stats.rx_errors++; 805 if (len < ETH_ZLEN) 806 gp->net_stats.rx_length_errors++; 807 if (len & RXDCTRL_BAD) 808 gp->net_stats.rx_crc_errors++; 809 810 /* We'll just return it to GEM. */ 811 drop_it: 812 gp->net_stats.rx_dropped++; 813 goto next; 814 } 815 816 dma_addr = cpu_to_le64(rxd->buffer); 817 if (len > RX_COPY_THRESHOLD) { 818 struct sk_buff *new_skb; 819 820 new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); 821 if (new_skb == NULL) { 822 drops++; 823 goto drop_it; 824 } 825 pci_unmap_page(gp->pdev, dma_addr, 826 RX_BUF_ALLOC_SIZE(gp), 827 PCI_DMA_FROMDEVICE); 828 gp->rx_skbs[entry] = new_skb; 829 new_skb->dev = gp->dev; 830 skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); 831 rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev, 832 virt_to_page(new_skb->data), 833 offset_in_page(new_skb->data), 834 RX_BUF_ALLOC_SIZE(gp), 835 PCI_DMA_FROMDEVICE)); 836 skb_reserve(new_skb, RX_OFFSET); 837 838 /* Trim the original skb for the netif. */ 839 skb_trim(skb, len); 840 } else { 841 struct sk_buff *copy_skb = dev_alloc_skb(len + 2); 842 843 if (copy_skb == NULL) { 844 drops++; 845 goto drop_it; 846 } 847 848 copy_skb->dev = gp->dev; 849 skb_reserve(copy_skb, 2); 850 skb_put(copy_skb, len); 851 pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 852 memcpy(copy_skb->data, skb->data, len); 853 pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 854 855 /* We'll reuse the original ring buffer. */ 856 skb = copy_skb; 857 } 858 859 skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff); 860 skb->ip_summed = CHECKSUM_COMPLETE; 861 skb->protocol = eth_type_trans(skb, gp->dev); 862 863 netif_receive_skb(skb); 864 865 gp->net_stats.rx_packets++; 866 gp->net_stats.rx_bytes += len; 867 gp->dev->last_rx = jiffies; 868 869 next: 870 entry = NEXT_RX(entry); 871 } 872 873 gem_post_rxds(gp, entry); 874 875 gp->rx_new = entry; 876 877 if (drops) 878 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", 879 gp->dev->name); 880 881 return work_done; 882} 883 884static int gem_poll(struct net_device *dev, int *budget) 885{ 886 struct gem *gp = dev->priv; 887 unsigned long flags; 888 889 /* 890 * NAPI locking nightmare: See comment at head of driver 891 */ 892 spin_lock_irqsave(&gp->lock, flags); 893 894 do { 895 int work_to_do, work_done; 896 897 /* Handle anomalies */ 898 if (gp->status & GREG_STAT_ABNORMAL) { 899 if (gem_abnormal_irq(dev, gp, gp->status)) 900 break; 901 } 902 903 /* Run TX completion thread */ 904 spin_lock(&gp->tx_lock); 905 gem_tx(dev, gp, gp->status); 906 spin_unlock(&gp->tx_lock); 907 908 spin_unlock_irqrestore(&gp->lock, flags); 909 910 /* Run RX thread. We don't use any locking here, 911 * code willing to do bad things - like cleaning the 912 * rx ring - must call netif_poll_disable(), which 913 * schedule_timeout()'s if polling is already disabled. 914 */ 915 work_to_do = min(*budget, dev->quota); 916 917 work_done = gem_rx(gp, work_to_do); 918 919 *budget -= work_done; 920 dev->quota -= work_done; 921 922 if (work_done >= work_to_do) 923 return 1; 924 925 spin_lock_irqsave(&gp->lock, flags); 926 927 gp->status = readl(gp->regs + GREG_STAT); 928 } while (gp->status & GREG_STAT_NAPI); 929 930 __netif_rx_complete(dev); 931 gem_enable_ints(gp); 932 933 spin_unlock_irqrestore(&gp->lock, flags); 934 return 0; 935} 936 937static irqreturn_t gem_interrupt(int irq, void *dev_id) 938{ 939 struct net_device *dev = dev_id; 940 struct gem *gp = dev->priv; 941 unsigned long flags; 942 943 /* Swallow interrupts when shutting the chip down, though 944 * that shouldn't happen, we should have done free_irq() at 945 * this point... 946 */ 947 if (!gp->running) 948 return IRQ_HANDLED; 949 950 spin_lock_irqsave(&gp->lock, flags); 951 952 if (netif_rx_schedule_prep(dev)) { 953 u32 gem_status = readl(gp->regs + GREG_STAT); 954 955 if (gem_status == 0) { 956 netif_poll_enable(dev); 957 spin_unlock_irqrestore(&gp->lock, flags); 958 return IRQ_NONE; 959 } 960 gp->status = gem_status; 961 gem_disable_ints(gp); 962 __netif_rx_schedule(dev); 963 } 964 965 spin_unlock_irqrestore(&gp->lock, flags); 966 967 /* If polling was disabled at the time we received that 968 * interrupt, we may return IRQ_HANDLED here while we 969 * should return IRQ_NONE. No big deal... 970 */ 971 return IRQ_HANDLED; 972} 973 974#ifdef CONFIG_NET_POLL_CONTROLLER 975static void gem_poll_controller(struct net_device *dev) 976{ 977 /* gem_interrupt is safe to reentrance so no need 978 * to disable_irq here. 979 */ 980 gem_interrupt(dev->irq, dev); 981} 982#endif 983 984static void gem_tx_timeout(struct net_device *dev) 985{ 986 struct gem *gp = dev->priv; 987 988 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 989 if (!gp->running) { 990 printk("%s: hrm.. hw not running !\n", dev->name); 991 return; 992 } 993 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n", 994 dev->name, 995 readl(gp->regs + TXDMA_CFG), 996 readl(gp->regs + MAC_TXSTAT), 997 readl(gp->regs + MAC_TXCFG)); 998 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n", 999 dev->name, 1000 readl(gp->regs + RXDMA_CFG), 1001 readl(gp->regs + MAC_RXSTAT), 1002 readl(gp->regs + MAC_RXCFG)); 1003 1004 spin_lock_irq(&gp->lock); 1005 spin_lock(&gp->tx_lock); 1006 1007 gp->reset_task_pending = 1; 1008 schedule_work(&gp->reset_task); 1009 1010 spin_unlock(&gp->tx_lock); 1011 spin_unlock_irq(&gp->lock); 1012} 1013 1014static __inline__ int gem_intme(int entry) 1015{ 1016 /* Algorithm: IRQ every 1/2 of descriptors. */ 1017 if (!(entry & ((TX_RING_SIZE>>1)-1))) 1018 return 1; 1019 1020 return 0; 1021} 1022 1023static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) 1024{ 1025 struct gem *gp = dev->priv; 1026 int entry; 1027 u64 ctrl; 1028 unsigned long flags; 1029 1030 ctrl = 0; 1031 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1032 u64 csum_start_off, csum_stuff_off; 1033 1034 csum_start_off = (u64) (skb->h.raw - skb->data); 1035 csum_stuff_off = csum_start_off + skb->csum_offset; 1036 1037 ctrl = (TXDCTRL_CENAB | 1038 (csum_start_off << 15) | 1039 (csum_stuff_off << 21)); 1040 } 1041 1042 local_irq_save(flags); 1043 if (!spin_trylock(&gp->tx_lock)) { 1044 /* Tell upper layer to requeue */ 1045 local_irq_restore(flags); 1046 return NETDEV_TX_LOCKED; 1047 } 1048 /* We raced with gem_do_stop() */ 1049 if (!gp->running) { 1050 spin_unlock_irqrestore(&gp->tx_lock, flags); 1051 return NETDEV_TX_BUSY; 1052 } 1053 1054 /* This is a hard error, log it. */ 1055 if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) { 1056 netif_stop_queue(dev); 1057 spin_unlock_irqrestore(&gp->tx_lock, flags); 1058 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", 1059 dev->name); 1060 return NETDEV_TX_BUSY; 1061 } 1062 1063 entry = gp->tx_new; 1064 gp->tx_skbs[entry] = skb; 1065 1066 if (skb_shinfo(skb)->nr_frags == 0) { 1067 struct gem_txd *txd = &gp->init_block->txd[entry]; 1068 dma_addr_t mapping; 1069 u32 len; 1070 1071 len = skb->len; 1072 mapping = pci_map_page(gp->pdev, 1073 virt_to_page(skb->data), 1074 offset_in_page(skb->data), 1075 len, PCI_DMA_TODEVICE); 1076 ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len; 1077 if (gem_intme(entry)) 1078 ctrl |= TXDCTRL_INTME; 1079 txd->buffer = cpu_to_le64(mapping); 1080 wmb(); 1081 txd->control_word = cpu_to_le64(ctrl); 1082 entry = NEXT_TX(entry); 1083 } else { 1084 struct gem_txd *txd; 1085 u32 first_len; 1086 u64 intme; 1087 dma_addr_t first_mapping; 1088 int frag, first_entry = entry; 1089 1090 intme = 0; 1091 if (gem_intme(entry)) 1092 intme |= TXDCTRL_INTME; 1093 1094 /* We must give this initial chunk to the device last. 1095 * Otherwise we could race with the device. 1096 */ 1097 first_len = skb_headlen(skb); 1098 first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data), 1099 offset_in_page(skb->data), 1100 first_len, PCI_DMA_TODEVICE); 1101 entry = NEXT_TX(entry); 1102 1103 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 1104 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 1105 u32 len; 1106 dma_addr_t mapping; 1107 u64 this_ctrl; 1108 1109 len = this_frag->size; 1110 mapping = pci_map_page(gp->pdev, 1111 this_frag->page, 1112 this_frag->page_offset, 1113 len, PCI_DMA_TODEVICE); 1114 this_ctrl = ctrl; 1115 if (frag == skb_shinfo(skb)->nr_frags - 1) 1116 this_ctrl |= TXDCTRL_EOF; 1117 1118 txd = &gp->init_block->txd[entry]; 1119 txd->buffer = cpu_to_le64(mapping); 1120 wmb(); 1121 txd->control_word = cpu_to_le64(this_ctrl | len); 1122 1123 if (gem_intme(entry)) 1124 intme |= TXDCTRL_INTME; 1125 1126 entry = NEXT_TX(entry); 1127 } 1128 txd = &gp->init_block->txd[first_entry]; 1129 txd->buffer = cpu_to_le64(first_mapping); 1130 wmb(); 1131 txd->control_word = 1132 cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len); 1133 } 1134 1135 gp->tx_new = entry; 1136 if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1)) 1137 netif_stop_queue(dev); 1138 1139 if (netif_msg_tx_queued(gp)) 1140 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", 1141 dev->name, entry, skb->len); 1142 mb(); 1143 writel(gp->tx_new, gp->regs + TXDMA_KICK); 1144 spin_unlock_irqrestore(&gp->tx_lock, flags); 1145 1146 dev->trans_start = jiffies; 1147 1148 return NETDEV_TX_OK; 1149} 1150 1151#define STOP_TRIES 32 1152 1153/* Must be invoked under gp->lock and gp->tx_lock. */ 1154static void gem_reset(struct gem *gp) 1155{ 1156 int limit; 1157 u32 val; 1158 1159 /* Make sure we won't get any more interrupts */ 1160 writel(0xffffffff, gp->regs + GREG_IMASK); 1161 1162 /* Reset the chip */ 1163 writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST, 1164 gp->regs + GREG_SWRST); 1165 1166 limit = STOP_TRIES; 1167 1168 do { 1169 udelay(20); 1170 val = readl(gp->regs + GREG_SWRST); 1171 if (limit-- <= 0) 1172 break; 1173 } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)); 1174 1175 if (limit <= 0) 1176 printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name); 1177} 1178 1179/* Must be invoked under gp->lock and gp->tx_lock. */ 1180static void gem_start_dma(struct gem *gp) 1181{ 1182 u32 val; 1183 1184 /* We are ready to rock, turn everything on. */ 1185 val = readl(gp->regs + TXDMA_CFG); 1186 writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); 1187 val = readl(gp->regs + RXDMA_CFG); 1188 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 1189 val = readl(gp->regs + MAC_TXCFG); 1190 writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); 1191 val = readl(gp->regs + MAC_RXCFG); 1192 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 1193 1194 (void) readl(gp->regs + MAC_RXCFG); 1195 udelay(100); 1196 1197 gem_enable_ints(gp); 1198 1199 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 1200} 1201 1202/* Must be invoked under gp->lock and gp->tx_lock. DMA won't be 1203 * actually stopped before about 4ms tho ... 1204 */ 1205static void gem_stop_dma(struct gem *gp) 1206{ 1207 u32 val; 1208 1209 /* We are done rocking, turn everything off. */ 1210 val = readl(gp->regs + TXDMA_CFG); 1211 writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); 1212 val = readl(gp->regs + RXDMA_CFG); 1213 writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 1214 val = readl(gp->regs + MAC_TXCFG); 1215 writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); 1216 val = readl(gp->regs + MAC_RXCFG); 1217 writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 1218 1219 (void) readl(gp->regs + MAC_RXCFG); 1220 1221 /* Need to wait a bit ... done by the caller */ 1222} 1223 1224 1225/* Must be invoked under gp->lock and gp->tx_lock. */ 1226// XXX dbl check what that function should do when called on PCS PHY 1227static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) 1228{ 1229 u32 advertise, features; 1230 int autoneg; 1231 int speed; 1232 int duplex; 1233 1234 if (gp->phy_type != phy_mii_mdio0 && 1235 gp->phy_type != phy_mii_mdio1) 1236 goto non_mii; 1237 1238 /* Setup advertise */ 1239 if (found_mii_phy(gp)) 1240 features = gp->phy_mii.def->features; 1241 else 1242 features = 0; 1243 1244 advertise = features & ADVERTISE_MASK; 1245 if (gp->phy_mii.advertising != 0) 1246 advertise &= gp->phy_mii.advertising; 1247 1248 autoneg = gp->want_autoneg; 1249 speed = gp->phy_mii.speed; 1250 duplex = gp->phy_mii.duplex; 1251 1252 /* Setup link parameters */ 1253 if (!ep) 1254 goto start_aneg; 1255 if (ep->autoneg == AUTONEG_ENABLE) { 1256 advertise = ep->advertising; 1257 autoneg = 1; 1258 } else { 1259 autoneg = 0; 1260 speed = ep->speed; 1261 duplex = ep->duplex; 1262 } 1263 1264start_aneg: 1265 /* Sanitize settings based on PHY capabilities */ 1266 if ((features & SUPPORTED_Autoneg) == 0) 1267 autoneg = 0; 1268 if (speed == SPEED_1000 && 1269 !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full))) 1270 speed = SPEED_100; 1271 if (speed == SPEED_100 && 1272 !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full))) 1273 speed = SPEED_10; 1274 if (duplex == DUPLEX_FULL && 1275 !(features & (SUPPORTED_1000baseT_Full | 1276 SUPPORTED_100baseT_Full | 1277 SUPPORTED_10baseT_Full))) 1278 duplex = DUPLEX_HALF; 1279 if (speed == 0) 1280 speed = SPEED_10; 1281 1282 /* If we are asleep, we don't try to actually setup the PHY, we 1283 * just store the settings 1284 */ 1285 if (gp->asleep) { 1286 gp->phy_mii.autoneg = gp->want_autoneg = autoneg; 1287 gp->phy_mii.speed = speed; 1288 gp->phy_mii.duplex = duplex; 1289 return; 1290 } 1291 1292 /* Configure PHY & start aneg */ 1293 gp->want_autoneg = autoneg; 1294 if (autoneg) { 1295 if (found_mii_phy(gp)) 1296 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise); 1297 gp->lstate = link_aneg; 1298 } else { 1299 if (found_mii_phy(gp)) 1300 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex); 1301 gp->lstate = link_force_ok; 1302 } 1303 1304non_mii: 1305 gp->timer_ticks = 0; 1306 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); 1307} 1308 1309/* A link-up condition has occurred, initialize and enable the 1310 * rest of the chip. 1311 * 1312 * Must be invoked under gp->lock and gp->tx_lock. 1313 */ 1314static int gem_set_link_modes(struct gem *gp) 1315{ 1316 u32 val; 1317 int full_duplex, speed, pause; 1318 1319 full_duplex = 0; 1320 speed = SPEED_10; 1321 pause = 0; 1322 1323 if (found_mii_phy(gp)) { 1324 if (gp->phy_mii.def->ops->read_link(&gp->phy_mii)) 1325 return 1; 1326 full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL); 1327 speed = gp->phy_mii.speed; 1328 pause = gp->phy_mii.pause; 1329 } else if (gp->phy_type == phy_serialink || 1330 gp->phy_type == phy_serdes) { 1331 u32 pcs_lpa = readl(gp->regs + PCS_MIILP); 1332 1333 if (pcs_lpa & PCS_MIIADV_FD) 1334 full_duplex = 1; 1335 speed = SPEED_1000; 1336 } 1337 1338 if (netif_msg_link(gp)) 1339 printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n", 1340 gp->dev->name, speed, (full_duplex ? "full" : "half")); 1341 1342 if (!gp->running) 1343 return 0; 1344 1345 val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU); 1346 if (full_duplex) { 1347 val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL); 1348 } else { 1349 /* MAC_TXCFG_NBO must be zero. */ 1350 } 1351 writel(val, gp->regs + MAC_TXCFG); 1352 1353 val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED); 1354 if (!full_duplex && 1355 (gp->phy_type == phy_mii_mdio0 || 1356 gp->phy_type == phy_mii_mdio1)) { 1357 val |= MAC_XIFCFG_DISE; 1358 } else if (full_duplex) { 1359 val |= MAC_XIFCFG_FLED; 1360 } 1361 1362 if (speed == SPEED_1000) 1363 val |= (MAC_XIFCFG_GMII); 1364 1365 writel(val, gp->regs + MAC_XIFCFG); 1366 1367 /* If gigabit and half-duplex, enable carrier extension 1368 * mode. Else, disable it. 1369 */ 1370 if (speed == SPEED_1000 && !full_duplex) { 1371 val = readl(gp->regs + MAC_TXCFG); 1372 writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); 1373 1374 val = readl(gp->regs + MAC_RXCFG); 1375 writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); 1376 } else { 1377 val = readl(gp->regs + MAC_TXCFG); 1378 writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); 1379 1380 val = readl(gp->regs + MAC_RXCFG); 1381 writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); 1382 } 1383 1384 if (gp->phy_type == phy_serialink || 1385 gp->phy_type == phy_serdes) { 1386 u32 pcs_lpa = readl(gp->regs + PCS_MIILP); 1387 1388 if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP)) 1389 pause = 1; 1390 } 1391 1392 if (netif_msg_link(gp)) { 1393 if (pause) { 1394 printk(KERN_INFO "%s: Pause is enabled " 1395 "(rxfifo: %d off: %d on: %d)\n", 1396 gp->dev->name, 1397 gp->rx_fifo_sz, 1398 gp->rx_pause_off, 1399 gp->rx_pause_on); 1400 } else { 1401 printk(KERN_INFO "%s: Pause is disabled\n", 1402 gp->dev->name); 1403 } 1404 } 1405 1406 if (!full_duplex) 1407 writel(512, gp->regs + MAC_STIME); 1408 else 1409 writel(64, gp->regs + MAC_STIME); 1410 val = readl(gp->regs + MAC_MCCFG); 1411 if (pause) 1412 val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE); 1413 else 1414 val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE); 1415 writel(val, gp->regs + MAC_MCCFG); 1416 1417 gem_start_dma(gp); 1418 1419 return 0; 1420} 1421 1422/* Must be invoked under gp->lock and gp->tx_lock. */ 1423static int gem_mdio_link_not_up(struct gem *gp) 1424{ 1425 switch (gp->lstate) { 1426 case link_force_ret: 1427 if (netif_msg_link(gp)) 1428 printk(KERN_INFO "%s: Autoneg failed again, keeping" 1429 " forced mode\n", gp->dev->name); 1430 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, 1431 gp->last_forced_speed, DUPLEX_HALF); 1432 gp->timer_ticks = 5; 1433 gp->lstate = link_force_ok; 1434 return 0; 1435 case link_aneg: 1436 /* We try forced modes after a failed aneg only on PHYs that don't 1437 * have "magic_aneg" bit set, which means they internally do the 1438 * while forced-mode thingy. On these, we just restart aneg 1439 */ 1440 if (gp->phy_mii.def->magic_aneg) 1441 return 1; 1442 if (netif_msg_link(gp)) 1443 printk(KERN_INFO "%s: switching to forced 100bt\n", 1444 gp->dev->name); 1445 /* Try forced modes. */ 1446 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100, 1447 DUPLEX_HALF); 1448 gp->timer_ticks = 5; 1449 gp->lstate = link_force_try; 1450 return 0; 1451 case link_force_try: 1452 /* Downgrade from 100 to 10 Mbps if necessary. 1453 * If already at 10Mbps, warn user about the 1454 * situation every 10 ticks. 1455 */ 1456 if (gp->phy_mii.speed == SPEED_100) { 1457 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10, 1458 DUPLEX_HALF); 1459 gp->timer_ticks = 5; 1460 if (netif_msg_link(gp)) 1461 printk(KERN_INFO "%s: switching to forced 10bt\n", 1462 gp->dev->name); 1463 return 0; 1464 } else 1465 return 1; 1466 default: 1467 return 0; 1468 } 1469} 1470 1471static void gem_link_timer(unsigned long data) 1472{ 1473 struct gem *gp = (struct gem *) data; 1474 int restart_aneg = 0; 1475 1476 if (gp->asleep) 1477 return; 1478 1479 spin_lock_irq(&gp->lock); 1480 spin_lock(&gp->tx_lock); 1481 gem_get_cell(gp); 1482 1483 /* If the reset task is still pending, we just 1484 * reschedule the link timer 1485 */ 1486 if (gp->reset_task_pending) 1487 goto restart; 1488 1489 if (gp->phy_type == phy_serialink || 1490 gp->phy_type == phy_serdes) { 1491 u32 val = readl(gp->regs + PCS_MIISTAT); 1492 1493 if (!(val & PCS_MIISTAT_LS)) 1494 val = readl(gp->regs + PCS_MIISTAT); 1495 1496 if ((val & PCS_MIISTAT_LS) != 0) { 1497 gp->lstate = link_up; 1498 netif_carrier_on(gp->dev); 1499 (void)gem_set_link_modes(gp); 1500 } 1501 goto restart; 1502 } 1503 if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) { 1504 /* Ok, here we got a link. If we had it due to a forced 1505 * fallback, and we were configured for autoneg, we do 1506 * retry a short autoneg pass. If you know your hub is 1507 * broken, use ethtool ;) 1508 */ 1509 if (gp->lstate == link_force_try && gp->want_autoneg) { 1510 gp->lstate = link_force_ret; 1511 gp->last_forced_speed = gp->phy_mii.speed; 1512 gp->timer_ticks = 5; 1513 if (netif_msg_link(gp)) 1514 printk(KERN_INFO "%s: Got link after fallback, retrying" 1515 " autoneg once...\n", gp->dev->name); 1516 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); 1517 } else if (gp->lstate != link_up) { 1518 gp->lstate = link_up; 1519 netif_carrier_on(gp->dev); 1520 if (gem_set_link_modes(gp)) 1521 restart_aneg = 1; 1522 } 1523 } else { 1524 /* If the link was previously up, we restart the 1525 * whole process 1526 */ 1527 if (gp->lstate == link_up) { 1528 gp->lstate = link_down; 1529 if (netif_msg_link(gp)) 1530 printk(KERN_INFO "%s: Link down\n", 1531 gp->dev->name); 1532 netif_carrier_off(gp->dev); 1533 gp->reset_task_pending = 1; 1534 schedule_work(&gp->reset_task); 1535 restart_aneg = 1; 1536 } else if (++gp->timer_ticks > 10) { 1537 if (found_mii_phy(gp)) 1538 restart_aneg = gem_mdio_link_not_up(gp); 1539 else 1540 restart_aneg = 1; 1541 } 1542 } 1543 if (restart_aneg) { 1544 gem_begin_auto_negotiation(gp, NULL); 1545 goto out_unlock; 1546 } 1547restart: 1548 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); 1549out_unlock: 1550 gem_put_cell(gp); 1551 spin_unlock(&gp->tx_lock); 1552 spin_unlock_irq(&gp->lock); 1553} 1554 1555/* Must be invoked under gp->lock and gp->tx_lock. */ 1556static void gem_clean_rings(struct gem *gp) 1557{ 1558 struct gem_init_block *gb = gp->init_block; 1559 struct sk_buff *skb; 1560 int i; 1561 dma_addr_t dma_addr; 1562 1563 for (i = 0; i < RX_RING_SIZE; i++) { 1564 struct gem_rxd *rxd; 1565 1566 rxd = &gb->rxd[i]; 1567 if (gp->rx_skbs[i] != NULL) { 1568 skb = gp->rx_skbs[i]; 1569 dma_addr = le64_to_cpu(rxd->buffer); 1570 pci_unmap_page(gp->pdev, dma_addr, 1571 RX_BUF_ALLOC_SIZE(gp), 1572 PCI_DMA_FROMDEVICE); 1573 dev_kfree_skb_any(skb); 1574 gp->rx_skbs[i] = NULL; 1575 } 1576 rxd->status_word = 0; 1577 wmb(); 1578 rxd->buffer = 0; 1579 } 1580 1581 for (i = 0; i < TX_RING_SIZE; i++) { 1582 if (gp->tx_skbs[i] != NULL) { 1583 struct gem_txd *txd; 1584 int frag; 1585 1586 skb = gp->tx_skbs[i]; 1587 gp->tx_skbs[i] = NULL; 1588 1589 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 1590 int ent = i & (TX_RING_SIZE - 1); 1591 1592 txd = &gb->txd[ent]; 1593 dma_addr = le64_to_cpu(txd->buffer); 1594 pci_unmap_page(gp->pdev, dma_addr, 1595 le64_to_cpu(txd->control_word) & 1596 TXDCTRL_BUFSZ, PCI_DMA_TODEVICE); 1597 1598 if (frag != skb_shinfo(skb)->nr_frags) 1599 i++; 1600 } 1601 dev_kfree_skb_any(skb); 1602 } 1603 } 1604} 1605 1606/* Must be invoked under gp->lock and gp->tx_lock. */ 1607static void gem_init_rings(struct gem *gp) 1608{ 1609 struct gem_init_block *gb = gp->init_block; 1610 struct net_device *dev = gp->dev; 1611 int i; 1612 dma_addr_t dma_addr; 1613 1614 gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0; 1615 1616 gem_clean_rings(gp); 1617 1618 gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN, 1619 (unsigned)VLAN_ETH_FRAME_LEN); 1620 1621 for (i = 0; i < RX_RING_SIZE; i++) { 1622 struct sk_buff *skb; 1623 struct gem_rxd *rxd = &gb->rxd[i]; 1624 1625 skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); 1626 if (!skb) { 1627 rxd->buffer = 0; 1628 rxd->status_word = 0; 1629 continue; 1630 } 1631 1632 gp->rx_skbs[i] = skb; 1633 skb->dev = dev; 1634 skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); 1635 dma_addr = pci_map_page(gp->pdev, 1636 virt_to_page(skb->data), 1637 offset_in_page(skb->data), 1638 RX_BUF_ALLOC_SIZE(gp), 1639 PCI_DMA_FROMDEVICE); 1640 rxd->buffer = cpu_to_le64(dma_addr); 1641 wmb(); 1642 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 1643 skb_reserve(skb, RX_OFFSET); 1644 } 1645 1646 for (i = 0; i < TX_RING_SIZE; i++) { 1647 struct gem_txd *txd = &gb->txd[i]; 1648 1649 txd->control_word = 0; 1650 wmb(); 1651 txd->buffer = 0; 1652 } 1653 wmb(); 1654} 1655 1656/* Init PHY interface and start link poll state machine */ 1657static void gem_init_phy(struct gem *gp) 1658{ 1659 u32 mifcfg; 1660 1661 /* Revert MIF CFG setting done on stop_phy */ 1662 mifcfg = readl(gp->regs + MIF_CFG); 1663 mifcfg &= ~MIF_CFG_BBMODE; 1664 writel(mifcfg, gp->regs + MIF_CFG); 1665 1666 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { 1667 int i; 1668 1669 /* Those delay sucks, the HW seem to love them though, I'll 1670 * serisouly consider breaking some locks here to be able 1671 * to schedule instead 1672 */ 1673 for (i = 0; i < 3; i++) { 1674#ifdef CONFIG_PPC_PMAC 1675 pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0); 1676 msleep(20); 1677#endif 1678 /* Some PHYs used by apple have problem getting back to us, 1679 * we do an additional reset here 1680 */ 1681 phy_write(gp, MII_BMCR, BMCR_RESET); 1682 msleep(20); 1683 if (phy_read(gp, MII_BMCR) != 0xffff) 1684 break; 1685 if (i == 2) 1686 printk(KERN_WARNING "%s: GMAC PHY not responding !\n", 1687 gp->dev->name); 1688 } 1689 } 1690 1691 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && 1692 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { 1693 u32 val; 1694 1695 /* Init datapath mode register. */ 1696 if (gp->phy_type == phy_mii_mdio0 || 1697 gp->phy_type == phy_mii_mdio1) { 1698 val = PCS_DMODE_MGM; 1699 } else if (gp->phy_type == phy_serialink) { 1700 val = PCS_DMODE_SM | PCS_DMODE_GMOE; 1701 } else { 1702 val = PCS_DMODE_ESM; 1703 } 1704 1705 writel(val, gp->regs + PCS_DMODE); 1706 } 1707 1708 if (gp->phy_type == phy_mii_mdio0 || 1709 gp->phy_type == phy_mii_mdio1) { 1710 // XXX check for errors 1711 mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr); 1712 1713 /* Init PHY */ 1714 if (gp->phy_mii.def && gp->phy_mii.def->ops->init) 1715 gp->phy_mii.def->ops->init(&gp->phy_mii); 1716 } else { 1717 u32 val; 1718 int limit; 1719 1720 /* Reset PCS unit. */ 1721 val = readl(gp->regs + PCS_MIICTRL); 1722 val |= PCS_MIICTRL_RST; 1723 writeb(val, gp->regs + PCS_MIICTRL); 1724 1725 limit = 32; 1726 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) { 1727 udelay(100); 1728 if (limit-- <= 0) 1729 break; 1730 } 1731 if (limit <= 0) 1732 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n", 1733 gp->dev->name); 1734 1735 /* Make sure PCS is disabled while changing advertisement 1736 * configuration. 1737 */ 1738 val = readl(gp->regs + PCS_CFG); 1739 val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO); 1740 writel(val, gp->regs + PCS_CFG); 1741 1742 /* Advertise all capabilities except assymetric 1743 * pause. 1744 */ 1745 val = readl(gp->regs + PCS_MIIADV); 1746 val |= (PCS_MIIADV_FD | PCS_MIIADV_HD | 1747 PCS_MIIADV_SP | PCS_MIIADV_AP); 1748 writel(val, gp->regs + PCS_MIIADV); 1749 1750 /* Enable and restart auto-negotiation, disable wrapback/loopback, 1751 * and re-enable PCS. 1752 */ 1753 val = readl(gp->regs + PCS_MIICTRL); 1754 val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE); 1755 val &= ~PCS_MIICTRL_WB; 1756 writel(val, gp->regs + PCS_MIICTRL); 1757 1758 val = readl(gp->regs + PCS_CFG); 1759 val |= PCS_CFG_ENABLE; 1760 writel(val, gp->regs + PCS_CFG); 1761 1762 /* Make sure serialink loopback is off. The meaning 1763 * of this bit is logically inverted based upon whether 1764 * you are in Serialink or SERDES mode. 1765 */ 1766 val = readl(gp->regs + PCS_SCTRL); 1767 if (gp->phy_type == phy_serialink) 1768 val &= ~PCS_SCTRL_LOOP; 1769 else 1770 val |= PCS_SCTRL_LOOP; 1771 writel(val, gp->regs + PCS_SCTRL); 1772 } 1773 1774 /* Default aneg parameters */ 1775 gp->timer_ticks = 0; 1776 gp->lstate = link_down; 1777 netif_carrier_off(gp->dev); 1778 1779 /* Can I advertise gigabit here ? I'd need BCM PHY docs... */ 1780 spin_lock_irq(&gp->lock); 1781 gem_begin_auto_negotiation(gp, NULL); 1782 spin_unlock_irq(&gp->lock); 1783} 1784 1785/* Must be invoked under gp->lock and gp->tx_lock. */ 1786static void gem_init_dma(struct gem *gp) 1787{ 1788 u64 desc_dma = (u64) gp->gblock_dvma; 1789 u32 val; 1790 1791 val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE); 1792 writel(val, gp->regs + TXDMA_CFG); 1793 1794 writel(desc_dma >> 32, gp->regs + TXDMA_DBHI); 1795 writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW); 1796 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); 1797 1798 writel(0, gp->regs + TXDMA_KICK); 1799 1800 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | 1801 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); 1802 writel(val, gp->regs + RXDMA_CFG); 1803 1804 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); 1805 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); 1806 1807 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 1808 1809 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); 1810 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); 1811 writel(val, gp->regs + RXDMA_PTHRESH); 1812 1813 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) 1814 writel(((5 & RXDMA_BLANK_IPKTS) | 1815 ((8 << 12) & RXDMA_BLANK_ITIME)), 1816 gp->regs + RXDMA_BLANK); 1817 else 1818 writel(((5 & RXDMA_BLANK_IPKTS) | 1819 ((4 << 12) & RXDMA_BLANK_ITIME)), 1820 gp->regs + RXDMA_BLANK); 1821} 1822 1823/* Must be invoked under gp->lock and gp->tx_lock. */ 1824static u32 gem_setup_multicast(struct gem *gp) 1825{ 1826 u32 rxcfg = 0; 1827 int i; 1828 1829 if ((gp->dev->flags & IFF_ALLMULTI) || 1830 (gp->dev->mc_count > 256)) { 1831 for (i=0; i<16; i++) 1832 writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); 1833 rxcfg |= MAC_RXCFG_HFE; 1834 } else if (gp->dev->flags & IFF_PROMISC) { 1835 rxcfg |= MAC_RXCFG_PROM; 1836 } else { 1837 u16 hash_table[16]; 1838 u32 crc; 1839 struct dev_mc_list *dmi = gp->dev->mc_list; 1840 int i; 1841 1842 for (i = 0; i < 16; i++) 1843 hash_table[i] = 0; 1844 1845 for (i = 0; i < gp->dev->mc_count; i++) { 1846 char *addrs = dmi->dmi_addr; 1847 1848 dmi = dmi->next; 1849 1850 if (!(*addrs & 1)) 1851 continue; 1852 1853 crc = ether_crc_le(6, addrs); 1854 crc >>= 24; 1855 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 1856 } 1857 for (i=0; i<16; i++) 1858 writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2)); 1859 rxcfg |= MAC_RXCFG_HFE; 1860 } 1861 1862 return rxcfg; 1863} 1864 1865/* Must be invoked under gp->lock and gp->tx_lock. */ 1866static void gem_init_mac(struct gem *gp) 1867{ 1868 unsigned char *e = &gp->dev->dev_addr[0]; 1869 1870 writel(0x1bf0, gp->regs + MAC_SNDPAUSE); 1871 1872 writel(0x00, gp->regs + MAC_IPG0); 1873 writel(0x08, gp->regs + MAC_IPG1); 1874 writel(0x04, gp->regs + MAC_IPG2); 1875 writel(0x40, gp->regs + MAC_STIME); 1876 writel(0x40, gp->regs + MAC_MINFSZ); 1877 1878 /* Ethernet payload + header + FCS + optional VLAN tag. */ 1879 writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ); 1880 1881 writel(0x07, gp->regs + MAC_PASIZE); 1882 writel(0x04, gp->regs + MAC_JAMSIZE); 1883 writel(0x10, gp->regs + MAC_ATTLIM); 1884 writel(0x8808, gp->regs + MAC_MCTYPE); 1885 1886 writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED); 1887 1888 writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); 1889 writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); 1890 writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); 1891 1892 writel(0, gp->regs + MAC_ADDR3); 1893 writel(0, gp->regs + MAC_ADDR4); 1894 writel(0, gp->regs + MAC_ADDR5); 1895 1896 writel(0x0001, gp->regs + MAC_ADDR6); 1897 writel(0xc200, gp->regs + MAC_ADDR7); 1898 writel(0x0180, gp->regs + MAC_ADDR8); 1899 1900 writel(0, gp->regs + MAC_AFILT0); 1901 writel(0, gp->regs + MAC_AFILT1); 1902 writel(0, gp->regs + MAC_AFILT2); 1903 writel(0, gp->regs + MAC_AF21MSK); 1904 writel(0, gp->regs + MAC_AF0MSK); 1905 1906 gp->mac_rx_cfg = gem_setup_multicast(gp); 1907#ifdef STRIP_FCS 1908 gp->mac_rx_cfg |= MAC_RXCFG_SFCS; 1909#endif 1910 writel(0, gp->regs + MAC_NCOLL); 1911 writel(0, gp->regs + MAC_FASUCC); 1912 writel(0, gp->regs + MAC_ECOLL); 1913 writel(0, gp->regs + MAC_LCOLL); 1914 writel(0, gp->regs + MAC_DTIMER); 1915 writel(0, gp->regs + MAC_PATMPS); 1916 writel(0, gp->regs + MAC_RFCTR); 1917 writel(0, gp->regs + MAC_LERR); 1918 writel(0, gp->regs + MAC_AERR); 1919 writel(0, gp->regs + MAC_FCSERR); 1920 writel(0, gp->regs + MAC_RXCVERR); 1921 1922 /* Clear RX/TX/MAC/XIF config, we will set these up and enable 1923 * them once a link is established. 1924 */ 1925 writel(0, gp->regs + MAC_TXCFG); 1926 writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG); 1927 writel(0, gp->regs + MAC_MCCFG); 1928 writel(0, gp->regs + MAC_XIFCFG); 1929 1930 /* Setup MAC interrupts. We want to get all of the interesting 1931 * counter expiration events, but we do not want to hear about 1932 * normal rx/tx as the DMA engine tells us that. 1933 */ 1934 writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK); 1935 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); 1936 1937 /* Don't enable even the PAUSE interrupts for now, we 1938 * make no use of those events other than to record them. 1939 */ 1940 writel(0xffffffff, gp->regs + MAC_MCMASK); 1941 1942 /* Don't enable GEM's WOL in normal operations 1943 */ 1944 if (gp->has_wol) 1945 writel(0, gp->regs + WOL_WAKECSR); 1946} 1947 1948/* Must be invoked under gp->lock and gp->tx_lock. */ 1949static void gem_init_pause_thresholds(struct gem *gp) 1950{ 1951 u32 cfg; 1952 1953 /* Calculate pause thresholds. Setting the OFF threshold to the 1954 * full RX fifo size effectively disables PAUSE generation which 1955 * is what we do for 10/100 only GEMs which have FIFOs too small 1956 * to make real gains from PAUSE. 1957 */ 1958 if (gp->rx_fifo_sz <= (2 * 1024)) { 1959 gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz; 1960 } else { 1961 int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63; 1962 int off = (gp->rx_fifo_sz - (max_frame * 2)); 1963 int on = off - max_frame; 1964 1965 gp->rx_pause_off = off; 1966 gp->rx_pause_on = on; 1967 } 1968 1969 1970 /* Configure the chip "burst" DMA mode & enable some 1971 * HW bug fixes on Apple version 1972 */ 1973 cfg = 0; 1974 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) 1975 cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX; 1976#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) 1977 cfg |= GREG_CFG_IBURST; 1978#endif 1979 cfg |= ((31 << 1) & GREG_CFG_TXDMALIM); 1980 cfg |= ((31 << 6) & GREG_CFG_RXDMALIM); 1981 writel(cfg, gp->regs + GREG_CFG); 1982 1983 /* If Infinite Burst didn't stick, then use different 1984 * thresholds (and Apple bug fixes don't exist) 1985 */ 1986 if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) { 1987 cfg = ((2 << 1) & GREG_CFG_TXDMALIM); 1988 cfg |= ((8 << 6) & GREG_CFG_RXDMALIM); 1989 writel(cfg, gp->regs + GREG_CFG); 1990 } 1991} 1992 1993static int gem_check_invariants(struct gem *gp) 1994{ 1995 struct pci_dev *pdev = gp->pdev; 1996 u32 mif_cfg; 1997 1998 /* On Apple's sungem, we can't rely on registers as the chip 1999 * was been powered down by the firmware. The PHY is looked 2000 * up later on. 2001 */ 2002 if (pdev->vendor == PCI_VENDOR_ID_APPLE) { 2003 gp->phy_type = phy_mii_mdio0; 2004 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; 2005 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; 2006 gp->swrst_base = 0; 2007 2008 mif_cfg = readl(gp->regs + MIF_CFG); 2009 mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1); 2010 mif_cfg |= MIF_CFG_MDI0; 2011 writel(mif_cfg, gp->regs + MIF_CFG); 2012 writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE); 2013 writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG); 2014 2015 /* We hard-code the PHY address so we can properly bring it out of 2016 * reset later on, we can't really probe it at this point, though 2017 * that isn't an issue. 2018 */ 2019 if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC) 2020 gp->mii_phy_addr = 1; 2021 else 2022 gp->mii_phy_addr = 0; 2023 2024 return 0; 2025 } 2026 2027 mif_cfg = readl(gp->regs + MIF_CFG); 2028 2029 if (pdev->vendor == PCI_VENDOR_ID_SUN && 2030 pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) { 2031 /* One of the MII PHYs _must_ be present 2032 * as this chip has no gigabit PHY. 2033 */ 2034 if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { 2035 printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n", 2036 mif_cfg); 2037 return -1; 2038 } 2039 } 2040 2041 /* Determine initial PHY interface type guess. MDIO1 is the 2042 * external PHY and thus takes precedence over MDIO0. 2043 */ 2044 2045 if (mif_cfg & MIF_CFG_MDI1) { 2046 gp->phy_type = phy_mii_mdio1; 2047 mif_cfg |= MIF_CFG_PSELECT; 2048 writel(mif_cfg, gp->regs + MIF_CFG); 2049 } else if (mif_cfg & MIF_CFG_MDI0) { 2050 gp->phy_type = phy_mii_mdio0; 2051 mif_cfg &= ~MIF_CFG_PSELECT; 2052 writel(mif_cfg, gp->regs + MIF_CFG); 2053 } else { 2054 gp->phy_type = phy_serialink; 2055 } 2056 if (gp->phy_type == phy_mii_mdio1 || 2057 gp->phy_type == phy_mii_mdio0) { 2058 int i; 2059 2060 for (i = 0; i < 32; i++) { 2061 gp->mii_phy_addr = i; 2062 if (phy_read(gp, MII_BMCR) != 0xffff) 2063 break; 2064 } 2065 if (i == 32) { 2066 if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { 2067 printk(KERN_ERR PFX "RIO MII phy will not respond.\n"); 2068 return -1; 2069 } 2070 gp->phy_type = phy_serdes; 2071 } 2072 } 2073 2074 /* Fetch the FIFO configurations now too. */ 2075 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; 2076 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; 2077 2078 if (pdev->vendor == PCI_VENDOR_ID_SUN) { 2079 if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { 2080 if (gp->tx_fifo_sz != (9 * 1024) || 2081 gp->rx_fifo_sz != (20 * 1024)) { 2082 printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2083 gp->tx_fifo_sz, gp->rx_fifo_sz); 2084 return -1; 2085 } 2086 gp->swrst_base = 0; 2087 } else { 2088 if (gp->tx_fifo_sz != (2 * 1024) || 2089 gp->rx_fifo_sz != (2 * 1024)) { 2090 printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2091 gp->tx_fifo_sz, gp->rx_fifo_sz); 2092 return -1; 2093 } 2094 gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT; 2095 } 2096 } 2097 2098 return 0; 2099} 2100 2101/* Must be invoked under gp->lock and gp->tx_lock. */ 2102static void gem_reinit_chip(struct gem *gp) 2103{ 2104 /* Reset the chip */ 2105 gem_reset(gp); 2106 2107 /* Make sure ints are disabled */ 2108 gem_disable_ints(gp); 2109 2110 /* Allocate & setup ring buffers */ 2111 gem_init_rings(gp); 2112 2113 /* Configure pause thresholds */ 2114 gem_init_pause_thresholds(gp); 2115 2116 /* Init DMA & MAC engines */ 2117 gem_init_dma(gp); 2118 gem_init_mac(gp); 2119} 2120 2121 2122/* Must be invoked with no lock held. */ 2123static void gem_stop_phy(struct gem *gp, int wol) 2124{ 2125 u32 mifcfg; 2126 unsigned long flags; 2127 2128 /* Let the chip settle down a bit, it seems that helps 2129 * for sleep mode on some models 2130 */ 2131 msleep(10); 2132 2133 /* Make sure we aren't polling PHY status change. We 2134 * don't currently use that feature though 2135 */ 2136 mifcfg = readl(gp->regs + MIF_CFG); 2137 mifcfg &= ~MIF_CFG_POLL; 2138 writel(mifcfg, gp->regs + MIF_CFG); 2139 2140 if (wol && gp->has_wol) { 2141 unsigned char *e = &gp->dev->dev_addr[0]; 2142 u32 csr; 2143 2144 /* Setup wake-on-lan for MAGIC packet */ 2145 writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB, 2146 gp->regs + MAC_RXCFG); 2147 writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0); 2148 writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1); 2149 writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2); 2150 2151 writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT); 2152 csr = WOL_WAKECSR_ENABLE; 2153 if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0) 2154 csr |= WOL_WAKECSR_MII; 2155 writel(csr, gp->regs + WOL_WAKECSR); 2156 } else { 2157 writel(0, gp->regs + MAC_RXCFG); 2158 (void)readl(gp->regs + MAC_RXCFG); 2159 /* Machine sleep will die in strange ways if we 2160 * dont wait a bit here, looks like the chip takes 2161 * some time to really shut down 2162 */ 2163 msleep(10); 2164 } 2165 2166 writel(0, gp->regs + MAC_TXCFG); 2167 writel(0, gp->regs + MAC_XIFCFG); 2168 writel(0, gp->regs + TXDMA_CFG); 2169 writel(0, gp->regs + RXDMA_CFG); 2170 2171 if (!wol) { 2172 spin_lock_irqsave(&gp->lock, flags); 2173 spin_lock(&gp->tx_lock); 2174 gem_reset(gp); 2175 writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); 2176 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); 2177 spin_unlock(&gp->tx_lock); 2178 spin_unlock_irqrestore(&gp->lock, flags); 2179 2180 /* No need to take the lock here */ 2181 2182 if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) 2183 gp->phy_mii.def->ops->suspend(&gp->phy_mii); 2184 2185 /* According to Apple, we must set the MDIO pins to this begnign 2186 * state or we may 1) eat more current, 2) damage some PHYs 2187 */ 2188 writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG); 2189 writel(0, gp->regs + MIF_BBCLK); 2190 writel(0, gp->regs + MIF_BBDATA); 2191 writel(0, gp->regs + MIF_BBOENAB); 2192 writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG); 2193 (void) readl(gp->regs + MAC_XIFCFG); 2194 } 2195} 2196 2197 2198static int gem_do_start(struct net_device *dev) 2199{ 2200 struct gem *gp = dev->priv; 2201 unsigned long flags; 2202 2203 spin_lock_irqsave(&gp->lock, flags); 2204 spin_lock(&gp->tx_lock); 2205 2206 /* Enable the cell */ 2207 gem_get_cell(gp); 2208 2209 /* Init & setup chip hardware */ 2210 gem_reinit_chip(gp); 2211 2212 gp->running = 1; 2213 2214 if (gp->lstate == link_up) { 2215 netif_carrier_on(gp->dev); 2216 gem_set_link_modes(gp); 2217 } 2218 2219 netif_wake_queue(gp->dev); 2220 2221 spin_unlock(&gp->tx_lock); 2222 spin_unlock_irqrestore(&gp->lock, flags); 2223 2224 if (request_irq(gp->pdev->irq, gem_interrupt, 2225 IRQF_SHARED, dev->name, (void *)dev)) { 2226 printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name); 2227 2228 spin_lock_irqsave(&gp->lock, flags); 2229 spin_lock(&gp->tx_lock); 2230 2231 gp->running = 0; 2232 gem_reset(gp); 2233 gem_clean_rings(gp); 2234 gem_put_cell(gp); 2235 2236 spin_unlock(&gp->tx_lock); 2237 spin_unlock_irqrestore(&gp->lock, flags); 2238 2239 return -EAGAIN; 2240 } 2241 2242 return 0; 2243} 2244 2245static void gem_do_stop(struct net_device *dev, int wol) 2246{ 2247 struct gem *gp = dev->priv; 2248 unsigned long flags; 2249 2250 spin_lock_irqsave(&gp->lock, flags); 2251 spin_lock(&gp->tx_lock); 2252 2253 gp->running = 0; 2254 2255 /* Stop netif queue */ 2256 netif_stop_queue(dev); 2257 2258 /* Make sure ints are disabled */ 2259 gem_disable_ints(gp); 2260 2261 /* We can drop the lock now */ 2262 spin_unlock(&gp->tx_lock); 2263 spin_unlock_irqrestore(&gp->lock, flags); 2264 2265 /* If we are going to sleep with WOL */ 2266 gem_stop_dma(gp); 2267 msleep(10); 2268 if (!wol) 2269 gem_reset(gp); 2270 msleep(10); 2271 2272 /* Get rid of rings */ 2273 gem_clean_rings(gp); 2274 2275 /* No irq needed anymore */ 2276 free_irq(gp->pdev->irq, (void *) dev); 2277 2278 /* Cell not needed neither if no WOL */ 2279 if (!wol) { 2280 spin_lock_irqsave(&gp->lock, flags); 2281 gem_put_cell(gp); 2282 spin_unlock_irqrestore(&gp->lock, flags); 2283 } 2284} 2285 2286static void gem_reset_task(struct work_struct *work) 2287{ 2288 struct gem *gp = container_of(work, struct gem, reset_task); 2289 2290 mutex_lock(&gp->pm_mutex); 2291 2292 netif_poll_disable(gp->dev); 2293 2294 spin_lock_irq(&gp->lock); 2295 spin_lock(&gp->tx_lock); 2296 2297 if (gp->running == 0) 2298 goto not_running; 2299 2300 if (gp->running) { 2301 netif_stop_queue(gp->dev); 2302 2303 /* Reset the chip & rings */ 2304 gem_reinit_chip(gp); 2305 if (gp->lstate == link_up) 2306 gem_set_link_modes(gp); 2307 netif_wake_queue(gp->dev); 2308 } 2309 not_running: 2310 gp->reset_task_pending = 0; 2311 2312 spin_unlock(&gp->tx_lock); 2313 spin_unlock_irq(&gp->lock); 2314 2315 netif_poll_enable(gp->dev); 2316 2317 mutex_unlock(&gp->pm_mutex); 2318} 2319 2320 2321static int gem_open(struct net_device *dev) 2322{ 2323 struct gem *gp = dev->priv; 2324 int rc = 0; 2325 2326 mutex_lock(&gp->pm_mutex); 2327 2328 /* We need the cell enabled */ 2329 if (!gp->asleep) 2330 rc = gem_do_start(dev); 2331 gp->opened = (rc == 0); 2332 2333 mutex_unlock(&gp->pm_mutex); 2334 2335 return rc; 2336} 2337 2338static int gem_close(struct net_device *dev) 2339{ 2340 struct gem *gp = dev->priv; 2341 2342 /* Note: we don't need to call netif_poll_disable() here because 2343 * our caller (dev_close) already did it for us 2344 */ 2345 2346 mutex_lock(&gp->pm_mutex); 2347 2348 gp->opened = 0; 2349 if (!gp->asleep) 2350 gem_do_stop(dev, 0); 2351 2352 mutex_unlock(&gp->pm_mutex); 2353 2354 return 0; 2355} 2356 2357#ifdef CONFIG_PM 2358static int gem_suspend(struct pci_dev *pdev, pm_message_t state) 2359{ 2360 struct net_device *dev = pci_get_drvdata(pdev); 2361 struct gem *gp = dev->priv; 2362 unsigned long flags; 2363 2364 mutex_lock(&gp->pm_mutex); 2365 2366 netif_poll_disable(dev); 2367 2368 printk(KERN_INFO "%s: suspending, WakeOnLan %s\n", 2369 dev->name, 2370 (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled"); 2371 2372 /* Keep the cell enabled during the entire operation */ 2373 spin_lock_irqsave(&gp->lock, flags); 2374 spin_lock(&gp->tx_lock); 2375 gem_get_cell(gp); 2376 spin_unlock(&gp->tx_lock); 2377 spin_unlock_irqrestore(&gp->lock, flags); 2378 2379 /* If the driver is opened, we stop the MAC */ 2380 if (gp->opened) { 2381 /* Stop traffic, mark us closed */ 2382 netif_device_detach(dev); 2383 2384 /* Switch off MAC, remember WOL setting */ 2385 gp->asleep_wol = gp->wake_on_lan; 2386 gem_do_stop(dev, gp->asleep_wol); 2387 } else 2388 gp->asleep_wol = 0; 2389 2390 /* Mark us asleep */ 2391 gp->asleep = 1; 2392 wmb(); 2393 2394 /* Stop the link timer */ 2395 del_timer_sync(&gp->link_timer); 2396 2397 /* Now we release the mutex to not block the reset task who 2398 * can take it too. We are marked asleep, so there will be no 2399 * conflict here 2400 */ 2401 mutex_unlock(&gp->pm_mutex); 2402 2403 /* Wait for a pending reset task to complete */ 2404 while (gp->reset_task_pending) 2405 yield(); 2406 flush_scheduled_work(); 2407 2408 /* Shut the PHY down eventually and setup WOL */ 2409 gem_stop_phy(gp, gp->asleep_wol); 2410 2411 /* Make sure bus master is disabled */ 2412 pci_disable_device(gp->pdev); 2413 2414 /* Release the cell, no need to take a lock at this point since 2415 * nothing else can happen now 2416 */ 2417 gem_put_cell(gp); 2418 2419 return 0; 2420} 2421 2422static int gem_resume(struct pci_dev *pdev) 2423{ 2424 struct net_device *dev = pci_get_drvdata(pdev); 2425 struct gem *gp = dev->priv; 2426 unsigned long flags; 2427 2428 printk(KERN_INFO "%s: resuming\n", dev->name); 2429 2430 mutex_lock(&gp->pm_mutex); 2431 2432 /* Keep the cell enabled during the entire operation, no need to 2433 * take a lock here tho since nothing else can happen while we are 2434 * marked asleep 2435 */ 2436 gem_get_cell(gp); 2437 2438 /* Make sure PCI access and bus master are enabled */ 2439 if (pci_enable_device(gp->pdev)) { 2440 printk(KERN_ERR "%s: Can't re-enable chip !\n", 2441 dev->name); 2442 /* Put cell and forget it for now, it will be considered as 2443 * still asleep, a new sleep cycle may bring it back 2444 */ 2445 gem_put_cell(gp); 2446 mutex_unlock(&gp->pm_mutex); 2447 return 0; 2448 } 2449 pci_set_master(gp->pdev); 2450 2451 /* Reset everything */ 2452 gem_reset(gp); 2453 2454 /* Mark us woken up */ 2455 gp->asleep = 0; 2456 wmb(); 2457 2458 /* Bring the PHY back. Again, lock is useless at this point as 2459 * nothing can be happening until we restart the whole thing 2460 */ 2461 gem_init_phy(gp); 2462 2463 /* If we were opened, bring everything back */ 2464 if (gp->opened) { 2465 /* Restart MAC */ 2466 gem_do_start(dev); 2467 2468 /* Re-attach net device */ 2469 netif_device_attach(dev); 2470 2471 } 2472 2473 spin_lock_irqsave(&gp->lock, flags); 2474 spin_lock(&gp->tx_lock); 2475 2476 /* If we had WOL enabled, the cell clock was never turned off during 2477 * sleep, so we end up beeing unbalanced. Fix that here 2478 */ 2479 if (gp->asleep_wol) 2480 gem_put_cell(gp); 2481 2482 /* This function doesn't need to hold the cell, it will be held if the 2483 * driver is open by gem_do_start(). 2484 */ 2485 gem_put_cell(gp); 2486 2487 spin_unlock(&gp->tx_lock); 2488 spin_unlock_irqrestore(&gp->lock, flags); 2489 2490 netif_poll_enable(dev); 2491 2492 mutex_unlock(&gp->pm_mutex); 2493 2494 return 0; 2495} 2496#endif /* CONFIG_PM */ 2497 2498static struct net_device_stats *gem_get_stats(struct net_device *dev) 2499{ 2500 struct gem *gp = dev->priv; 2501 struct net_device_stats *stats = &gp->net_stats; 2502 2503 spin_lock_irq(&gp->lock); 2504 spin_lock(&gp->tx_lock); 2505 2506 /* I have seen this being called while the PM was in progress, 2507 * so we shield against this 2508 */ 2509 if (gp->running) { 2510 stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR); 2511 writel(0, gp->regs + MAC_FCSERR); 2512 2513 stats->rx_frame_errors += readl(gp->regs + MAC_AERR); 2514 writel(0, gp->regs + MAC_AERR); 2515 2516 stats->rx_length_errors += readl(gp->regs + MAC_LERR); 2517 writel(0, gp->regs + MAC_LERR); 2518 2519 stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL); 2520 stats->collisions += 2521 (readl(gp->regs + MAC_ECOLL) + 2522 readl(gp->regs + MAC_LCOLL)); 2523 writel(0, gp->regs + MAC_ECOLL); 2524 writel(0, gp->regs + MAC_LCOLL); 2525 } 2526 2527 spin_unlock(&gp->tx_lock); 2528 spin_unlock_irq(&gp->lock); 2529 2530 return &gp->net_stats; 2531} 2532 2533static void gem_set_multicast(struct net_device *dev) 2534{ 2535 struct gem *gp = dev->priv; 2536 u32 rxcfg, rxcfg_new; 2537 int limit = 10000; 2538 2539 2540 spin_lock_irq(&gp->lock); 2541 spin_lock(&gp->tx_lock); 2542 2543 if (!gp->running) 2544 goto bail; 2545 2546 netif_stop_queue(dev); 2547 2548 rxcfg = readl(gp->regs + MAC_RXCFG); 2549 rxcfg_new = gem_setup_multicast(gp); 2550#ifdef STRIP_FCS 2551 rxcfg_new |= MAC_RXCFG_SFCS; 2552#endif 2553 gp->mac_rx_cfg = rxcfg_new; 2554 2555 writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 2556 while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) { 2557 if (!limit--) 2558 break; 2559 udelay(10); 2560 } 2561 2562 rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE); 2563 rxcfg |= rxcfg_new; 2564 2565 writel(rxcfg, gp->regs + MAC_RXCFG); 2566 2567 netif_wake_queue(dev); 2568 2569 bail: 2570 spin_unlock(&gp->tx_lock); 2571 spin_unlock_irq(&gp->lock); 2572} 2573 2574/* Jumbo-grams don't seem to work :-( */ 2575#define GEM_MIN_MTU 68 2576#if 1 2577#define GEM_MAX_MTU 1500 2578#else 2579#define GEM_MAX_MTU 9000 2580#endif 2581 2582static int gem_change_mtu(struct net_device *dev, int new_mtu) 2583{ 2584 struct gem *gp = dev->priv; 2585 2586 if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) 2587 return -EINVAL; 2588 2589 if (!netif_running(dev) || !netif_device_present(dev)) { 2590 /* We'll just catch it later when the 2591 * device is up'd or resumed. 2592 */ 2593 dev->mtu = new_mtu; 2594 return 0; 2595 } 2596 2597 mutex_lock(&gp->pm_mutex); 2598 spin_lock_irq(&gp->lock); 2599 spin_lock(&gp->tx_lock); 2600 dev->mtu = new_mtu; 2601 if (gp->running) { 2602 gem_reinit_chip(gp); 2603 if (gp->lstate == link_up) 2604 gem_set_link_modes(gp); 2605 } 2606 spin_unlock(&gp->tx_lock); 2607 spin_unlock_irq(&gp->lock); 2608 mutex_unlock(&gp->pm_mutex); 2609 2610 return 0; 2611} 2612 2613static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2614{ 2615 struct gem *gp = dev->priv; 2616 2617 strcpy(info->driver, DRV_NAME); 2618 strcpy(info->version, DRV_VERSION); 2619 strcpy(info->bus_info, pci_name(gp->pdev)); 2620} 2621 2622static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2623{ 2624 struct gem *gp = dev->priv; 2625 2626 if (gp->phy_type == phy_mii_mdio0 || 2627 gp->phy_type == phy_mii_mdio1) { 2628 if (gp->phy_mii.def) 2629 cmd->supported = gp->phy_mii.def->features; 2630 else 2631 cmd->supported = (SUPPORTED_10baseT_Half | 2632 SUPPORTED_10baseT_Full); 2633 2634 /* XXX hardcoded stuff for now */ 2635 cmd->port = PORT_MII; 2636 cmd->transceiver = XCVR_EXTERNAL; 2637 cmd->phy_address = 0; /* XXX fixed PHYAD */ 2638 2639 /* Return current PHY settings */ 2640 spin_lock_irq(&gp->lock); 2641 cmd->autoneg = gp->want_autoneg; 2642 cmd->speed = gp->phy_mii.speed; 2643 cmd->duplex = gp->phy_mii.duplex; 2644 cmd->advertising = gp->phy_mii.advertising; 2645 2646 /* If we started with a forced mode, we don't have a default 2647 * advertise set, we need to return something sensible so 2648 * userland can re-enable autoneg properly. 2649 */ 2650 if (cmd->advertising == 0) 2651 cmd->advertising = cmd->supported; 2652 spin_unlock_irq(&gp->lock); 2653 } else { // XXX PCS ? 2654 cmd->supported = 2655 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 2656 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 2657 SUPPORTED_Autoneg); 2658 cmd->advertising = cmd->supported; 2659 cmd->speed = 0; 2660 cmd->duplex = cmd->port = cmd->phy_address = 2661 cmd->transceiver = cmd->autoneg = 0; 2662 } 2663 cmd->maxtxpkt = cmd->maxrxpkt = 0; 2664 2665 return 0; 2666} 2667 2668static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2669{ 2670 struct gem *gp = dev->priv; 2671 2672 /* Verify the settings we care about. */ 2673 if (cmd->autoneg != AUTONEG_ENABLE && 2674 cmd->autoneg != AUTONEG_DISABLE) 2675 return -EINVAL; 2676 2677 if (cmd->autoneg == AUTONEG_ENABLE && 2678 cmd->advertising == 0) 2679 return -EINVAL; 2680 2681 if (cmd->autoneg == AUTONEG_DISABLE && 2682 ((cmd->speed != SPEED_1000 && 2683 cmd->speed != SPEED_100 && 2684 cmd->speed != SPEED_10) || 2685 (cmd->duplex != DUPLEX_HALF && 2686 cmd->duplex != DUPLEX_FULL))) 2687 return -EINVAL; 2688 2689 /* Apply settings and restart link process. */ 2690 spin_lock_irq(&gp->lock); 2691 gem_get_cell(gp); 2692 gem_begin_auto_negotiation(gp, cmd); 2693 gem_put_cell(gp); 2694 spin_unlock_irq(&gp->lock); 2695 2696 return 0; 2697} 2698 2699static int gem_nway_reset(struct net_device *dev) 2700{ 2701 struct gem *gp = dev->priv; 2702 2703 if (!gp->want_autoneg) 2704 return -EINVAL; 2705 2706 /* Restart link process. */ 2707 spin_lock_irq(&gp->lock); 2708 gem_get_cell(gp); 2709 gem_begin_auto_negotiation(gp, NULL); 2710 gem_put_cell(gp); 2711 spin_unlock_irq(&gp->lock); 2712 2713 return 0; 2714} 2715 2716static u32 gem_get_msglevel(struct net_device *dev) 2717{ 2718 struct gem *gp = dev->priv; 2719 return gp->msg_enable; 2720} 2721 2722static void gem_set_msglevel(struct net_device *dev, u32 value) 2723{ 2724 struct gem *gp = dev->priv; 2725 gp->msg_enable = value; 2726} 2727 2728 2729/* Add more when I understand how to program the chip */ 2730/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */ 2731 2732#define WOL_SUPPORTED_MASK (WAKE_MAGIC) 2733 2734static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2735{ 2736 struct gem *gp = dev->priv; 2737 2738 /* Add more when I understand how to program the chip */ 2739 if (gp->has_wol) { 2740 wol->supported = WOL_SUPPORTED_MASK; 2741 wol->wolopts = gp->wake_on_lan; 2742 } else { 2743 wol->supported = 0; 2744 wol->wolopts = 0; 2745 } 2746} 2747 2748static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2749{ 2750 struct gem *gp = dev->priv; 2751 2752 if (!gp->has_wol) 2753 return -EOPNOTSUPP; 2754 gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK; 2755 return 0; 2756} 2757 2758static const struct ethtool_ops gem_ethtool_ops = { 2759 .get_drvinfo = gem_get_drvinfo, 2760 .get_link = ethtool_op_get_link, 2761 .get_settings = gem_get_settings, 2762 .set_settings = gem_set_settings, 2763 .nway_reset = gem_nway_reset, 2764 .get_msglevel = gem_get_msglevel, 2765 .set_msglevel = gem_set_msglevel, 2766 .get_wol = gem_get_wol, 2767 .set_wol = gem_set_wol, 2768}; 2769 2770static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2771{ 2772 struct gem *gp = dev->priv; 2773 struct mii_ioctl_data *data = if_mii(ifr); 2774 int rc = -EOPNOTSUPP; 2775 unsigned long flags; 2776 2777 /* Hold the PM mutex while doing ioctl's or we may collide 2778 * with power management. 2779 */ 2780 mutex_lock(&gp->pm_mutex); 2781 2782 spin_lock_irqsave(&gp->lock, flags); 2783 gem_get_cell(gp); 2784 spin_unlock_irqrestore(&gp->lock, flags); 2785 2786 switch (cmd) { 2787 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 2788 data->phy_id = gp->mii_phy_addr; 2789 /* Fallthrough... */ 2790 2791 case SIOCGMIIREG: /* Read MII PHY register. */ 2792 if (!gp->running) 2793 rc = -EAGAIN; 2794 else { 2795 data->val_out = __phy_read(gp, data->phy_id & 0x1f, 2796 data->reg_num & 0x1f); 2797 rc = 0; 2798 } 2799 break; 2800 2801 case SIOCSMIIREG: /* Write MII PHY register. */ 2802 if (!capable(CAP_NET_ADMIN)) 2803 rc = -EPERM; 2804 else if (!gp->running) 2805 rc = -EAGAIN; 2806 else { 2807 __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, 2808 data->val_in); 2809 rc = 0; 2810 } 2811 break; 2812 }; 2813 2814 spin_lock_irqsave(&gp->lock, flags); 2815 gem_put_cell(gp); 2816 spin_unlock_irqrestore(&gp->lock, flags); 2817 2818 mutex_unlock(&gp->pm_mutex); 2819 2820 return rc; 2821} 2822 2823#if (!defined(__sparc__) && !defined(CONFIG_PPC_PMAC)) 2824/* Fetch MAC address from vital product data of PCI ROM. */ 2825static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) 2826{ 2827 int this_offset; 2828 2829 for (this_offset = 0x20; this_offset < len; this_offset++) { 2830 void __iomem *p = rom_base + this_offset; 2831 int i; 2832 2833 if (readb(p + 0) != 0x90 || 2834 readb(p + 1) != 0x00 || 2835 readb(p + 2) != 0x09 || 2836 readb(p + 3) != 0x4e || 2837 readb(p + 4) != 0x41 || 2838 readb(p + 5) != 0x06) 2839 continue; 2840 2841 this_offset += 6; 2842 p += 6; 2843 2844 for (i = 0; i < 6; i++) 2845 dev_addr[i] = readb(p + i); 2846 return 1; 2847 } 2848 return 0; 2849} 2850 2851static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) 2852{ 2853 size_t size; 2854 void __iomem *p = pci_map_rom(pdev, &size); 2855 2856 if (p) { 2857 int found; 2858 2859 found = readb(p) == 0x55 && 2860 readb(p + 1) == 0xaa && 2861 find_eth_addr_in_vpd(p, (64 * 1024), dev_addr); 2862 pci_unmap_rom(pdev, p); 2863 if (found) 2864 return; 2865 } 2866 2867 /* Sun MAC prefix then 3 random bytes. */ 2868 dev_addr[0] = 0x08; 2869 dev_addr[1] = 0x00; 2870 dev_addr[2] = 0x20; 2871 get_random_bytes(dev_addr + 3, 3); 2872 return; 2873} 2874#endif /* not Sparc and not PPC */ 2875 2876static int __devinit gem_get_device_address(struct gem *gp) 2877{ 2878#if defined(__sparc__) || defined(CONFIG_PPC_PMAC) 2879 struct net_device *dev = gp->dev; 2880#endif 2881 2882#if defined(__sparc__) 2883 struct pci_dev *pdev = gp->pdev; 2884 struct pcidev_cookie *pcp = pdev->sysdata; 2885 int use_idprom = 1; 2886 2887 if (pcp != NULL) { 2888 unsigned char *addr; 2889 int len; 2890 2891 addr = of_get_property(pcp->prom_node, "local-mac-address", 2892 &len); 2893 if (addr && len == 6) { 2894 use_idprom = 0; 2895 memcpy(dev->dev_addr, addr, 6); 2896 } 2897 } 2898 if (use_idprom) 2899 memcpy(dev->dev_addr, idprom->id_ethaddr, 6); 2900#elif defined(CONFIG_PPC_PMAC) 2901 const unsigned char *addr; 2902 2903 addr = get_property(gp->of_node, "local-mac-address", NULL); 2904 if (addr == NULL) { 2905 printk("\n"); 2906 printk(KERN_ERR "%s: can't get mac-address\n", dev->name); 2907 return -1; 2908 } 2909 memcpy(dev->dev_addr, addr, 6); 2910#else 2911 get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); 2912#endif 2913 return 0; 2914} 2915 2916static void gem_remove_one(struct pci_dev *pdev) 2917{ 2918 struct net_device *dev = pci_get_drvdata(pdev); 2919 2920 if (dev) { 2921 struct gem *gp = dev->priv; 2922 2923 unregister_netdev(dev); 2924 2925 /* Stop the link timer */ 2926 del_timer_sync(&gp->link_timer); 2927 2928 /* We shouldn't need any locking here */ 2929 gem_get_cell(gp); 2930 2931 /* Wait for a pending reset task to complete */ 2932 while (gp->reset_task_pending) 2933 yield(); 2934 flush_scheduled_work(); 2935 2936 /* Shut the PHY down */ 2937 gem_stop_phy(gp, 0); 2938 2939 gem_put_cell(gp); 2940 2941 /* Make sure bus master is disabled */ 2942 pci_disable_device(gp->pdev); 2943 2944 /* Free resources */ 2945 pci_free_consistent(pdev, 2946 sizeof(struct gem_init_block), 2947 gp->init_block, 2948 gp->gblock_dvma); 2949 iounmap(gp->regs); 2950 pci_release_regions(pdev); 2951 free_netdev(dev); 2952 2953 pci_set_drvdata(pdev, NULL); 2954 } 2955} 2956 2957static int __devinit gem_init_one(struct pci_dev *pdev, 2958 const struct pci_device_id *ent) 2959{ 2960 static int gem_version_printed = 0; 2961 unsigned long gemreg_base, gemreg_len; 2962 struct net_device *dev; 2963 struct gem *gp; 2964 int i, err, pci_using_dac; 2965 2966 if (gem_version_printed++ == 0) 2967 printk(KERN_INFO "%s", version); 2968 2969 /* Apple gmac note: during probe, the chip is powered up by 2970 * the arch code to allow the code below to work (and to let 2971 * the chip be probed on the config space. It won't stay powered 2972 * up until the interface is brought up however, so we can't rely 2973 * on register configuration done at this point. 2974 */ 2975 err = pci_enable_device(pdev); 2976 if (err) { 2977 printk(KERN_ERR PFX "Cannot enable MMIO operation, " 2978 "aborting.\n"); 2979 return err; 2980 } 2981 pci_set_master(pdev); 2982 2983 /* Configure DMA attributes. */ 2984 2985 /* All of the GEM documentation states that 64-bit DMA addressing 2986 * is fully supported and should work just fine. However the 2987 * front end for RIO based GEMs is different and only supports 2988 * 32-bit addressing. 2989 * 2990 * For now we assume the various PPC GEMs are 32-bit only as well. 2991 */ 2992 if (pdev->vendor == PCI_VENDOR_ID_SUN && 2993 pdev->device == PCI_DEVICE_ID_SUN_GEM && 2994 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 2995 pci_using_dac = 1; 2996 } else { 2997 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 2998 if (err) { 2999 printk(KERN_ERR PFX "No usable DMA configuration, " 3000 "aborting.\n"); 3001 goto err_disable_device; 3002 } 3003 pci_using_dac = 0; 3004 } 3005 3006 gemreg_base = pci_resource_start(pdev, 0); 3007 gemreg_len = pci_resource_len(pdev, 0); 3008 3009 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { 3010 printk(KERN_ERR PFX "Cannot find proper PCI device " 3011 "base address, aborting.\n"); 3012 err = -ENODEV; 3013 goto err_disable_device; 3014 } 3015 3016 dev = alloc_etherdev(sizeof(*gp)); 3017 if (!dev) { 3018 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); 3019 err = -ENOMEM; 3020 goto err_disable_device; 3021 } 3022 SET_MODULE_OWNER(dev); 3023 SET_NETDEV_DEV(dev, &pdev->dev); 3024 3025 gp = dev->priv; 3026 3027 err = pci_request_regions(pdev, DRV_NAME); 3028 if (err) { 3029 printk(KERN_ERR PFX "Cannot obtain PCI resources, " 3030 "aborting.\n"); 3031 goto err_out_free_netdev; 3032 } 3033 3034 gp->pdev = pdev; 3035 dev->base_addr = (long) pdev; 3036 gp->dev = dev; 3037 3038 gp->msg_enable = DEFAULT_MSG; 3039 3040 spin_lock_init(&gp->lock); 3041 spin_lock_init(&gp->tx_lock); 3042 mutex_init(&gp->pm_mutex); 3043 3044 init_timer(&gp->link_timer); 3045 gp->link_timer.function = gem_link_timer; 3046 gp->link_timer.data = (unsigned long) gp; 3047 3048 INIT_WORK(&gp->reset_task, gem_reset_task); 3049 3050 gp->lstate = link_down; 3051 gp->timer_ticks = 0; 3052 netif_carrier_off(dev); 3053 3054 gp->regs = ioremap(gemreg_base, gemreg_len); 3055 if (gp->regs == 0UL) { 3056 printk(KERN_ERR PFX "Cannot map device registers, " 3057 "aborting.\n"); 3058 err = -EIO; 3059 goto err_out_free_res; 3060 } 3061 3062 /* On Apple, we want a reference to the Open Firmware device-tree 3063 * node. We use it for clock control. 3064 */ 3065#ifdef CONFIG_PPC_PMAC 3066 gp->of_node = pci_device_to_OF_node(pdev); 3067#endif 3068 3069 /* Only Apple version supports WOL afaik */ 3070 if (pdev->vendor == PCI_VENDOR_ID_APPLE) 3071 gp->has_wol = 1; 3072 3073 /* Make sure cell is enabled */ 3074 gem_get_cell(gp); 3075 3076 /* Make sure everything is stopped and in init state */ 3077 gem_reset(gp); 3078 3079 /* Fill up the mii_phy structure (even if we won't use it) */ 3080 gp->phy_mii.dev = dev; 3081 gp->phy_mii.mdio_read = _phy_read; 3082 gp->phy_mii.mdio_write = _phy_write; 3083#ifdef CONFIG_PPC_PMAC 3084 gp->phy_mii.platform_data = gp->of_node; 3085#endif 3086 /* By default, we start with autoneg */ 3087 gp->want_autoneg = 1; 3088 3089 /* Check fifo sizes, PHY type, etc... */ 3090 if (gem_check_invariants(gp)) { 3091 err = -ENODEV; 3092 goto err_out_iounmap; 3093 } 3094 3095 /* It is guaranteed that the returned buffer will be at least 3096 * PAGE_SIZE aligned. 3097 */ 3098 gp->init_block = (struct gem_init_block *) 3099 pci_alloc_consistent(pdev, sizeof(struct gem_init_block), 3100 &gp->gblock_dvma); 3101 if (!gp->init_block) { 3102 printk(KERN_ERR PFX "Cannot allocate init block, " 3103 "aborting.\n"); 3104 err = -ENOMEM; 3105 goto err_out_iounmap; 3106 } 3107 3108 if (gem_get_device_address(gp)) 3109 goto err_out_free_consistent; 3110 3111 dev->open = gem_open; 3112 dev->stop = gem_close; 3113 dev->hard_start_xmit = gem_start_xmit; 3114 dev->get_stats = gem_get_stats; 3115 dev->set_multicast_list = gem_set_multicast; 3116 dev->do_ioctl = gem_ioctl; 3117 dev->poll = gem_poll; 3118 dev->weight = 64; 3119 dev->ethtool_ops = &gem_ethtool_ops; 3120 dev->tx_timeout = gem_tx_timeout; 3121 dev->watchdog_timeo = 5 * HZ; 3122 dev->change_mtu = gem_change_mtu; 3123 dev->irq = pdev->irq; 3124 dev->dma = 0; 3125#ifdef CONFIG_NET_POLL_CONTROLLER 3126 dev->poll_controller = gem_poll_controller; 3127#endif 3128 3129 /* Set that now, in case PM kicks in now */ 3130 pci_set_drvdata(pdev, dev); 3131 3132 /* Detect & init PHY, start autoneg, we release the cell now 3133 * too, it will be managed by whoever needs it 3134 */ 3135 gem_init_phy(gp); 3136 3137 spin_lock_irq(&gp->lock); 3138 gem_put_cell(gp); 3139 spin_unlock_irq(&gp->lock); 3140 3141 /* Register with kernel */ 3142 if (register_netdev(dev)) { 3143 printk(KERN_ERR PFX "Cannot register net device, " 3144 "aborting.\n"); 3145 err = -ENOMEM; 3146 goto err_out_free_consistent; 3147 } 3148 3149 printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ", 3150 dev->name); 3151 for (i = 0; i < 6; i++) 3152 printk("%2.2x%c", dev->dev_addr[i], 3153 i == 5 ? ' ' : ':'); 3154 printk("\n"); 3155 3156 if (gp->phy_type == phy_mii_mdio0 || 3157 gp->phy_type == phy_mii_mdio1) 3158 printk(KERN_INFO "%s: Found %s PHY\n", dev->name, 3159 gp->phy_mii.def ? gp->phy_mii.def->name : "no"); 3160 3161 /* GEM can do it all... */ 3162 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX; 3163 if (pci_using_dac) 3164 dev->features |= NETIF_F_HIGHDMA; 3165 3166 return 0; 3167 3168err_out_free_consistent: 3169 gem_remove_one(pdev); 3170err_out_iounmap: 3171 gem_put_cell(gp); 3172 iounmap(gp->regs); 3173 3174err_out_free_res: 3175 pci_release_regions(pdev); 3176 3177err_out_free_netdev: 3178 free_netdev(dev); 3179err_disable_device: 3180 pci_disable_device(pdev); 3181 return err; 3182 3183} 3184 3185 3186static struct pci_driver gem_driver = { 3187 .name = GEM_MODULE_NAME, 3188 .id_table = gem_pci_tbl, 3189 .probe = gem_init_one, 3190 .remove = gem_remove_one, 3191#ifdef CONFIG_PM 3192 .suspend = gem_suspend, 3193 .resume = gem_resume, 3194#endif /* CONFIG_PM */ 3195}; 3196 3197static int __init gem_init(void) 3198{ 3199 return pci_register_driver(&gem_driver); 3200} 3201 3202static void __exit gem_cleanup(void) 3203{ 3204 pci_unregister_driver(&gem_driver); 3205} 3206 3207module_init(gem_init); 3208module_exit(gem_cleanup);