Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.13 3207 lines 81 kB view raw
1/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $ 2 * sungem.c: Sun GEM ethernet driver. 3 * 4 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com) 5 * 6 * Support for Apple GMAC and assorted PHYs, WOL, Power Management 7 * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org) 8 * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp. 9 * 10 * NAPI and NETPOLL support 11 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com) 12 * 13 * TODO: 14 * - Now that the driver was significantly simplified, I need to rework 15 * the locking. I'm sure we don't need _2_ spinlocks, and we probably 16 * can avoid taking most of them for so long period of time (and schedule 17 * instead). The main issues at this point are caused by the netdev layer 18 * though: 19 * 20 * gem_change_mtu() and gem_set_multicast() are called with a read_lock() 21 * help by net/core/dev.c, thus they can't schedule. That means they can't 22 * call netif_poll_disable() neither, thus force gem_poll() to keep a spinlock 23 * where it could have been dropped. change_mtu especially would love also to 24 * be able to msleep instead of horrid locked delays when resetting the HW, 25 * but that read_lock() makes it impossible, unless I defer it's action to 26 * the reset task, which means it'll be asynchronous (won't take effect until 27 * the system schedules a bit). 28 * 29 * Also, it would probably be possible to also remove most of the long-life 30 * locking in open/resume code path (gem_reinit_chip) by beeing more careful 31 * about when we can start taking interrupts or get xmit() called... 32 */ 33 34#include <linux/module.h> 35#include <linux/kernel.h> 36#include <linux/types.h> 37#include <linux/fcntl.h> 38#include <linux/interrupt.h> 39#include <linux/ioport.h> 40#include <linux/in.h> 41#include <linux/slab.h> 42#include <linux/string.h> 43#include <linux/delay.h> 44#include <linux/init.h> 45#include <linux/errno.h> 46#include <linux/pci.h> 47#include <linux/dma-mapping.h> 48#include <linux/netdevice.h> 49#include <linux/etherdevice.h> 50#include <linux/skbuff.h> 51#include <linux/mii.h> 52#include <linux/ethtool.h> 53#include <linux/crc32.h> 54#include <linux/random.h> 55#include <linux/workqueue.h> 56#include <linux/if_vlan.h> 57#include <linux/bitops.h> 58 59#include <asm/system.h> 60#include <asm/io.h> 61#include <asm/byteorder.h> 62#include <asm/uaccess.h> 63#include <asm/irq.h> 64 65#ifdef __sparc__ 66#include <asm/idprom.h> 67#include <asm/openprom.h> 68#include <asm/oplib.h> 69#include <asm/pbm.h> 70#endif 71 72#ifdef CONFIG_PPC_PMAC 73#include <asm/pci-bridge.h> 74#include <asm/prom.h> 75#include <asm/machdep.h> 76#include <asm/pmac_feature.h> 77#endif 78 79#include "sungem_phy.h" 80#include "sungem.h" 81 82/* Stripping FCS is causing problems, disabled for now */ 83#undef STRIP_FCS 84 85#define DEFAULT_MSG (NETIF_MSG_DRV | \ 86 NETIF_MSG_PROBE | \ 87 NETIF_MSG_LINK) 88 89#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ 90 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ 91 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full) 92 93#define DRV_NAME "sungem" 94#define DRV_VERSION "0.98" 95#define DRV_RELDATE "8/24/03" 96#define DRV_AUTHOR "David S. Miller (davem@redhat.com)" 97 98static char version[] __devinitdata = 99 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; 100 101MODULE_AUTHOR(DRV_AUTHOR); 102MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); 103MODULE_LICENSE("GPL"); 104 105#define GEM_MODULE_NAME "gem" 106#define PFX GEM_MODULE_NAME ": " 107 108static struct pci_device_id gem_pci_tbl[] = { 109 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, 110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 111 112 /* These models only differ from the original GEM in 113 * that their tx/rx fifos are of a different size and 114 * they only support 10/100 speeds. -DaveM 115 * 116 * Apple's GMAC does support gigabit on machines with 117 * the BCM54xx PHYs. -BenH 118 */ 119 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM, 120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 121 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC, 122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 123 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP, 124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 125 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2, 126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 127 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC, 128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 129 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM, 130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 131 {0, } 132}; 133 134MODULE_DEVICE_TABLE(pci, gem_pci_tbl); 135 136static u16 __phy_read(struct gem *gp, int phy_addr, int reg) 137{ 138 u32 cmd; 139 int limit = 10000; 140 141 cmd = (1 << 30); 142 cmd |= (2 << 28); 143 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; 144 cmd |= (reg << 18) & MIF_FRAME_REGAD; 145 cmd |= (MIF_FRAME_TAMSB); 146 writel(cmd, gp->regs + MIF_FRAME); 147 148 while (limit--) { 149 cmd = readl(gp->regs + MIF_FRAME); 150 if (cmd & MIF_FRAME_TALSB) 151 break; 152 153 udelay(10); 154 } 155 156 if (!limit) 157 cmd = 0xffff; 158 159 return cmd & MIF_FRAME_DATA; 160} 161 162static inline int _phy_read(struct net_device *dev, int mii_id, int reg) 163{ 164 struct gem *gp = dev->priv; 165 return __phy_read(gp, mii_id, reg); 166} 167 168static inline u16 phy_read(struct gem *gp, int reg) 169{ 170 return __phy_read(gp, gp->mii_phy_addr, reg); 171} 172 173static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val) 174{ 175 u32 cmd; 176 int limit = 10000; 177 178 cmd = (1 << 30); 179 cmd |= (1 << 28); 180 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; 181 cmd |= (reg << 18) & MIF_FRAME_REGAD; 182 cmd |= (MIF_FRAME_TAMSB); 183 cmd |= (val & MIF_FRAME_DATA); 184 writel(cmd, gp->regs + MIF_FRAME); 185 186 while (limit--) { 187 cmd = readl(gp->regs + MIF_FRAME); 188 if (cmd & MIF_FRAME_TALSB) 189 break; 190 191 udelay(10); 192 } 193} 194 195static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val) 196{ 197 struct gem *gp = dev->priv; 198 __phy_write(gp, mii_id, reg, val & 0xffff); 199} 200 201static inline void phy_write(struct gem *gp, int reg, u16 val) 202{ 203 __phy_write(gp, gp->mii_phy_addr, reg, val); 204} 205 206static inline void gem_enable_ints(struct gem *gp) 207{ 208 /* Enable all interrupts but TXDONE */ 209 writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK); 210} 211 212static inline void gem_disable_ints(struct gem *gp) 213{ 214 /* Disable all interrupts, including TXDONE */ 215 writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK); 216} 217 218static void gem_get_cell(struct gem *gp) 219{ 220 BUG_ON(gp->cell_enabled < 0); 221 gp->cell_enabled++; 222#ifdef CONFIG_PPC_PMAC 223 if (gp->cell_enabled == 1) { 224 mb(); 225 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1); 226 udelay(10); 227 } 228#endif /* CONFIG_PPC_PMAC */ 229} 230 231/* Turn off the chip's clock */ 232static void gem_put_cell(struct gem *gp) 233{ 234 BUG_ON(gp->cell_enabled <= 0); 235 gp->cell_enabled--; 236#ifdef CONFIG_PPC_PMAC 237 if (gp->cell_enabled == 0) { 238 mb(); 239 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0); 240 udelay(10); 241 } 242#endif /* CONFIG_PPC_PMAC */ 243} 244 245static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) 246{ 247 if (netif_msg_intr(gp)) 248 printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name); 249} 250 251static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 252{ 253 u32 pcs_istat = readl(gp->regs + PCS_ISTAT); 254 u32 pcs_miistat; 255 256 if (netif_msg_intr(gp)) 257 printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n", 258 gp->dev->name, pcs_istat); 259 260 if (!(pcs_istat & PCS_ISTAT_LSC)) { 261 printk(KERN_ERR "%s: PCS irq but no link status change???\n", 262 dev->name); 263 return 0; 264 } 265 266 /* The link status bit latches on zero, so you must 267 * read it twice in such a case to see a transition 268 * to the link being up. 269 */ 270 pcs_miistat = readl(gp->regs + PCS_MIISTAT); 271 if (!(pcs_miistat & PCS_MIISTAT_LS)) 272 pcs_miistat |= 273 (readl(gp->regs + PCS_MIISTAT) & 274 PCS_MIISTAT_LS); 275 276 if (pcs_miistat & PCS_MIISTAT_ANC) { 277 /* The remote-fault indication is only valid 278 * when autoneg has completed. 279 */ 280 if (pcs_miistat & PCS_MIISTAT_RF) 281 printk(KERN_INFO "%s: PCS AutoNEG complete, " 282 "RemoteFault\n", dev->name); 283 else 284 printk(KERN_INFO "%s: PCS AutoNEG complete.\n", 285 dev->name); 286 } 287 288 if (pcs_miistat & PCS_MIISTAT_LS) { 289 printk(KERN_INFO "%s: PCS link is now up.\n", 290 dev->name); 291 netif_carrier_on(gp->dev); 292 } else { 293 printk(KERN_INFO "%s: PCS link is now down.\n", 294 dev->name); 295 netif_carrier_off(gp->dev); 296 /* If this happens and the link timer is not running, 297 * reset so we re-negotiate. 298 */ 299 if (!timer_pending(&gp->link_timer)) 300 return 1; 301 } 302 303 return 0; 304} 305 306static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 307{ 308 u32 txmac_stat = readl(gp->regs + MAC_TXSTAT); 309 310 if (netif_msg_intr(gp)) 311 printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", 312 gp->dev->name, txmac_stat); 313 314 /* Defer timer expiration is quite normal, 315 * don't even log the event. 316 */ 317 if ((txmac_stat & MAC_TXSTAT_DTE) && 318 !(txmac_stat & ~MAC_TXSTAT_DTE)) 319 return 0; 320 321 if (txmac_stat & MAC_TXSTAT_URUN) { 322 printk(KERN_ERR "%s: TX MAC xmit underrun.\n", 323 dev->name); 324 gp->net_stats.tx_fifo_errors++; 325 } 326 327 if (txmac_stat & MAC_TXSTAT_MPE) { 328 printk(KERN_ERR "%s: TX MAC max packet size error.\n", 329 dev->name); 330 gp->net_stats.tx_errors++; 331 } 332 333 /* The rest are all cases of one of the 16-bit TX 334 * counters expiring. 335 */ 336 if (txmac_stat & MAC_TXSTAT_NCE) 337 gp->net_stats.collisions += 0x10000; 338 339 if (txmac_stat & MAC_TXSTAT_ECE) { 340 gp->net_stats.tx_aborted_errors += 0x10000; 341 gp->net_stats.collisions += 0x10000; 342 } 343 344 if (txmac_stat & MAC_TXSTAT_LCE) { 345 gp->net_stats.tx_aborted_errors += 0x10000; 346 gp->net_stats.collisions += 0x10000; 347 } 348 349 /* We do not keep track of MAC_TXSTAT_FCE and 350 * MAC_TXSTAT_PCE events. 351 */ 352 return 0; 353} 354 355/* When we get a RX fifo overflow, the RX unit in GEM is probably hung 356 * so we do the following. 357 * 358 * If any part of the reset goes wrong, we return 1 and that causes the 359 * whole chip to be reset. 360 */ 361static int gem_rxmac_reset(struct gem *gp) 362{ 363 struct net_device *dev = gp->dev; 364 int limit, i; 365 u64 desc_dma; 366 u32 val; 367 368 /* First, reset & disable MAC RX. */ 369 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); 370 for (limit = 0; limit < 5000; limit++) { 371 if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD)) 372 break; 373 udelay(10); 374 } 375 if (limit == 5000) { 376 printk(KERN_ERR "%s: RX MAC will not reset, resetting whole " 377 "chip.\n", dev->name); 378 return 1; 379 } 380 381 writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB, 382 gp->regs + MAC_RXCFG); 383 for (limit = 0; limit < 5000; limit++) { 384 if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB)) 385 break; 386 udelay(10); 387 } 388 if (limit == 5000) { 389 printk(KERN_ERR "%s: RX MAC will not disable, resetting whole " 390 "chip.\n", dev->name); 391 return 1; 392 } 393 394 /* Second, disable RX DMA. */ 395 writel(0, gp->regs + RXDMA_CFG); 396 for (limit = 0; limit < 5000; limit++) { 397 if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE)) 398 break; 399 udelay(10); 400 } 401 if (limit == 5000) { 402 printk(KERN_ERR "%s: RX DMA will not disable, resetting whole " 403 "chip.\n", dev->name); 404 return 1; 405 } 406 407 udelay(5000); 408 409 /* Execute RX reset command. */ 410 writel(gp->swrst_base | GREG_SWRST_RXRST, 411 gp->regs + GREG_SWRST); 412 for (limit = 0; limit < 5000; limit++) { 413 if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST)) 414 break; 415 udelay(10); 416 } 417 if (limit == 5000) { 418 printk(KERN_ERR "%s: RX reset command will not execute, resetting " 419 "whole chip.\n", dev->name); 420 return 1; 421 } 422 423 /* Refresh the RX ring. */ 424 for (i = 0; i < RX_RING_SIZE; i++) { 425 struct gem_rxd *rxd = &gp->init_block->rxd[i]; 426 427 if (gp->rx_skbs[i] == NULL) { 428 printk(KERN_ERR "%s: Parts of RX ring empty, resetting " 429 "whole chip.\n", dev->name); 430 return 1; 431 } 432 433 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 434 } 435 gp->rx_new = gp->rx_old = 0; 436 437 /* Now we must reprogram the rest of RX unit. */ 438 desc_dma = (u64) gp->gblock_dvma; 439 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); 440 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); 441 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); 442 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 443 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | 444 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); 445 writel(val, gp->regs + RXDMA_CFG); 446 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) 447 writel(((5 & RXDMA_BLANK_IPKTS) | 448 ((8 << 12) & RXDMA_BLANK_ITIME)), 449 gp->regs + RXDMA_BLANK); 450 else 451 writel(((5 & RXDMA_BLANK_IPKTS) | 452 ((4 << 12) & RXDMA_BLANK_ITIME)), 453 gp->regs + RXDMA_BLANK); 454 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); 455 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); 456 writel(val, gp->regs + RXDMA_PTHRESH); 457 val = readl(gp->regs + RXDMA_CFG); 458 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 459 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); 460 val = readl(gp->regs + MAC_RXCFG); 461 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 462 463 return 0; 464} 465 466static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 467{ 468 u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT); 469 int ret = 0; 470 471 if (netif_msg_intr(gp)) 472 printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n", 473 gp->dev->name, rxmac_stat); 474 475 if (rxmac_stat & MAC_RXSTAT_OFLW) { 476 u32 smac = readl(gp->regs + MAC_SMACHINE); 477 478 printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n", 479 dev->name, smac); 480 gp->net_stats.rx_over_errors++; 481 gp->net_stats.rx_fifo_errors++; 482 483 ret = gem_rxmac_reset(gp); 484 } 485 486 if (rxmac_stat & MAC_RXSTAT_ACE) 487 gp->net_stats.rx_frame_errors += 0x10000; 488 489 if (rxmac_stat & MAC_RXSTAT_CCE) 490 gp->net_stats.rx_crc_errors += 0x10000; 491 492 if (rxmac_stat & MAC_RXSTAT_LCE) 493 gp->net_stats.rx_length_errors += 0x10000; 494 495 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE 496 * events. 497 */ 498 return ret; 499} 500 501static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 502{ 503 u32 mac_cstat = readl(gp->regs + MAC_CSTAT); 504 505 if (netif_msg_intr(gp)) 506 printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n", 507 gp->dev->name, mac_cstat); 508 509 /* This interrupt is just for pause frame and pause 510 * tracking. It is useful for diagnostics and debug 511 * but probably by default we will mask these events. 512 */ 513 if (mac_cstat & MAC_CSTAT_PS) 514 gp->pause_entered++; 515 516 if (mac_cstat & MAC_CSTAT_PRCV) 517 gp->pause_last_time_recvd = (mac_cstat >> 16); 518 519 return 0; 520} 521 522static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 523{ 524 u32 mif_status = readl(gp->regs + MIF_STATUS); 525 u32 reg_val, changed_bits; 526 527 reg_val = (mif_status & MIF_STATUS_DATA) >> 16; 528 changed_bits = (mif_status & MIF_STATUS_STAT); 529 530 gem_handle_mif_event(gp, reg_val, changed_bits); 531 532 return 0; 533} 534 535static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 536{ 537 u32 pci_estat = readl(gp->regs + GREG_PCIESTAT); 538 539 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && 540 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { 541 printk(KERN_ERR "%s: PCI error [%04x] ", 542 dev->name, pci_estat); 543 544 if (pci_estat & GREG_PCIESTAT_BADACK) 545 printk("<No ACK64# during ABS64 cycle> "); 546 if (pci_estat & GREG_PCIESTAT_DTRTO) 547 printk("<Delayed transaction timeout> "); 548 if (pci_estat & GREG_PCIESTAT_OTHER) 549 printk("<other>"); 550 printk("\n"); 551 } else { 552 pci_estat |= GREG_PCIESTAT_OTHER; 553 printk(KERN_ERR "%s: PCI error\n", dev->name); 554 } 555 556 if (pci_estat & GREG_PCIESTAT_OTHER) { 557 u16 pci_cfg_stat; 558 559 /* Interrogate PCI config space for the 560 * true cause. 561 */ 562 pci_read_config_word(gp->pdev, PCI_STATUS, 563 &pci_cfg_stat); 564 printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n", 565 dev->name, pci_cfg_stat); 566 if (pci_cfg_stat & PCI_STATUS_PARITY) 567 printk(KERN_ERR "%s: PCI parity error detected.\n", 568 dev->name); 569 if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT) 570 printk(KERN_ERR "%s: PCI target abort.\n", 571 dev->name); 572 if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT) 573 printk(KERN_ERR "%s: PCI master acks target abort.\n", 574 dev->name); 575 if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT) 576 printk(KERN_ERR "%s: PCI master abort.\n", 577 dev->name); 578 if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR) 579 printk(KERN_ERR "%s: PCI system error SERR#.\n", 580 dev->name); 581 if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY) 582 printk(KERN_ERR "%s: PCI parity error.\n", 583 dev->name); 584 585 /* Write the error bits back to clear them. */ 586 pci_cfg_stat &= (PCI_STATUS_PARITY | 587 PCI_STATUS_SIG_TARGET_ABORT | 588 PCI_STATUS_REC_TARGET_ABORT | 589 PCI_STATUS_REC_MASTER_ABORT | 590 PCI_STATUS_SIG_SYSTEM_ERROR | 591 PCI_STATUS_DETECTED_PARITY); 592 pci_write_config_word(gp->pdev, 593 PCI_STATUS, pci_cfg_stat); 594 } 595 596 /* For all PCI errors, we should reset the chip. */ 597 return 1; 598} 599 600/* All non-normal interrupt conditions get serviced here. 601 * Returns non-zero if we should just exit the interrupt 602 * handler right now (ie. if we reset the card which invalidates 603 * all of the other original irq status bits). 604 */ 605static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status) 606{ 607 if (gem_status & GREG_STAT_RXNOBUF) { 608 /* Frame arrived, no free RX buffers available. */ 609 if (netif_msg_rx_err(gp)) 610 printk(KERN_DEBUG "%s: no buffer for rx frame\n", 611 gp->dev->name); 612 gp->net_stats.rx_dropped++; 613 } 614 615 if (gem_status & GREG_STAT_RXTAGERR) { 616 /* corrupt RX tag framing */ 617 if (netif_msg_rx_err(gp)) 618 printk(KERN_DEBUG "%s: corrupt rx tag framing\n", 619 gp->dev->name); 620 gp->net_stats.rx_errors++; 621 622 goto do_reset; 623 } 624 625 if (gem_status & GREG_STAT_PCS) { 626 if (gem_pcs_interrupt(dev, gp, gem_status)) 627 goto do_reset; 628 } 629 630 if (gem_status & GREG_STAT_TXMAC) { 631 if (gem_txmac_interrupt(dev, gp, gem_status)) 632 goto do_reset; 633 } 634 635 if (gem_status & GREG_STAT_RXMAC) { 636 if (gem_rxmac_interrupt(dev, gp, gem_status)) 637 goto do_reset; 638 } 639 640 if (gem_status & GREG_STAT_MAC) { 641 if (gem_mac_interrupt(dev, gp, gem_status)) 642 goto do_reset; 643 } 644 645 if (gem_status & GREG_STAT_MIF) { 646 if (gem_mif_interrupt(dev, gp, gem_status)) 647 goto do_reset; 648 } 649 650 if (gem_status & GREG_STAT_PCIERR) { 651 if (gem_pci_interrupt(dev, gp, gem_status)) 652 goto do_reset; 653 } 654 655 return 0; 656 657do_reset: 658 gp->reset_task_pending = 1; 659 schedule_work(&gp->reset_task); 660 661 return 1; 662} 663 664static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status) 665{ 666 int entry, limit; 667 668 if (netif_msg_intr(gp)) 669 printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n", 670 gp->dev->name, gem_status); 671 672 entry = gp->tx_old; 673 limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT); 674 while (entry != limit) { 675 struct sk_buff *skb; 676 struct gem_txd *txd; 677 dma_addr_t dma_addr; 678 u32 dma_len; 679 int frag; 680 681 if (netif_msg_tx_done(gp)) 682 printk(KERN_DEBUG "%s: tx done, slot %d\n", 683 gp->dev->name, entry); 684 skb = gp->tx_skbs[entry]; 685 if (skb_shinfo(skb)->nr_frags) { 686 int last = entry + skb_shinfo(skb)->nr_frags; 687 int walk = entry; 688 int incomplete = 0; 689 690 last &= (TX_RING_SIZE - 1); 691 for (;;) { 692 walk = NEXT_TX(walk); 693 if (walk == limit) 694 incomplete = 1; 695 if (walk == last) 696 break; 697 } 698 if (incomplete) 699 break; 700 } 701 gp->tx_skbs[entry] = NULL; 702 gp->net_stats.tx_bytes += skb->len; 703 704 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 705 txd = &gp->init_block->txd[entry]; 706 707 dma_addr = le64_to_cpu(txd->buffer); 708 dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ; 709 710 pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); 711 entry = NEXT_TX(entry); 712 } 713 714 gp->net_stats.tx_packets++; 715 dev_kfree_skb_irq(skb); 716 } 717 gp->tx_old = entry; 718 719 if (netif_queue_stopped(dev) && 720 TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) 721 netif_wake_queue(dev); 722} 723 724static __inline__ void gem_post_rxds(struct gem *gp, int limit) 725{ 726 int cluster_start, curr, count, kick; 727 728 cluster_start = curr = (gp->rx_new & ~(4 - 1)); 729 count = 0; 730 kick = -1; 731 wmb(); 732 while (curr != limit) { 733 curr = NEXT_RX(curr); 734 if (++count == 4) { 735 struct gem_rxd *rxd = 736 &gp->init_block->rxd[cluster_start]; 737 for (;;) { 738 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 739 rxd++; 740 cluster_start = NEXT_RX(cluster_start); 741 if (cluster_start == curr) 742 break; 743 } 744 kick = curr; 745 count = 0; 746 } 747 } 748 if (kick >= 0) { 749 mb(); 750 writel(kick, gp->regs + RXDMA_KICK); 751 } 752} 753 754static int gem_rx(struct gem *gp, int work_to_do) 755{ 756 int entry, drops, work_done = 0; 757 u32 done; 758 759 if (netif_msg_rx_status(gp)) 760 printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", 761 gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new); 762 763 entry = gp->rx_new; 764 drops = 0; 765 done = readl(gp->regs + RXDMA_DONE); 766 for (;;) { 767 struct gem_rxd *rxd = &gp->init_block->rxd[entry]; 768 struct sk_buff *skb; 769 u64 status = cpu_to_le64(rxd->status_word); 770 dma_addr_t dma_addr; 771 int len; 772 773 if ((status & RXDCTRL_OWN) != 0) 774 break; 775 776 if (work_done >= RX_RING_SIZE || work_done >= work_to_do) 777 break; 778 779 /* When writing back RX descriptor, GEM writes status 780 * then buffer address, possibly in seperate transactions. 781 * If we don't wait for the chip to write both, we could 782 * post a new buffer to this descriptor then have GEM spam 783 * on the buffer address. We sync on the RX completion 784 * register to prevent this from happening. 785 */ 786 if (entry == done) { 787 done = readl(gp->regs + RXDMA_DONE); 788 if (entry == done) 789 break; 790 } 791 792 /* We can now account for the work we're about to do */ 793 work_done++; 794 795 skb = gp->rx_skbs[entry]; 796 797 len = (status & RXDCTRL_BUFSZ) >> 16; 798 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { 799 gp->net_stats.rx_errors++; 800 if (len < ETH_ZLEN) 801 gp->net_stats.rx_length_errors++; 802 if (len & RXDCTRL_BAD) 803 gp->net_stats.rx_crc_errors++; 804 805 /* We'll just return it to GEM. */ 806 drop_it: 807 gp->net_stats.rx_dropped++; 808 goto next; 809 } 810 811 dma_addr = cpu_to_le64(rxd->buffer); 812 if (len > RX_COPY_THRESHOLD) { 813 struct sk_buff *new_skb; 814 815 new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); 816 if (new_skb == NULL) { 817 drops++; 818 goto drop_it; 819 } 820 pci_unmap_page(gp->pdev, dma_addr, 821 RX_BUF_ALLOC_SIZE(gp), 822 PCI_DMA_FROMDEVICE); 823 gp->rx_skbs[entry] = new_skb; 824 new_skb->dev = gp->dev; 825 skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); 826 rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev, 827 virt_to_page(new_skb->data), 828 offset_in_page(new_skb->data), 829 RX_BUF_ALLOC_SIZE(gp), 830 PCI_DMA_FROMDEVICE)); 831 skb_reserve(new_skb, RX_OFFSET); 832 833 /* Trim the original skb for the netif. */ 834 skb_trim(skb, len); 835 } else { 836 struct sk_buff *copy_skb = dev_alloc_skb(len + 2); 837 838 if (copy_skb == NULL) { 839 drops++; 840 goto drop_it; 841 } 842 843 copy_skb->dev = gp->dev; 844 skb_reserve(copy_skb, 2); 845 skb_put(copy_skb, len); 846 pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 847 memcpy(copy_skb->data, skb->data, len); 848 pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 849 850 /* We'll reuse the original ring buffer. */ 851 skb = copy_skb; 852 } 853 854 skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff); 855 skb->ip_summed = CHECKSUM_HW; 856 skb->protocol = eth_type_trans(skb, gp->dev); 857 858 netif_receive_skb(skb); 859 860 gp->net_stats.rx_packets++; 861 gp->net_stats.rx_bytes += len; 862 gp->dev->last_rx = jiffies; 863 864 next: 865 entry = NEXT_RX(entry); 866 } 867 868 gem_post_rxds(gp, entry); 869 870 gp->rx_new = entry; 871 872 if (drops) 873 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", 874 gp->dev->name); 875 876 return work_done; 877} 878 879static int gem_poll(struct net_device *dev, int *budget) 880{ 881 struct gem *gp = dev->priv; 882 unsigned long flags; 883 884 /* 885 * NAPI locking nightmare: See comment at head of driver 886 */ 887 spin_lock_irqsave(&gp->lock, flags); 888 889 do { 890 int work_to_do, work_done; 891 892 /* Handle anomalies */ 893 if (gp->status & GREG_STAT_ABNORMAL) { 894 if (gem_abnormal_irq(dev, gp, gp->status)) 895 break; 896 } 897 898 /* Run TX completion thread */ 899 spin_lock(&gp->tx_lock); 900 gem_tx(dev, gp, gp->status); 901 spin_unlock(&gp->tx_lock); 902 903 spin_unlock_irqrestore(&gp->lock, flags); 904 905 /* Run RX thread. We don't use any locking here, 906 * code willing to do bad things - like cleaning the 907 * rx ring - must call netif_poll_disable(), which 908 * schedule_timeout()'s if polling is already disabled. 909 */ 910 work_to_do = min(*budget, dev->quota); 911 912 work_done = gem_rx(gp, work_to_do); 913 914 *budget -= work_done; 915 dev->quota -= work_done; 916 917 if (work_done >= work_to_do) 918 return 1; 919 920 spin_lock_irqsave(&gp->lock, flags); 921 922 gp->status = readl(gp->regs + GREG_STAT); 923 } while (gp->status & GREG_STAT_NAPI); 924 925 __netif_rx_complete(dev); 926 gem_enable_ints(gp); 927 928 spin_unlock_irqrestore(&gp->lock, flags); 929 return 0; 930} 931 932static irqreturn_t gem_interrupt(int irq, void *dev_id, struct pt_regs *regs) 933{ 934 struct net_device *dev = dev_id; 935 struct gem *gp = dev->priv; 936 unsigned long flags; 937 938 /* Swallow interrupts when shutting the chip down, though 939 * that shouldn't happen, we should have done free_irq() at 940 * this point... 941 */ 942 if (!gp->running) 943 return IRQ_HANDLED; 944 945 spin_lock_irqsave(&gp->lock, flags); 946 947 if (netif_rx_schedule_prep(dev)) { 948 u32 gem_status = readl(gp->regs + GREG_STAT); 949 950 if (gem_status == 0) { 951 spin_unlock_irqrestore(&gp->lock, flags); 952 return IRQ_NONE; 953 } 954 gp->status = gem_status; 955 gem_disable_ints(gp); 956 __netif_rx_schedule(dev); 957 } 958 959 spin_unlock_irqrestore(&gp->lock, flags); 960 961 /* If polling was disabled at the time we received that 962 * interrupt, we may return IRQ_HANDLED here while we 963 * should return IRQ_NONE. No big deal... 964 */ 965 return IRQ_HANDLED; 966} 967 968#ifdef CONFIG_NET_POLL_CONTROLLER 969static void gem_poll_controller(struct net_device *dev) 970{ 971 /* gem_interrupt is safe to reentrance so no need 972 * to disable_irq here. 973 */ 974 gem_interrupt(dev->irq, dev, NULL); 975} 976#endif 977 978static void gem_tx_timeout(struct net_device *dev) 979{ 980 struct gem *gp = dev->priv; 981 982 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 983 if (!gp->running) { 984 printk("%s: hrm.. hw not running !\n", dev->name); 985 return; 986 } 987 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n", 988 dev->name, 989 readl(gp->regs + TXDMA_CFG), 990 readl(gp->regs + MAC_TXSTAT), 991 readl(gp->regs + MAC_TXCFG)); 992 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n", 993 dev->name, 994 readl(gp->regs + RXDMA_CFG), 995 readl(gp->regs + MAC_RXSTAT), 996 readl(gp->regs + MAC_RXCFG)); 997 998 spin_lock_irq(&gp->lock); 999 spin_lock(&gp->tx_lock); 1000 1001 gp->reset_task_pending = 1; 1002 schedule_work(&gp->reset_task); 1003 1004 spin_unlock(&gp->tx_lock); 1005 spin_unlock_irq(&gp->lock); 1006} 1007 1008static __inline__ int gem_intme(int entry) 1009{ 1010 /* Algorithm: IRQ every 1/2 of descriptors. */ 1011 if (!(entry & ((TX_RING_SIZE>>1)-1))) 1012 return 1; 1013 1014 return 0; 1015} 1016 1017static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) 1018{ 1019 struct gem *gp = dev->priv; 1020 int entry; 1021 u64 ctrl; 1022 unsigned long flags; 1023 1024 ctrl = 0; 1025 if (skb->ip_summed == CHECKSUM_HW) { 1026 u64 csum_start_off, csum_stuff_off; 1027 1028 csum_start_off = (u64) (skb->h.raw - skb->data); 1029 csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data); 1030 1031 ctrl = (TXDCTRL_CENAB | 1032 (csum_start_off << 15) | 1033 (csum_stuff_off << 21)); 1034 } 1035 1036 local_irq_save(flags); 1037 if (!spin_trylock(&gp->tx_lock)) { 1038 /* Tell upper layer to requeue */ 1039 local_irq_restore(flags); 1040 return NETDEV_TX_LOCKED; 1041 } 1042 /* We raced with gem_do_stop() */ 1043 if (!gp->running) { 1044 spin_unlock_irqrestore(&gp->tx_lock, flags); 1045 return NETDEV_TX_BUSY; 1046 } 1047 1048 /* This is a hard error, log it. */ 1049 if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) { 1050 netif_stop_queue(dev); 1051 spin_unlock_irqrestore(&gp->tx_lock, flags); 1052 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", 1053 dev->name); 1054 return NETDEV_TX_BUSY; 1055 } 1056 1057 entry = gp->tx_new; 1058 gp->tx_skbs[entry] = skb; 1059 1060 if (skb_shinfo(skb)->nr_frags == 0) { 1061 struct gem_txd *txd = &gp->init_block->txd[entry]; 1062 dma_addr_t mapping; 1063 u32 len; 1064 1065 len = skb->len; 1066 mapping = pci_map_page(gp->pdev, 1067 virt_to_page(skb->data), 1068 offset_in_page(skb->data), 1069 len, PCI_DMA_TODEVICE); 1070 ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len; 1071 if (gem_intme(entry)) 1072 ctrl |= TXDCTRL_INTME; 1073 txd->buffer = cpu_to_le64(mapping); 1074 wmb(); 1075 txd->control_word = cpu_to_le64(ctrl); 1076 entry = NEXT_TX(entry); 1077 } else { 1078 struct gem_txd *txd; 1079 u32 first_len; 1080 u64 intme; 1081 dma_addr_t first_mapping; 1082 int frag, first_entry = entry; 1083 1084 intme = 0; 1085 if (gem_intme(entry)) 1086 intme |= TXDCTRL_INTME; 1087 1088 /* We must give this initial chunk to the device last. 1089 * Otherwise we could race with the device. 1090 */ 1091 first_len = skb_headlen(skb); 1092 first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data), 1093 offset_in_page(skb->data), 1094 first_len, PCI_DMA_TODEVICE); 1095 entry = NEXT_TX(entry); 1096 1097 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 1098 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 1099 u32 len; 1100 dma_addr_t mapping; 1101 u64 this_ctrl; 1102 1103 len = this_frag->size; 1104 mapping = pci_map_page(gp->pdev, 1105 this_frag->page, 1106 this_frag->page_offset, 1107 len, PCI_DMA_TODEVICE); 1108 this_ctrl = ctrl; 1109 if (frag == skb_shinfo(skb)->nr_frags - 1) 1110 this_ctrl |= TXDCTRL_EOF; 1111 1112 txd = &gp->init_block->txd[entry]; 1113 txd->buffer = cpu_to_le64(mapping); 1114 wmb(); 1115 txd->control_word = cpu_to_le64(this_ctrl | len); 1116 1117 if (gem_intme(entry)) 1118 intme |= TXDCTRL_INTME; 1119 1120 entry = NEXT_TX(entry); 1121 } 1122 txd = &gp->init_block->txd[first_entry]; 1123 txd->buffer = cpu_to_le64(first_mapping); 1124 wmb(); 1125 txd->control_word = 1126 cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len); 1127 } 1128 1129 gp->tx_new = entry; 1130 if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1)) 1131 netif_stop_queue(dev); 1132 1133 if (netif_msg_tx_queued(gp)) 1134 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", 1135 dev->name, entry, skb->len); 1136 mb(); 1137 writel(gp->tx_new, gp->regs + TXDMA_KICK); 1138 spin_unlock_irqrestore(&gp->tx_lock, flags); 1139 1140 dev->trans_start = jiffies; 1141 1142 return NETDEV_TX_OK; 1143} 1144 1145#define STOP_TRIES 32 1146 1147/* Must be invoked under gp->lock and gp->tx_lock. */ 1148static void gem_reset(struct gem *gp) 1149{ 1150 int limit; 1151 u32 val; 1152 1153 /* Make sure we won't get any more interrupts */ 1154 writel(0xffffffff, gp->regs + GREG_IMASK); 1155 1156 /* Reset the chip */ 1157 writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST, 1158 gp->regs + GREG_SWRST); 1159 1160 limit = STOP_TRIES; 1161 1162 do { 1163 udelay(20); 1164 val = readl(gp->regs + GREG_SWRST); 1165 if (limit-- <= 0) 1166 break; 1167 } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)); 1168 1169 if (limit <= 0) 1170 printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name); 1171} 1172 1173/* Must be invoked under gp->lock and gp->tx_lock. */ 1174static void gem_start_dma(struct gem *gp) 1175{ 1176 u32 val; 1177 1178 /* We are ready to rock, turn everything on. */ 1179 val = readl(gp->regs + TXDMA_CFG); 1180 writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); 1181 val = readl(gp->regs + RXDMA_CFG); 1182 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 1183 val = readl(gp->regs + MAC_TXCFG); 1184 writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); 1185 val = readl(gp->regs + MAC_RXCFG); 1186 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 1187 1188 (void) readl(gp->regs + MAC_RXCFG); 1189 udelay(100); 1190 1191 gem_enable_ints(gp); 1192 1193 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 1194} 1195 1196/* Must be invoked under gp->lock and gp->tx_lock. DMA won't be 1197 * actually stopped before about 4ms tho ... 1198 */ 1199static void gem_stop_dma(struct gem *gp) 1200{ 1201 u32 val; 1202 1203 /* We are done rocking, turn everything off. */ 1204 val = readl(gp->regs + TXDMA_CFG); 1205 writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); 1206 val = readl(gp->regs + RXDMA_CFG); 1207 writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 1208 val = readl(gp->regs + MAC_TXCFG); 1209 writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); 1210 val = readl(gp->regs + MAC_RXCFG); 1211 writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 1212 1213 (void) readl(gp->regs + MAC_RXCFG); 1214 1215 /* Need to wait a bit ... done by the caller */ 1216} 1217 1218 1219/* Must be invoked under gp->lock and gp->tx_lock. */ 1220// XXX dbl check what that function should do when called on PCS PHY 1221static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) 1222{ 1223 u32 advertise, features; 1224 int autoneg; 1225 int speed; 1226 int duplex; 1227 1228 if (gp->phy_type != phy_mii_mdio0 && 1229 gp->phy_type != phy_mii_mdio1) 1230 goto non_mii; 1231 1232 /* Setup advertise */ 1233 if (found_mii_phy(gp)) 1234 features = gp->phy_mii.def->features; 1235 else 1236 features = 0; 1237 1238 advertise = features & ADVERTISE_MASK; 1239 if (gp->phy_mii.advertising != 0) 1240 advertise &= gp->phy_mii.advertising; 1241 1242 autoneg = gp->want_autoneg; 1243 speed = gp->phy_mii.speed; 1244 duplex = gp->phy_mii.duplex; 1245 1246 /* Setup link parameters */ 1247 if (!ep) 1248 goto start_aneg; 1249 if (ep->autoneg == AUTONEG_ENABLE) { 1250 advertise = ep->advertising; 1251 autoneg = 1; 1252 } else { 1253 autoneg = 0; 1254 speed = ep->speed; 1255 duplex = ep->duplex; 1256 } 1257 1258start_aneg: 1259 /* Sanitize settings based on PHY capabilities */ 1260 if ((features & SUPPORTED_Autoneg) == 0) 1261 autoneg = 0; 1262 if (speed == SPEED_1000 && 1263 !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full))) 1264 speed = SPEED_100; 1265 if (speed == SPEED_100 && 1266 !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full))) 1267 speed = SPEED_10; 1268 if (duplex == DUPLEX_FULL && 1269 !(features & (SUPPORTED_1000baseT_Full | 1270 SUPPORTED_100baseT_Full | 1271 SUPPORTED_10baseT_Full))) 1272 duplex = DUPLEX_HALF; 1273 if (speed == 0) 1274 speed = SPEED_10; 1275 1276 /* If we are asleep, we don't try to actually setup the PHY, we 1277 * just store the settings 1278 */ 1279 if (gp->asleep) { 1280 gp->phy_mii.autoneg = gp->want_autoneg = autoneg; 1281 gp->phy_mii.speed = speed; 1282 gp->phy_mii.duplex = duplex; 1283 return; 1284 } 1285 1286 /* Configure PHY & start aneg */ 1287 gp->want_autoneg = autoneg; 1288 if (autoneg) { 1289 if (found_mii_phy(gp)) 1290 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise); 1291 gp->lstate = link_aneg; 1292 } else { 1293 if (found_mii_phy(gp)) 1294 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex); 1295 gp->lstate = link_force_ok; 1296 } 1297 1298non_mii: 1299 gp->timer_ticks = 0; 1300 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); 1301} 1302 1303/* A link-up condition has occurred, initialize and enable the 1304 * rest of the chip. 1305 * 1306 * Must be invoked under gp->lock and gp->tx_lock. 1307 */ 1308static int gem_set_link_modes(struct gem *gp) 1309{ 1310 u32 val; 1311 int full_duplex, speed, pause; 1312 1313 full_duplex = 0; 1314 speed = SPEED_10; 1315 pause = 0; 1316 1317 if (found_mii_phy(gp)) { 1318 if (gp->phy_mii.def->ops->read_link(&gp->phy_mii)) 1319 return 1; 1320 full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL); 1321 speed = gp->phy_mii.speed; 1322 pause = gp->phy_mii.pause; 1323 } else if (gp->phy_type == phy_serialink || 1324 gp->phy_type == phy_serdes) { 1325 u32 pcs_lpa = readl(gp->regs + PCS_MIILP); 1326 1327 if (pcs_lpa & PCS_MIIADV_FD) 1328 full_duplex = 1; 1329 speed = SPEED_1000; 1330 } 1331 1332 if (netif_msg_link(gp)) 1333 printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n", 1334 gp->dev->name, speed, (full_duplex ? "full" : "half")); 1335 1336 if (!gp->running) 1337 return 0; 1338 1339 val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU); 1340 if (full_duplex) { 1341 val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL); 1342 } else { 1343 /* MAC_TXCFG_NBO must be zero. */ 1344 } 1345 writel(val, gp->regs + MAC_TXCFG); 1346 1347 val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED); 1348 if (!full_duplex && 1349 (gp->phy_type == phy_mii_mdio0 || 1350 gp->phy_type == phy_mii_mdio1)) { 1351 val |= MAC_XIFCFG_DISE; 1352 } else if (full_duplex) { 1353 val |= MAC_XIFCFG_FLED; 1354 } 1355 1356 if (speed == SPEED_1000) 1357 val |= (MAC_XIFCFG_GMII); 1358 1359 writel(val, gp->regs + MAC_XIFCFG); 1360 1361 /* If gigabit and half-duplex, enable carrier extension 1362 * mode. Else, disable it. 1363 */ 1364 if (speed == SPEED_1000 && !full_duplex) { 1365 val = readl(gp->regs + MAC_TXCFG); 1366 writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); 1367 1368 val = readl(gp->regs + MAC_RXCFG); 1369 writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); 1370 } else { 1371 val = readl(gp->regs + MAC_TXCFG); 1372 writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); 1373 1374 val = readl(gp->regs + MAC_RXCFG); 1375 writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); 1376 } 1377 1378 if (gp->phy_type == phy_serialink || 1379 gp->phy_type == phy_serdes) { 1380 u32 pcs_lpa = readl(gp->regs + PCS_MIILP); 1381 1382 if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP)) 1383 pause = 1; 1384 } 1385 1386 if (netif_msg_link(gp)) { 1387 if (pause) { 1388 printk(KERN_INFO "%s: Pause is enabled " 1389 "(rxfifo: %d off: %d on: %d)\n", 1390 gp->dev->name, 1391 gp->rx_fifo_sz, 1392 gp->rx_pause_off, 1393 gp->rx_pause_on); 1394 } else { 1395 printk(KERN_INFO "%s: Pause is disabled\n", 1396 gp->dev->name); 1397 } 1398 } 1399 1400 if (!full_duplex) 1401 writel(512, gp->regs + MAC_STIME); 1402 else 1403 writel(64, gp->regs + MAC_STIME); 1404 val = readl(gp->regs + MAC_MCCFG); 1405 if (pause) 1406 val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE); 1407 else 1408 val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE); 1409 writel(val, gp->regs + MAC_MCCFG); 1410 1411 gem_start_dma(gp); 1412 1413 return 0; 1414} 1415 1416/* Must be invoked under gp->lock and gp->tx_lock. */ 1417static int gem_mdio_link_not_up(struct gem *gp) 1418{ 1419 switch (gp->lstate) { 1420 case link_force_ret: 1421 if (netif_msg_link(gp)) 1422 printk(KERN_INFO "%s: Autoneg failed again, keeping" 1423 " forced mode\n", gp->dev->name); 1424 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, 1425 gp->last_forced_speed, DUPLEX_HALF); 1426 gp->timer_ticks = 5; 1427 gp->lstate = link_force_ok; 1428 return 0; 1429 case link_aneg: 1430 /* We try forced modes after a failed aneg only on PHYs that don't 1431 * have "magic_aneg" bit set, which means they internally do the 1432 * while forced-mode thingy. On these, we just restart aneg 1433 */ 1434 if (gp->phy_mii.def->magic_aneg) 1435 return 1; 1436 if (netif_msg_link(gp)) 1437 printk(KERN_INFO "%s: switching to forced 100bt\n", 1438 gp->dev->name); 1439 /* Try forced modes. */ 1440 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100, 1441 DUPLEX_HALF); 1442 gp->timer_ticks = 5; 1443 gp->lstate = link_force_try; 1444 return 0; 1445 case link_force_try: 1446 /* Downgrade from 100 to 10 Mbps if necessary. 1447 * If already at 10Mbps, warn user about the 1448 * situation every 10 ticks. 1449 */ 1450 if (gp->phy_mii.speed == SPEED_100) { 1451 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10, 1452 DUPLEX_HALF); 1453 gp->timer_ticks = 5; 1454 if (netif_msg_link(gp)) 1455 printk(KERN_INFO "%s: switching to forced 10bt\n", 1456 gp->dev->name); 1457 return 0; 1458 } else 1459 return 1; 1460 default: 1461 return 0; 1462 } 1463} 1464 1465static void gem_link_timer(unsigned long data) 1466{ 1467 struct gem *gp = (struct gem *) data; 1468 int restart_aneg = 0; 1469 1470 if (gp->asleep) 1471 return; 1472 1473 spin_lock_irq(&gp->lock); 1474 spin_lock(&gp->tx_lock); 1475 gem_get_cell(gp); 1476 1477 /* If the reset task is still pending, we just 1478 * reschedule the link timer 1479 */ 1480 if (gp->reset_task_pending) 1481 goto restart; 1482 1483 if (gp->phy_type == phy_serialink || 1484 gp->phy_type == phy_serdes) { 1485 u32 val = readl(gp->regs + PCS_MIISTAT); 1486 1487 if (!(val & PCS_MIISTAT_LS)) 1488 val = readl(gp->regs + PCS_MIISTAT); 1489 1490 if ((val & PCS_MIISTAT_LS) != 0) { 1491 gp->lstate = link_up; 1492 netif_carrier_on(gp->dev); 1493 (void)gem_set_link_modes(gp); 1494 } 1495 goto restart; 1496 } 1497 if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) { 1498 /* Ok, here we got a link. If we had it due to a forced 1499 * fallback, and we were configured for autoneg, we do 1500 * retry a short autoneg pass. If you know your hub is 1501 * broken, use ethtool ;) 1502 */ 1503 if (gp->lstate == link_force_try && gp->want_autoneg) { 1504 gp->lstate = link_force_ret; 1505 gp->last_forced_speed = gp->phy_mii.speed; 1506 gp->timer_ticks = 5; 1507 if (netif_msg_link(gp)) 1508 printk(KERN_INFO "%s: Got link after fallback, retrying" 1509 " autoneg once...\n", gp->dev->name); 1510 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); 1511 } else if (gp->lstate != link_up) { 1512 gp->lstate = link_up; 1513 netif_carrier_on(gp->dev); 1514 if (gem_set_link_modes(gp)) 1515 restart_aneg = 1; 1516 } 1517 } else { 1518 /* If the link was previously up, we restart the 1519 * whole process 1520 */ 1521 if (gp->lstate == link_up) { 1522 gp->lstate = link_down; 1523 if (netif_msg_link(gp)) 1524 printk(KERN_INFO "%s: Link down\n", 1525 gp->dev->name); 1526 netif_carrier_off(gp->dev); 1527 gp->reset_task_pending = 1; 1528 schedule_work(&gp->reset_task); 1529 restart_aneg = 1; 1530 } else if (++gp->timer_ticks > 10) { 1531 if (found_mii_phy(gp)) 1532 restart_aneg = gem_mdio_link_not_up(gp); 1533 else 1534 restart_aneg = 1; 1535 } 1536 } 1537 if (restart_aneg) { 1538 gem_begin_auto_negotiation(gp, NULL); 1539 goto out_unlock; 1540 } 1541restart: 1542 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); 1543out_unlock: 1544 gem_put_cell(gp); 1545 spin_unlock(&gp->tx_lock); 1546 spin_unlock_irq(&gp->lock); 1547} 1548 1549/* Must be invoked under gp->lock and gp->tx_lock. */ 1550static void gem_clean_rings(struct gem *gp) 1551{ 1552 struct gem_init_block *gb = gp->init_block; 1553 struct sk_buff *skb; 1554 int i; 1555 dma_addr_t dma_addr; 1556 1557 for (i = 0; i < RX_RING_SIZE; i++) { 1558 struct gem_rxd *rxd; 1559 1560 rxd = &gb->rxd[i]; 1561 if (gp->rx_skbs[i] != NULL) { 1562 skb = gp->rx_skbs[i]; 1563 dma_addr = le64_to_cpu(rxd->buffer); 1564 pci_unmap_page(gp->pdev, dma_addr, 1565 RX_BUF_ALLOC_SIZE(gp), 1566 PCI_DMA_FROMDEVICE); 1567 dev_kfree_skb_any(skb); 1568 gp->rx_skbs[i] = NULL; 1569 } 1570 rxd->status_word = 0; 1571 wmb(); 1572 rxd->buffer = 0; 1573 } 1574 1575 for (i = 0; i < TX_RING_SIZE; i++) { 1576 if (gp->tx_skbs[i] != NULL) { 1577 struct gem_txd *txd; 1578 int frag; 1579 1580 skb = gp->tx_skbs[i]; 1581 gp->tx_skbs[i] = NULL; 1582 1583 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 1584 int ent = i & (TX_RING_SIZE - 1); 1585 1586 txd = &gb->txd[ent]; 1587 dma_addr = le64_to_cpu(txd->buffer); 1588 pci_unmap_page(gp->pdev, dma_addr, 1589 le64_to_cpu(txd->control_word) & 1590 TXDCTRL_BUFSZ, PCI_DMA_TODEVICE); 1591 1592 if (frag != skb_shinfo(skb)->nr_frags) 1593 i++; 1594 } 1595 dev_kfree_skb_any(skb); 1596 } 1597 } 1598} 1599 1600/* Must be invoked under gp->lock and gp->tx_lock. */ 1601static void gem_init_rings(struct gem *gp) 1602{ 1603 struct gem_init_block *gb = gp->init_block; 1604 struct net_device *dev = gp->dev; 1605 int i; 1606 dma_addr_t dma_addr; 1607 1608 gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0; 1609 1610 gem_clean_rings(gp); 1611 1612 gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN, 1613 (unsigned)VLAN_ETH_FRAME_LEN); 1614 1615 for (i = 0; i < RX_RING_SIZE; i++) { 1616 struct sk_buff *skb; 1617 struct gem_rxd *rxd = &gb->rxd[i]; 1618 1619 skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); 1620 if (!skb) { 1621 rxd->buffer = 0; 1622 rxd->status_word = 0; 1623 continue; 1624 } 1625 1626 gp->rx_skbs[i] = skb; 1627 skb->dev = dev; 1628 skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); 1629 dma_addr = pci_map_page(gp->pdev, 1630 virt_to_page(skb->data), 1631 offset_in_page(skb->data), 1632 RX_BUF_ALLOC_SIZE(gp), 1633 PCI_DMA_FROMDEVICE); 1634 rxd->buffer = cpu_to_le64(dma_addr); 1635 wmb(); 1636 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 1637 skb_reserve(skb, RX_OFFSET); 1638 } 1639 1640 for (i = 0; i < TX_RING_SIZE; i++) { 1641 struct gem_txd *txd = &gb->txd[i]; 1642 1643 txd->control_word = 0; 1644 wmb(); 1645 txd->buffer = 0; 1646 } 1647 wmb(); 1648} 1649 1650/* Init PHY interface and start link poll state machine */ 1651static void gem_init_phy(struct gem *gp) 1652{ 1653 u32 mifcfg; 1654 1655 /* Revert MIF CFG setting done on stop_phy */ 1656 mifcfg = readl(gp->regs + MIF_CFG); 1657 mifcfg &= ~MIF_CFG_BBMODE; 1658 writel(mifcfg, gp->regs + MIF_CFG); 1659 1660 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { 1661 int i; 1662 1663 /* Those delay sucks, the HW seem to love them though, I'll 1664 * serisouly consider breaking some locks here to be able 1665 * to schedule instead 1666 */ 1667 for (i = 0; i < 3; i++) { 1668#ifdef CONFIG_PPC_PMAC 1669 pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0); 1670 msleep(20); 1671#endif 1672 /* Some PHYs used by apple have problem getting back to us, 1673 * we do an additional reset here 1674 */ 1675 phy_write(gp, MII_BMCR, BMCR_RESET); 1676 msleep(20); 1677 if (phy_read(gp, MII_BMCR) != 0xffff) 1678 break; 1679 if (i == 2) 1680 printk(KERN_WARNING "%s: GMAC PHY not responding !\n", 1681 gp->dev->name); 1682 } 1683 } 1684 1685 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && 1686 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { 1687 u32 val; 1688 1689 /* Init datapath mode register. */ 1690 if (gp->phy_type == phy_mii_mdio0 || 1691 gp->phy_type == phy_mii_mdio1) { 1692 val = PCS_DMODE_MGM; 1693 } else if (gp->phy_type == phy_serialink) { 1694 val = PCS_DMODE_SM | PCS_DMODE_GMOE; 1695 } else { 1696 val = PCS_DMODE_ESM; 1697 } 1698 1699 writel(val, gp->regs + PCS_DMODE); 1700 } 1701 1702 if (gp->phy_type == phy_mii_mdio0 || 1703 gp->phy_type == phy_mii_mdio1) { 1704 // XXX check for errors 1705 mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr); 1706 1707 /* Init PHY */ 1708 if (gp->phy_mii.def && gp->phy_mii.def->ops->init) 1709 gp->phy_mii.def->ops->init(&gp->phy_mii); 1710 } else { 1711 u32 val; 1712 int limit; 1713 1714 /* Reset PCS unit. */ 1715 val = readl(gp->regs + PCS_MIICTRL); 1716 val |= PCS_MIICTRL_RST; 1717 writeb(val, gp->regs + PCS_MIICTRL); 1718 1719 limit = 32; 1720 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) { 1721 udelay(100); 1722 if (limit-- <= 0) 1723 break; 1724 } 1725 if (limit <= 0) 1726 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n", 1727 gp->dev->name); 1728 1729 /* Make sure PCS is disabled while changing advertisement 1730 * configuration. 1731 */ 1732 val = readl(gp->regs + PCS_CFG); 1733 val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO); 1734 writel(val, gp->regs + PCS_CFG); 1735 1736 /* Advertise all capabilities except assymetric 1737 * pause. 1738 */ 1739 val = readl(gp->regs + PCS_MIIADV); 1740 val |= (PCS_MIIADV_FD | PCS_MIIADV_HD | 1741 PCS_MIIADV_SP | PCS_MIIADV_AP); 1742 writel(val, gp->regs + PCS_MIIADV); 1743 1744 /* Enable and restart auto-negotiation, disable wrapback/loopback, 1745 * and re-enable PCS. 1746 */ 1747 val = readl(gp->regs + PCS_MIICTRL); 1748 val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE); 1749 val &= ~PCS_MIICTRL_WB; 1750 writel(val, gp->regs + PCS_MIICTRL); 1751 1752 val = readl(gp->regs + PCS_CFG); 1753 val |= PCS_CFG_ENABLE; 1754 writel(val, gp->regs + PCS_CFG); 1755 1756 /* Make sure serialink loopback is off. The meaning 1757 * of this bit is logically inverted based upon whether 1758 * you are in Serialink or SERDES mode. 1759 */ 1760 val = readl(gp->regs + PCS_SCTRL); 1761 if (gp->phy_type == phy_serialink) 1762 val &= ~PCS_SCTRL_LOOP; 1763 else 1764 val |= PCS_SCTRL_LOOP; 1765 writel(val, gp->regs + PCS_SCTRL); 1766 } 1767 1768 /* Default aneg parameters */ 1769 gp->timer_ticks = 0; 1770 gp->lstate = link_down; 1771 netif_carrier_off(gp->dev); 1772 1773 /* Can I advertise gigabit here ? I'd need BCM PHY docs... */ 1774 spin_lock_irq(&gp->lock); 1775 gem_begin_auto_negotiation(gp, NULL); 1776 spin_unlock_irq(&gp->lock); 1777} 1778 1779/* Must be invoked under gp->lock and gp->tx_lock. */ 1780static void gem_init_dma(struct gem *gp) 1781{ 1782 u64 desc_dma = (u64) gp->gblock_dvma; 1783 u32 val; 1784 1785 val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE); 1786 writel(val, gp->regs + TXDMA_CFG); 1787 1788 writel(desc_dma >> 32, gp->regs + TXDMA_DBHI); 1789 writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW); 1790 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); 1791 1792 writel(0, gp->regs + TXDMA_KICK); 1793 1794 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | 1795 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); 1796 writel(val, gp->regs + RXDMA_CFG); 1797 1798 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); 1799 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); 1800 1801 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 1802 1803 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); 1804 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); 1805 writel(val, gp->regs + RXDMA_PTHRESH); 1806 1807 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) 1808 writel(((5 & RXDMA_BLANK_IPKTS) | 1809 ((8 << 12) & RXDMA_BLANK_ITIME)), 1810 gp->regs + RXDMA_BLANK); 1811 else 1812 writel(((5 & RXDMA_BLANK_IPKTS) | 1813 ((4 << 12) & RXDMA_BLANK_ITIME)), 1814 gp->regs + RXDMA_BLANK); 1815} 1816 1817/* Must be invoked under gp->lock and gp->tx_lock. */ 1818static u32 gem_setup_multicast(struct gem *gp) 1819{ 1820 u32 rxcfg = 0; 1821 int i; 1822 1823 if ((gp->dev->flags & IFF_ALLMULTI) || 1824 (gp->dev->mc_count > 256)) { 1825 for (i=0; i<16; i++) 1826 writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); 1827 rxcfg |= MAC_RXCFG_HFE; 1828 } else if (gp->dev->flags & IFF_PROMISC) { 1829 rxcfg |= MAC_RXCFG_PROM; 1830 } else { 1831 u16 hash_table[16]; 1832 u32 crc; 1833 struct dev_mc_list *dmi = gp->dev->mc_list; 1834 int i; 1835 1836 for (i = 0; i < 16; i++) 1837 hash_table[i] = 0; 1838 1839 for (i = 0; i < gp->dev->mc_count; i++) { 1840 char *addrs = dmi->dmi_addr; 1841 1842 dmi = dmi->next; 1843 1844 if (!(*addrs & 1)) 1845 continue; 1846 1847 crc = ether_crc_le(6, addrs); 1848 crc >>= 24; 1849 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 1850 } 1851 for (i=0; i<16; i++) 1852 writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2)); 1853 rxcfg |= MAC_RXCFG_HFE; 1854 } 1855 1856 return rxcfg; 1857} 1858 1859/* Must be invoked under gp->lock and gp->tx_lock. */ 1860static void gem_init_mac(struct gem *gp) 1861{ 1862 unsigned char *e = &gp->dev->dev_addr[0]; 1863 1864 writel(0x1bf0, gp->regs + MAC_SNDPAUSE); 1865 1866 writel(0x00, gp->regs + MAC_IPG0); 1867 writel(0x08, gp->regs + MAC_IPG1); 1868 writel(0x04, gp->regs + MAC_IPG2); 1869 writel(0x40, gp->regs + MAC_STIME); 1870 writel(0x40, gp->regs + MAC_MINFSZ); 1871 1872 /* Ethernet payload + header + FCS + optional VLAN tag. */ 1873 writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ); 1874 1875 writel(0x07, gp->regs + MAC_PASIZE); 1876 writel(0x04, gp->regs + MAC_JAMSIZE); 1877 writel(0x10, gp->regs + MAC_ATTLIM); 1878 writel(0x8808, gp->regs + MAC_MCTYPE); 1879 1880 writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED); 1881 1882 writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); 1883 writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); 1884 writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); 1885 1886 writel(0, gp->regs + MAC_ADDR3); 1887 writel(0, gp->regs + MAC_ADDR4); 1888 writel(0, gp->regs + MAC_ADDR5); 1889 1890 writel(0x0001, gp->regs + MAC_ADDR6); 1891 writel(0xc200, gp->regs + MAC_ADDR7); 1892 writel(0x0180, gp->regs + MAC_ADDR8); 1893 1894 writel(0, gp->regs + MAC_AFILT0); 1895 writel(0, gp->regs + MAC_AFILT1); 1896 writel(0, gp->regs + MAC_AFILT2); 1897 writel(0, gp->regs + MAC_AF21MSK); 1898 writel(0, gp->regs + MAC_AF0MSK); 1899 1900 gp->mac_rx_cfg = gem_setup_multicast(gp); 1901#ifdef STRIP_FCS 1902 gp->mac_rx_cfg |= MAC_RXCFG_SFCS; 1903#endif 1904 writel(0, gp->regs + MAC_NCOLL); 1905 writel(0, gp->regs + MAC_FASUCC); 1906 writel(0, gp->regs + MAC_ECOLL); 1907 writel(0, gp->regs + MAC_LCOLL); 1908 writel(0, gp->regs + MAC_DTIMER); 1909 writel(0, gp->regs + MAC_PATMPS); 1910 writel(0, gp->regs + MAC_RFCTR); 1911 writel(0, gp->regs + MAC_LERR); 1912 writel(0, gp->regs + MAC_AERR); 1913 writel(0, gp->regs + MAC_FCSERR); 1914 writel(0, gp->regs + MAC_RXCVERR); 1915 1916 /* Clear RX/TX/MAC/XIF config, we will set these up and enable 1917 * them once a link is established. 1918 */ 1919 writel(0, gp->regs + MAC_TXCFG); 1920 writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG); 1921 writel(0, gp->regs + MAC_MCCFG); 1922 writel(0, gp->regs + MAC_XIFCFG); 1923 1924 /* Setup MAC interrupts. We want to get all of the interesting 1925 * counter expiration events, but we do not want to hear about 1926 * normal rx/tx as the DMA engine tells us that. 1927 */ 1928 writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK); 1929 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); 1930 1931 /* Don't enable even the PAUSE interrupts for now, we 1932 * make no use of those events other than to record them. 1933 */ 1934 writel(0xffffffff, gp->regs + MAC_MCMASK); 1935 1936 /* Don't enable GEM's WOL in normal operations 1937 */ 1938 if (gp->has_wol) 1939 writel(0, gp->regs + WOL_WAKECSR); 1940} 1941 1942/* Must be invoked under gp->lock and gp->tx_lock. */ 1943static void gem_init_pause_thresholds(struct gem *gp) 1944{ 1945 u32 cfg; 1946 1947 /* Calculate pause thresholds. Setting the OFF threshold to the 1948 * full RX fifo size effectively disables PAUSE generation which 1949 * is what we do for 10/100 only GEMs which have FIFOs too small 1950 * to make real gains from PAUSE. 1951 */ 1952 if (gp->rx_fifo_sz <= (2 * 1024)) { 1953 gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz; 1954 } else { 1955 int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63; 1956 int off = (gp->rx_fifo_sz - (max_frame * 2)); 1957 int on = off - max_frame; 1958 1959 gp->rx_pause_off = off; 1960 gp->rx_pause_on = on; 1961 } 1962 1963 1964 /* Configure the chip "burst" DMA mode & enable some 1965 * HW bug fixes on Apple version 1966 */ 1967 cfg = 0; 1968 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) 1969 cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX; 1970#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) 1971 cfg |= GREG_CFG_IBURST; 1972#endif 1973 cfg |= ((31 << 1) & GREG_CFG_TXDMALIM); 1974 cfg |= ((31 << 6) & GREG_CFG_RXDMALIM); 1975 writel(cfg, gp->regs + GREG_CFG); 1976 1977 /* If Infinite Burst didn't stick, then use different 1978 * thresholds (and Apple bug fixes don't exist) 1979 */ 1980 if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) { 1981 cfg = ((2 << 1) & GREG_CFG_TXDMALIM); 1982 cfg |= ((8 << 6) & GREG_CFG_RXDMALIM); 1983 writel(cfg, gp->regs + GREG_CFG); 1984 } 1985} 1986 1987static int gem_check_invariants(struct gem *gp) 1988{ 1989 struct pci_dev *pdev = gp->pdev; 1990 u32 mif_cfg; 1991 1992 /* On Apple's sungem, we can't rely on registers as the chip 1993 * was been powered down by the firmware. The PHY is looked 1994 * up later on. 1995 */ 1996 if (pdev->vendor == PCI_VENDOR_ID_APPLE) { 1997 gp->phy_type = phy_mii_mdio0; 1998 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; 1999 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; 2000 gp->swrst_base = 0; 2001 2002 mif_cfg = readl(gp->regs + MIF_CFG); 2003 mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1); 2004 mif_cfg |= MIF_CFG_MDI0; 2005 writel(mif_cfg, gp->regs + MIF_CFG); 2006 writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE); 2007 writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG); 2008 2009 /* We hard-code the PHY address so we can properly bring it out of 2010 * reset later on, we can't really probe it at this point, though 2011 * that isn't an issue. 2012 */ 2013 if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC) 2014 gp->mii_phy_addr = 1; 2015 else 2016 gp->mii_phy_addr = 0; 2017 2018 return 0; 2019 } 2020 2021 mif_cfg = readl(gp->regs + MIF_CFG); 2022 2023 if (pdev->vendor == PCI_VENDOR_ID_SUN && 2024 pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) { 2025 /* One of the MII PHYs _must_ be present 2026 * as this chip has no gigabit PHY. 2027 */ 2028 if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { 2029 printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n", 2030 mif_cfg); 2031 return -1; 2032 } 2033 } 2034 2035 /* Determine initial PHY interface type guess. MDIO1 is the 2036 * external PHY and thus takes precedence over MDIO0. 2037 */ 2038 2039 if (mif_cfg & MIF_CFG_MDI1) { 2040 gp->phy_type = phy_mii_mdio1; 2041 mif_cfg |= MIF_CFG_PSELECT; 2042 writel(mif_cfg, gp->regs + MIF_CFG); 2043 } else if (mif_cfg & MIF_CFG_MDI0) { 2044 gp->phy_type = phy_mii_mdio0; 2045 mif_cfg &= ~MIF_CFG_PSELECT; 2046 writel(mif_cfg, gp->regs + MIF_CFG); 2047 } else { 2048 gp->phy_type = phy_serialink; 2049 } 2050 if (gp->phy_type == phy_mii_mdio1 || 2051 gp->phy_type == phy_mii_mdio0) { 2052 int i; 2053 2054 for (i = 0; i < 32; i++) { 2055 gp->mii_phy_addr = i; 2056 if (phy_read(gp, MII_BMCR) != 0xffff) 2057 break; 2058 } 2059 if (i == 32) { 2060 if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { 2061 printk(KERN_ERR PFX "RIO MII phy will not respond.\n"); 2062 return -1; 2063 } 2064 gp->phy_type = phy_serdes; 2065 } 2066 } 2067 2068 /* Fetch the FIFO configurations now too. */ 2069 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; 2070 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; 2071 2072 if (pdev->vendor == PCI_VENDOR_ID_SUN) { 2073 if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { 2074 if (gp->tx_fifo_sz != (9 * 1024) || 2075 gp->rx_fifo_sz != (20 * 1024)) { 2076 printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2077 gp->tx_fifo_sz, gp->rx_fifo_sz); 2078 return -1; 2079 } 2080 gp->swrst_base = 0; 2081 } else { 2082 if (gp->tx_fifo_sz != (2 * 1024) || 2083 gp->rx_fifo_sz != (2 * 1024)) { 2084 printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2085 gp->tx_fifo_sz, gp->rx_fifo_sz); 2086 return -1; 2087 } 2088 gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT; 2089 } 2090 } 2091 2092 return 0; 2093} 2094 2095/* Must be invoked under gp->lock and gp->tx_lock. */ 2096static void gem_reinit_chip(struct gem *gp) 2097{ 2098 /* Reset the chip */ 2099 gem_reset(gp); 2100 2101 /* Make sure ints are disabled */ 2102 gem_disable_ints(gp); 2103 2104 /* Allocate & setup ring buffers */ 2105 gem_init_rings(gp); 2106 2107 /* Configure pause thresholds */ 2108 gem_init_pause_thresholds(gp); 2109 2110 /* Init DMA & MAC engines */ 2111 gem_init_dma(gp); 2112 gem_init_mac(gp); 2113} 2114 2115 2116/* Must be invoked with no lock held. */ 2117static void gem_stop_phy(struct gem *gp, int wol) 2118{ 2119 u32 mifcfg; 2120 unsigned long flags; 2121 2122 /* Let the chip settle down a bit, it seems that helps 2123 * for sleep mode on some models 2124 */ 2125 msleep(10); 2126 2127 /* Make sure we aren't polling PHY status change. We 2128 * don't currently use that feature though 2129 */ 2130 mifcfg = readl(gp->regs + MIF_CFG); 2131 mifcfg &= ~MIF_CFG_POLL; 2132 writel(mifcfg, gp->regs + MIF_CFG); 2133 2134 if (wol && gp->has_wol) { 2135 unsigned char *e = &gp->dev->dev_addr[0]; 2136 u32 csr; 2137 2138 /* Setup wake-on-lan for MAGIC packet */ 2139 writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB, 2140 gp->regs + MAC_RXCFG); 2141 writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0); 2142 writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1); 2143 writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2); 2144 2145 writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT); 2146 csr = WOL_WAKECSR_ENABLE; 2147 if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0) 2148 csr |= WOL_WAKECSR_MII; 2149 writel(csr, gp->regs + WOL_WAKECSR); 2150 } else { 2151 writel(0, gp->regs + MAC_RXCFG); 2152 (void)readl(gp->regs + MAC_RXCFG); 2153 /* Machine sleep will die in strange ways if we 2154 * dont wait a bit here, looks like the chip takes 2155 * some time to really shut down 2156 */ 2157 msleep(10); 2158 } 2159 2160 writel(0, gp->regs + MAC_TXCFG); 2161 writel(0, gp->regs + MAC_XIFCFG); 2162 writel(0, gp->regs + TXDMA_CFG); 2163 writel(0, gp->regs + RXDMA_CFG); 2164 2165 if (!wol) { 2166 spin_lock_irqsave(&gp->lock, flags); 2167 spin_lock(&gp->tx_lock); 2168 gem_reset(gp); 2169 writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); 2170 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); 2171 spin_unlock(&gp->tx_lock); 2172 spin_unlock_irqrestore(&gp->lock, flags); 2173 2174 /* No need to take the lock here */ 2175 2176 if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) 2177 gp->phy_mii.def->ops->suspend(&gp->phy_mii); 2178 2179 /* According to Apple, we must set the MDIO pins to this begnign 2180 * state or we may 1) eat more current, 2) damage some PHYs 2181 */ 2182 writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG); 2183 writel(0, gp->regs + MIF_BBCLK); 2184 writel(0, gp->regs + MIF_BBDATA); 2185 writel(0, gp->regs + MIF_BBOENAB); 2186 writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG); 2187 (void) readl(gp->regs + MAC_XIFCFG); 2188 } 2189} 2190 2191 2192static int gem_do_start(struct net_device *dev) 2193{ 2194 struct gem *gp = dev->priv; 2195 unsigned long flags; 2196 2197 spin_lock_irqsave(&gp->lock, flags); 2198 spin_lock(&gp->tx_lock); 2199 2200 /* Enable the cell */ 2201 gem_get_cell(gp); 2202 2203 /* Init & setup chip hardware */ 2204 gem_reinit_chip(gp); 2205 2206 gp->running = 1; 2207 2208 if (gp->lstate == link_up) { 2209 netif_carrier_on(gp->dev); 2210 gem_set_link_modes(gp); 2211 } 2212 2213 netif_wake_queue(gp->dev); 2214 2215 spin_unlock(&gp->tx_lock); 2216 spin_unlock_irqrestore(&gp->lock, flags); 2217 2218 if (request_irq(gp->pdev->irq, gem_interrupt, 2219 SA_SHIRQ, dev->name, (void *)dev)) { 2220 printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name); 2221 2222 spin_lock_irqsave(&gp->lock, flags); 2223 spin_lock(&gp->tx_lock); 2224 2225 gp->running = 0; 2226 gem_reset(gp); 2227 gem_clean_rings(gp); 2228 gem_put_cell(gp); 2229 2230 spin_unlock(&gp->tx_lock); 2231 spin_unlock_irqrestore(&gp->lock, flags); 2232 2233 return -EAGAIN; 2234 } 2235 2236 return 0; 2237} 2238 2239static void gem_do_stop(struct net_device *dev, int wol) 2240{ 2241 struct gem *gp = dev->priv; 2242 unsigned long flags; 2243 2244 spin_lock_irqsave(&gp->lock, flags); 2245 spin_lock(&gp->tx_lock); 2246 2247 gp->running = 0; 2248 2249 /* Stop netif queue */ 2250 netif_stop_queue(dev); 2251 2252 /* Make sure ints are disabled */ 2253 gem_disable_ints(gp); 2254 2255 /* We can drop the lock now */ 2256 spin_unlock(&gp->tx_lock); 2257 spin_unlock_irqrestore(&gp->lock, flags); 2258 2259 /* If we are going to sleep with WOL */ 2260 gem_stop_dma(gp); 2261 msleep(10); 2262 if (!wol) 2263 gem_reset(gp); 2264 msleep(10); 2265 2266 /* Get rid of rings */ 2267 gem_clean_rings(gp); 2268 2269 /* No irq needed anymore */ 2270 free_irq(gp->pdev->irq, (void *) dev); 2271 2272 /* Cell not needed neither if no WOL */ 2273 if (!wol) { 2274 spin_lock_irqsave(&gp->lock, flags); 2275 gem_put_cell(gp); 2276 spin_unlock_irqrestore(&gp->lock, flags); 2277 } 2278} 2279 2280static void gem_reset_task(void *data) 2281{ 2282 struct gem *gp = (struct gem *) data; 2283 2284 down(&gp->pm_sem); 2285 2286 netif_poll_disable(gp->dev); 2287 2288 spin_lock_irq(&gp->lock); 2289 spin_lock(&gp->tx_lock); 2290 2291 if (gp->running == 0) 2292 goto not_running; 2293 2294 if (gp->running) { 2295 netif_stop_queue(gp->dev); 2296 2297 /* Reset the chip & rings */ 2298 gem_reinit_chip(gp); 2299 if (gp->lstate == link_up) 2300 gem_set_link_modes(gp); 2301 netif_wake_queue(gp->dev); 2302 } 2303 not_running: 2304 gp->reset_task_pending = 0; 2305 2306 spin_unlock(&gp->tx_lock); 2307 spin_unlock_irq(&gp->lock); 2308 2309 netif_poll_enable(gp->dev); 2310 2311 up(&gp->pm_sem); 2312} 2313 2314 2315static int gem_open(struct net_device *dev) 2316{ 2317 struct gem *gp = dev->priv; 2318 int rc = 0; 2319 2320 down(&gp->pm_sem); 2321 2322 /* We need the cell enabled */ 2323 if (!gp->asleep) 2324 rc = gem_do_start(dev); 2325 gp->opened = (rc == 0); 2326 2327 up(&gp->pm_sem); 2328 2329 return rc; 2330} 2331 2332static int gem_close(struct net_device *dev) 2333{ 2334 struct gem *gp = dev->priv; 2335 2336 /* Note: we don't need to call netif_poll_disable() here because 2337 * our caller (dev_close) already did it for us 2338 */ 2339 2340 down(&gp->pm_sem); 2341 2342 gp->opened = 0; 2343 if (!gp->asleep) 2344 gem_do_stop(dev, 0); 2345 2346 up(&gp->pm_sem); 2347 2348 return 0; 2349} 2350 2351#ifdef CONFIG_PM 2352static int gem_suspend(struct pci_dev *pdev, pm_message_t state) 2353{ 2354 struct net_device *dev = pci_get_drvdata(pdev); 2355 struct gem *gp = dev->priv; 2356 unsigned long flags; 2357 2358 down(&gp->pm_sem); 2359 2360 netif_poll_disable(dev); 2361 2362 printk(KERN_INFO "%s: suspending, WakeOnLan %s\n", 2363 dev->name, 2364 (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled"); 2365 2366 /* Keep the cell enabled during the entire operation */ 2367 spin_lock_irqsave(&gp->lock, flags); 2368 spin_lock(&gp->tx_lock); 2369 gem_get_cell(gp); 2370 spin_unlock(&gp->tx_lock); 2371 spin_unlock_irqrestore(&gp->lock, flags); 2372 2373 /* If the driver is opened, we stop the MAC */ 2374 if (gp->opened) { 2375 /* Stop traffic, mark us closed */ 2376 netif_device_detach(dev); 2377 2378 /* Switch off MAC, remember WOL setting */ 2379 gp->asleep_wol = gp->wake_on_lan; 2380 gem_do_stop(dev, gp->asleep_wol); 2381 } else 2382 gp->asleep_wol = 0; 2383 2384 /* Mark us asleep */ 2385 gp->asleep = 1; 2386 wmb(); 2387 2388 /* Stop the link timer */ 2389 del_timer_sync(&gp->link_timer); 2390 2391 /* Now we release the semaphore to not block the reset task who 2392 * can take it too. We are marked asleep, so there will be no 2393 * conflict here 2394 */ 2395 up(&gp->pm_sem); 2396 2397 /* Wait for a pending reset task to complete */ 2398 while (gp->reset_task_pending) 2399 yield(); 2400 flush_scheduled_work(); 2401 2402 /* Shut the PHY down eventually and setup WOL */ 2403 gem_stop_phy(gp, gp->asleep_wol); 2404 2405 /* Make sure bus master is disabled */ 2406 pci_disable_device(gp->pdev); 2407 2408 /* Release the cell, no need to take a lock at this point since 2409 * nothing else can happen now 2410 */ 2411 gem_put_cell(gp); 2412 2413 return 0; 2414} 2415 2416static int gem_resume(struct pci_dev *pdev) 2417{ 2418 struct net_device *dev = pci_get_drvdata(pdev); 2419 struct gem *gp = dev->priv; 2420 unsigned long flags; 2421 2422 printk(KERN_INFO "%s: resuming\n", dev->name); 2423 2424 down(&gp->pm_sem); 2425 2426 /* Keep the cell enabled during the entire operation, no need to 2427 * take a lock here tho since nothing else can happen while we are 2428 * marked asleep 2429 */ 2430 gem_get_cell(gp); 2431 2432 /* Make sure PCI access and bus master are enabled */ 2433 if (pci_enable_device(gp->pdev)) { 2434 printk(KERN_ERR "%s: Can't re-enable chip !\n", 2435 dev->name); 2436 /* Put cell and forget it for now, it will be considered as 2437 * still asleep, a new sleep cycle may bring it back 2438 */ 2439 gem_put_cell(gp); 2440 up(&gp->pm_sem); 2441 return 0; 2442 } 2443 pci_set_master(gp->pdev); 2444 2445 /* Reset everything */ 2446 gem_reset(gp); 2447 2448 /* Mark us woken up */ 2449 gp->asleep = 0; 2450 wmb(); 2451 2452 /* Bring the PHY back. Again, lock is useless at this point as 2453 * nothing can be happening until we restart the whole thing 2454 */ 2455 gem_init_phy(gp); 2456 2457 /* If we were opened, bring everything back */ 2458 if (gp->opened) { 2459 /* Restart MAC */ 2460 gem_do_start(dev); 2461 2462 /* Re-attach net device */ 2463 netif_device_attach(dev); 2464 2465 } 2466 2467 spin_lock_irqsave(&gp->lock, flags); 2468 spin_lock(&gp->tx_lock); 2469 2470 /* If we had WOL enabled, the cell clock was never turned off during 2471 * sleep, so we end up beeing unbalanced. Fix that here 2472 */ 2473 if (gp->asleep_wol) 2474 gem_put_cell(gp); 2475 2476 /* This function doesn't need to hold the cell, it will be held if the 2477 * driver is open by gem_do_start(). 2478 */ 2479 gem_put_cell(gp); 2480 2481 spin_unlock(&gp->tx_lock); 2482 spin_unlock_irqrestore(&gp->lock, flags); 2483 2484 netif_poll_enable(dev); 2485 2486 up(&gp->pm_sem); 2487 2488 return 0; 2489} 2490#endif /* CONFIG_PM */ 2491 2492static struct net_device_stats *gem_get_stats(struct net_device *dev) 2493{ 2494 struct gem *gp = dev->priv; 2495 struct net_device_stats *stats = &gp->net_stats; 2496 2497 spin_lock_irq(&gp->lock); 2498 spin_lock(&gp->tx_lock); 2499 2500 /* I have seen this being called while the PM was in progress, 2501 * so we shield against this 2502 */ 2503 if (gp->running) { 2504 stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR); 2505 writel(0, gp->regs + MAC_FCSERR); 2506 2507 stats->rx_frame_errors += readl(gp->regs + MAC_AERR); 2508 writel(0, gp->regs + MAC_AERR); 2509 2510 stats->rx_length_errors += readl(gp->regs + MAC_LERR); 2511 writel(0, gp->regs + MAC_LERR); 2512 2513 stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL); 2514 stats->collisions += 2515 (readl(gp->regs + MAC_ECOLL) + 2516 readl(gp->regs + MAC_LCOLL)); 2517 writel(0, gp->regs + MAC_ECOLL); 2518 writel(0, gp->regs + MAC_LCOLL); 2519 } 2520 2521 spin_unlock(&gp->tx_lock); 2522 spin_unlock_irq(&gp->lock); 2523 2524 return &gp->net_stats; 2525} 2526 2527static void gem_set_multicast(struct net_device *dev) 2528{ 2529 struct gem *gp = dev->priv; 2530 u32 rxcfg, rxcfg_new; 2531 int limit = 10000; 2532 2533 2534 spin_lock_irq(&gp->lock); 2535 spin_lock(&gp->tx_lock); 2536 2537 if (!gp->running) 2538 goto bail; 2539 2540 netif_stop_queue(dev); 2541 2542 rxcfg = readl(gp->regs + MAC_RXCFG); 2543 rxcfg_new = gem_setup_multicast(gp); 2544#ifdef STRIP_FCS 2545 rxcfg_new |= MAC_RXCFG_SFCS; 2546#endif 2547 gp->mac_rx_cfg = rxcfg_new; 2548 2549 writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 2550 while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) { 2551 if (!limit--) 2552 break; 2553 udelay(10); 2554 } 2555 2556 rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE); 2557 rxcfg |= rxcfg_new; 2558 2559 writel(rxcfg, gp->regs + MAC_RXCFG); 2560 2561 netif_wake_queue(dev); 2562 2563 bail: 2564 spin_unlock(&gp->tx_lock); 2565 spin_unlock_irq(&gp->lock); 2566} 2567 2568/* Jumbo-grams don't seem to work :-( */ 2569#define GEM_MIN_MTU 68 2570#if 1 2571#define GEM_MAX_MTU 1500 2572#else 2573#define GEM_MAX_MTU 9000 2574#endif 2575 2576static int gem_change_mtu(struct net_device *dev, int new_mtu) 2577{ 2578 struct gem *gp = dev->priv; 2579 2580 if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) 2581 return -EINVAL; 2582 2583 if (!netif_running(dev) || !netif_device_present(dev)) { 2584 /* We'll just catch it later when the 2585 * device is up'd or resumed. 2586 */ 2587 dev->mtu = new_mtu; 2588 return 0; 2589 } 2590 2591 down(&gp->pm_sem); 2592 spin_lock_irq(&gp->lock); 2593 spin_lock(&gp->tx_lock); 2594 dev->mtu = new_mtu; 2595 if (gp->running) { 2596 gem_reinit_chip(gp); 2597 if (gp->lstate == link_up) 2598 gem_set_link_modes(gp); 2599 } 2600 spin_unlock(&gp->tx_lock); 2601 spin_unlock_irq(&gp->lock); 2602 up(&gp->pm_sem); 2603 2604 return 0; 2605} 2606 2607static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2608{ 2609 struct gem *gp = dev->priv; 2610 2611 strcpy(info->driver, DRV_NAME); 2612 strcpy(info->version, DRV_VERSION); 2613 strcpy(info->bus_info, pci_name(gp->pdev)); 2614} 2615 2616static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2617{ 2618 struct gem *gp = dev->priv; 2619 2620 if (gp->phy_type == phy_mii_mdio0 || 2621 gp->phy_type == phy_mii_mdio1) { 2622 if (gp->phy_mii.def) 2623 cmd->supported = gp->phy_mii.def->features; 2624 else 2625 cmd->supported = (SUPPORTED_10baseT_Half | 2626 SUPPORTED_10baseT_Full); 2627 2628 /* XXX hardcoded stuff for now */ 2629 cmd->port = PORT_MII; 2630 cmd->transceiver = XCVR_EXTERNAL; 2631 cmd->phy_address = 0; /* XXX fixed PHYAD */ 2632 2633 /* Return current PHY settings */ 2634 spin_lock_irq(&gp->lock); 2635 cmd->autoneg = gp->want_autoneg; 2636 cmd->speed = gp->phy_mii.speed; 2637 cmd->duplex = gp->phy_mii.duplex; 2638 cmd->advertising = gp->phy_mii.advertising; 2639 2640 /* If we started with a forced mode, we don't have a default 2641 * advertise set, we need to return something sensible so 2642 * userland can re-enable autoneg properly. 2643 */ 2644 if (cmd->advertising == 0) 2645 cmd->advertising = cmd->supported; 2646 spin_unlock_irq(&gp->lock); 2647 } else { // XXX PCS ? 2648 cmd->supported = 2649 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 2650 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 2651 SUPPORTED_Autoneg); 2652 cmd->advertising = cmd->supported; 2653 cmd->speed = 0; 2654 cmd->duplex = cmd->port = cmd->phy_address = 2655 cmd->transceiver = cmd->autoneg = 0; 2656 } 2657 cmd->maxtxpkt = cmd->maxrxpkt = 0; 2658 2659 return 0; 2660} 2661 2662static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2663{ 2664 struct gem *gp = dev->priv; 2665 2666 /* Verify the settings we care about. */ 2667 if (cmd->autoneg != AUTONEG_ENABLE && 2668 cmd->autoneg != AUTONEG_DISABLE) 2669 return -EINVAL; 2670 2671 if (cmd->autoneg == AUTONEG_ENABLE && 2672 cmd->advertising == 0) 2673 return -EINVAL; 2674 2675 if (cmd->autoneg == AUTONEG_DISABLE && 2676 ((cmd->speed != SPEED_1000 && 2677 cmd->speed != SPEED_100 && 2678 cmd->speed != SPEED_10) || 2679 (cmd->duplex != DUPLEX_HALF && 2680 cmd->duplex != DUPLEX_FULL))) 2681 return -EINVAL; 2682 2683 /* Apply settings and restart link process. */ 2684 spin_lock_irq(&gp->lock); 2685 gem_get_cell(gp); 2686 gem_begin_auto_negotiation(gp, cmd); 2687 gem_put_cell(gp); 2688 spin_unlock_irq(&gp->lock); 2689 2690 return 0; 2691} 2692 2693static int gem_nway_reset(struct net_device *dev) 2694{ 2695 struct gem *gp = dev->priv; 2696 2697 if (!gp->want_autoneg) 2698 return -EINVAL; 2699 2700 /* Restart link process. */ 2701 spin_lock_irq(&gp->lock); 2702 gem_get_cell(gp); 2703 gem_begin_auto_negotiation(gp, NULL); 2704 gem_put_cell(gp); 2705 spin_unlock_irq(&gp->lock); 2706 2707 return 0; 2708} 2709 2710static u32 gem_get_msglevel(struct net_device *dev) 2711{ 2712 struct gem *gp = dev->priv; 2713 return gp->msg_enable; 2714} 2715 2716static void gem_set_msglevel(struct net_device *dev, u32 value) 2717{ 2718 struct gem *gp = dev->priv; 2719 gp->msg_enable = value; 2720} 2721 2722 2723/* Add more when I understand how to program the chip */ 2724/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */ 2725 2726#define WOL_SUPPORTED_MASK (WAKE_MAGIC) 2727 2728static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2729{ 2730 struct gem *gp = dev->priv; 2731 2732 /* Add more when I understand how to program the chip */ 2733 if (gp->has_wol) { 2734 wol->supported = WOL_SUPPORTED_MASK; 2735 wol->wolopts = gp->wake_on_lan; 2736 } else { 2737 wol->supported = 0; 2738 wol->wolopts = 0; 2739 } 2740} 2741 2742static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2743{ 2744 struct gem *gp = dev->priv; 2745 2746 if (!gp->has_wol) 2747 return -EOPNOTSUPP; 2748 gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK; 2749 return 0; 2750} 2751 2752static struct ethtool_ops gem_ethtool_ops = { 2753 .get_drvinfo = gem_get_drvinfo, 2754 .get_link = ethtool_op_get_link, 2755 .get_settings = gem_get_settings, 2756 .set_settings = gem_set_settings, 2757 .nway_reset = gem_nway_reset, 2758 .get_msglevel = gem_get_msglevel, 2759 .set_msglevel = gem_set_msglevel, 2760 .get_wol = gem_get_wol, 2761 .set_wol = gem_set_wol, 2762}; 2763 2764static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2765{ 2766 struct gem *gp = dev->priv; 2767 struct mii_ioctl_data *data = if_mii(ifr); 2768 int rc = -EOPNOTSUPP; 2769 unsigned long flags; 2770 2771 /* Hold the PM semaphore while doing ioctl's or we may collide 2772 * with power management. 2773 */ 2774 down(&gp->pm_sem); 2775 2776 spin_lock_irqsave(&gp->lock, flags); 2777 gem_get_cell(gp); 2778 spin_unlock_irqrestore(&gp->lock, flags); 2779 2780 switch (cmd) { 2781 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 2782 data->phy_id = gp->mii_phy_addr; 2783 /* Fallthrough... */ 2784 2785 case SIOCGMIIREG: /* Read MII PHY register. */ 2786 if (!gp->running) 2787 rc = -EAGAIN; 2788 else { 2789 data->val_out = __phy_read(gp, data->phy_id & 0x1f, 2790 data->reg_num & 0x1f); 2791 rc = 0; 2792 } 2793 break; 2794 2795 case SIOCSMIIREG: /* Write MII PHY register. */ 2796 if (!capable(CAP_NET_ADMIN)) 2797 rc = -EPERM; 2798 else if (!gp->running) 2799 rc = -EAGAIN; 2800 else { 2801 __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, 2802 data->val_in); 2803 rc = 0; 2804 } 2805 break; 2806 }; 2807 2808 spin_lock_irqsave(&gp->lock, flags); 2809 gem_put_cell(gp); 2810 spin_unlock_irqrestore(&gp->lock, flags); 2811 2812 up(&gp->pm_sem); 2813 2814 return rc; 2815} 2816 2817#if (!defined(__sparc__) && !defined(CONFIG_PPC_PMAC)) 2818/* Fetch MAC address from vital product data of PCI ROM. */ 2819static void find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) 2820{ 2821 int this_offset; 2822 2823 for (this_offset = 0x20; this_offset < len; this_offset++) { 2824 void __iomem *p = rom_base + this_offset; 2825 int i; 2826 2827 if (readb(p + 0) != 0x90 || 2828 readb(p + 1) != 0x00 || 2829 readb(p + 2) != 0x09 || 2830 readb(p + 3) != 0x4e || 2831 readb(p + 4) != 0x41 || 2832 readb(p + 5) != 0x06) 2833 continue; 2834 2835 this_offset += 6; 2836 p += 6; 2837 2838 for (i = 0; i < 6; i++) 2839 dev_addr[i] = readb(p + i); 2840 break; 2841 } 2842} 2843 2844static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) 2845{ 2846 u32 rom_reg_orig; 2847 void __iomem *p; 2848 2849 if (pdev->resource[PCI_ROM_RESOURCE].parent == NULL) { 2850 if (pci_assign_resource(pdev, PCI_ROM_RESOURCE) < 0) 2851 goto use_random; 2852 } 2853 2854 pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_reg_orig); 2855 pci_write_config_dword(pdev, pdev->rom_base_reg, 2856 rom_reg_orig | PCI_ROM_ADDRESS_ENABLE); 2857 2858 p = ioremap(pci_resource_start(pdev, PCI_ROM_RESOURCE), (64 * 1024)); 2859 if (p != NULL && readb(p) == 0x55 && readb(p + 1) == 0xaa) 2860 find_eth_addr_in_vpd(p, (64 * 1024), dev_addr); 2861 2862 if (p != NULL) 2863 iounmap(p); 2864 2865 pci_write_config_dword(pdev, pdev->rom_base_reg, rom_reg_orig); 2866 return; 2867 2868use_random: 2869 /* Sun MAC prefix then 3 random bytes. */ 2870 dev_addr[0] = 0x08; 2871 dev_addr[1] = 0x00; 2872 dev_addr[2] = 0x20; 2873 get_random_bytes(dev_addr + 3, 3); 2874 return; 2875} 2876#endif /* not Sparc and not PPC */ 2877 2878static int __devinit gem_get_device_address(struct gem *gp) 2879{ 2880#if defined(__sparc__) || defined(CONFIG_PPC_PMAC) 2881 struct net_device *dev = gp->dev; 2882#endif 2883 2884#if defined(__sparc__) 2885 struct pci_dev *pdev = gp->pdev; 2886 struct pcidev_cookie *pcp = pdev->sysdata; 2887 int node = -1; 2888 2889 if (pcp != NULL) { 2890 node = pcp->prom_node; 2891 if (prom_getproplen(node, "local-mac-address") == 6) 2892 prom_getproperty(node, "local-mac-address", 2893 dev->dev_addr, 6); 2894 else 2895 node = -1; 2896 } 2897 if (node == -1) 2898 memcpy(dev->dev_addr, idprom->id_ethaddr, 6); 2899#elif defined(CONFIG_PPC_PMAC) 2900 unsigned char *addr; 2901 2902 addr = get_property(gp->of_node, "local-mac-address", NULL); 2903 if (addr == NULL) { 2904 printk("\n"); 2905 printk(KERN_ERR "%s: can't get mac-address\n", dev->name); 2906 return -1; 2907 } 2908 memcpy(dev->dev_addr, addr, 6); 2909#else 2910 get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); 2911#endif 2912 return 0; 2913} 2914 2915static void __devexit gem_remove_one(struct pci_dev *pdev) 2916{ 2917 struct net_device *dev = pci_get_drvdata(pdev); 2918 2919 if (dev) { 2920 struct gem *gp = dev->priv; 2921 2922 unregister_netdev(dev); 2923 2924 /* Stop the link timer */ 2925 del_timer_sync(&gp->link_timer); 2926 2927 /* We shouldn't need any locking here */ 2928 gem_get_cell(gp); 2929 2930 /* Wait for a pending reset task to complete */ 2931 while (gp->reset_task_pending) 2932 yield(); 2933 flush_scheduled_work(); 2934 2935 /* Shut the PHY down */ 2936 gem_stop_phy(gp, 0); 2937 2938 gem_put_cell(gp); 2939 2940 /* Make sure bus master is disabled */ 2941 pci_disable_device(gp->pdev); 2942 2943 /* Free resources */ 2944 pci_free_consistent(pdev, 2945 sizeof(struct gem_init_block), 2946 gp->init_block, 2947 gp->gblock_dvma); 2948 iounmap(gp->regs); 2949 pci_release_regions(pdev); 2950 free_netdev(dev); 2951 2952 pci_set_drvdata(pdev, NULL); 2953 } 2954} 2955 2956static int __devinit gem_init_one(struct pci_dev *pdev, 2957 const struct pci_device_id *ent) 2958{ 2959 static int gem_version_printed = 0; 2960 unsigned long gemreg_base, gemreg_len; 2961 struct net_device *dev; 2962 struct gem *gp; 2963 int i, err, pci_using_dac; 2964 2965 if (gem_version_printed++ == 0) 2966 printk(KERN_INFO "%s", version); 2967 2968 /* Apple gmac note: during probe, the chip is powered up by 2969 * the arch code to allow the code below to work (and to let 2970 * the chip be probed on the config space. It won't stay powered 2971 * up until the interface is brought up however, so we can't rely 2972 * on register configuration done at this point. 2973 */ 2974 err = pci_enable_device(pdev); 2975 if (err) { 2976 printk(KERN_ERR PFX "Cannot enable MMIO operation, " 2977 "aborting.\n"); 2978 return err; 2979 } 2980 pci_set_master(pdev); 2981 2982 /* Configure DMA attributes. */ 2983 2984 /* All of the GEM documentation states that 64-bit DMA addressing 2985 * is fully supported and should work just fine. However the 2986 * front end for RIO based GEMs is different and only supports 2987 * 32-bit addressing. 2988 * 2989 * For now we assume the various PPC GEMs are 32-bit only as well. 2990 */ 2991 if (pdev->vendor == PCI_VENDOR_ID_SUN && 2992 pdev->device == PCI_DEVICE_ID_SUN_GEM && 2993 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 2994 pci_using_dac = 1; 2995 } else { 2996 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 2997 if (err) { 2998 printk(KERN_ERR PFX "No usable DMA configuration, " 2999 "aborting.\n"); 3000 goto err_disable_device; 3001 } 3002 pci_using_dac = 0; 3003 } 3004 3005 gemreg_base = pci_resource_start(pdev, 0); 3006 gemreg_len = pci_resource_len(pdev, 0); 3007 3008 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { 3009 printk(KERN_ERR PFX "Cannot find proper PCI device " 3010 "base address, aborting.\n"); 3011 err = -ENODEV; 3012 goto err_disable_device; 3013 } 3014 3015 dev = alloc_etherdev(sizeof(*gp)); 3016 if (!dev) { 3017 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); 3018 err = -ENOMEM; 3019 goto err_disable_device; 3020 } 3021 SET_MODULE_OWNER(dev); 3022 SET_NETDEV_DEV(dev, &pdev->dev); 3023 3024 gp = dev->priv; 3025 3026 err = pci_request_regions(pdev, DRV_NAME); 3027 if (err) { 3028 printk(KERN_ERR PFX "Cannot obtain PCI resources, " 3029 "aborting.\n"); 3030 goto err_out_free_netdev; 3031 } 3032 3033 gp->pdev = pdev; 3034 dev->base_addr = (long) pdev; 3035 gp->dev = dev; 3036 3037 gp->msg_enable = DEFAULT_MSG; 3038 3039 spin_lock_init(&gp->lock); 3040 spin_lock_init(&gp->tx_lock); 3041 init_MUTEX(&gp->pm_sem); 3042 3043 init_timer(&gp->link_timer); 3044 gp->link_timer.function = gem_link_timer; 3045 gp->link_timer.data = (unsigned long) gp; 3046 3047 INIT_WORK(&gp->reset_task, gem_reset_task, gp); 3048 3049 gp->lstate = link_down; 3050 gp->timer_ticks = 0; 3051 netif_carrier_off(dev); 3052 3053 gp->regs = ioremap(gemreg_base, gemreg_len); 3054 if (gp->regs == 0UL) { 3055 printk(KERN_ERR PFX "Cannot map device registers, " 3056 "aborting.\n"); 3057 err = -EIO; 3058 goto err_out_free_res; 3059 } 3060 3061 /* On Apple, we want a reference to the Open Firmware device-tree 3062 * node. We use it for clock control. 3063 */ 3064#ifdef CONFIG_PPC_PMAC 3065 gp->of_node = pci_device_to_OF_node(pdev); 3066#endif 3067 3068 /* Only Apple version supports WOL afaik */ 3069 if (pdev->vendor == PCI_VENDOR_ID_APPLE) 3070 gp->has_wol = 1; 3071 3072 /* Make sure cell is enabled */ 3073 gem_get_cell(gp); 3074 3075 /* Make sure everything is stopped and in init state */ 3076 gem_reset(gp); 3077 3078 /* Fill up the mii_phy structure (even if we won't use it) */ 3079 gp->phy_mii.dev = dev; 3080 gp->phy_mii.mdio_read = _phy_read; 3081 gp->phy_mii.mdio_write = _phy_write; 3082#ifdef CONFIG_PPC_PMAC 3083 gp->phy_mii.platform_data = gp->of_node; 3084#endif 3085 /* By default, we start with autoneg */ 3086 gp->want_autoneg = 1; 3087 3088 /* Check fifo sizes, PHY type, etc... */ 3089 if (gem_check_invariants(gp)) { 3090 err = -ENODEV; 3091 goto err_out_iounmap; 3092 } 3093 3094 /* It is guaranteed that the returned buffer will be at least 3095 * PAGE_SIZE aligned. 3096 */ 3097 gp->init_block = (struct gem_init_block *) 3098 pci_alloc_consistent(pdev, sizeof(struct gem_init_block), 3099 &gp->gblock_dvma); 3100 if (!gp->init_block) { 3101 printk(KERN_ERR PFX "Cannot allocate init block, " 3102 "aborting.\n"); 3103 err = -ENOMEM; 3104 goto err_out_iounmap; 3105 } 3106 3107 if (gem_get_device_address(gp)) 3108 goto err_out_free_consistent; 3109 3110 dev->open = gem_open; 3111 dev->stop = gem_close; 3112 dev->hard_start_xmit = gem_start_xmit; 3113 dev->get_stats = gem_get_stats; 3114 dev->set_multicast_list = gem_set_multicast; 3115 dev->do_ioctl = gem_ioctl; 3116 dev->poll = gem_poll; 3117 dev->weight = 64; 3118 dev->ethtool_ops = &gem_ethtool_ops; 3119 dev->tx_timeout = gem_tx_timeout; 3120 dev->watchdog_timeo = 5 * HZ; 3121 dev->change_mtu = gem_change_mtu; 3122 dev->irq = pdev->irq; 3123 dev->dma = 0; 3124#ifdef CONFIG_NET_POLL_CONTROLLER 3125 dev->poll_controller = gem_poll_controller; 3126#endif 3127 3128 /* Set that now, in case PM kicks in now */ 3129 pci_set_drvdata(pdev, dev); 3130 3131 /* Detect & init PHY, start autoneg, we release the cell now 3132 * too, it will be managed by whoever needs it 3133 */ 3134 gem_init_phy(gp); 3135 3136 spin_lock_irq(&gp->lock); 3137 gem_put_cell(gp); 3138 spin_unlock_irq(&gp->lock); 3139 3140 /* Register with kernel */ 3141 if (register_netdev(dev)) { 3142 printk(KERN_ERR PFX "Cannot register net device, " 3143 "aborting.\n"); 3144 err = -ENOMEM; 3145 goto err_out_free_consistent; 3146 } 3147 3148 printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ", 3149 dev->name); 3150 for (i = 0; i < 6; i++) 3151 printk("%2.2x%c", dev->dev_addr[i], 3152 i == 5 ? ' ' : ':'); 3153 printk("\n"); 3154 3155 if (gp->phy_type == phy_mii_mdio0 || 3156 gp->phy_type == phy_mii_mdio1) 3157 printk(KERN_INFO "%s: Found %s PHY\n", dev->name, 3158 gp->phy_mii.def ? gp->phy_mii.def->name : "no"); 3159 3160 /* GEM can do it all... */ 3161 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX; 3162 if (pci_using_dac) 3163 dev->features |= NETIF_F_HIGHDMA; 3164 3165 return 0; 3166 3167err_out_free_consistent: 3168 gem_remove_one(pdev); 3169err_out_iounmap: 3170 gem_put_cell(gp); 3171 iounmap(gp->regs); 3172 3173err_out_free_res: 3174 pci_release_regions(pdev); 3175 3176err_out_free_netdev: 3177 free_netdev(dev); 3178err_disable_device: 3179 pci_disable_device(pdev); 3180 return err; 3181 3182} 3183 3184 3185static struct pci_driver gem_driver = { 3186 .name = GEM_MODULE_NAME, 3187 .id_table = gem_pci_tbl, 3188 .probe = gem_init_one, 3189 .remove = __devexit_p(gem_remove_one), 3190#ifdef CONFIG_PM 3191 .suspend = gem_suspend, 3192 .resume = gem_resume, 3193#endif /* CONFIG_PM */ 3194}; 3195 3196static int __init gem_init(void) 3197{ 3198 return pci_module_init(&gem_driver); 3199} 3200 3201static void __exit gem_cleanup(void) 3202{ 3203 pci_unregister_driver(&gem_driver); 3204} 3205 3206module_init(gem_init); 3207module_exit(gem_cleanup);