/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey. It's neither supported nor endorsed
 * by NVIDIA Corp. Use at your own risk.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Changelog:
 * 	0.01: 05 Oct 2003: First release that compiles without warnings.
 * 	0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 * 			   Check all PCI BARs for the register window.
 * 			   udelay added to mii_rw.
 * 	0.03: 06 Oct 2003: Initialize dev->irq.
 * 	0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 * 	0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 * 	0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 * 			   irq mask updated
 * 	0.07: 14 Oct 2003: Further irq mask updates.
 * 	0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 * 			   added into irq handler, NULL check for drain_ring.
 * 	0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 * 			   requested interrupt sources.
 * 	0.10: 20 Oct 2003: First cleanup for release.
 * 	0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 * 			   MAC Address init fix, set_multicast cleanup.
 * 	0.12: 23 Oct 2003: Cleanups for release.
 * 	0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 * 			   Set link speed correctly. start rx before starting
 * 			   tx (nv_start_rx sets the link speed).
 * 	0.14: 25 Oct 2003: Nic dependant irq mask.
 * 	0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 * 			   open.
 * 	0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 * 			   increased to 1628 bytes.
 * 	0.17: 16 Nov 2003: undo rx buffer size increase. Substract 1 from
 * 			   the tx length.
 * 	0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 * 	0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 * 			   addresses, really stop rx if already running
 * 			   in nv_start_rx, clean up a bit.
 * 	0.20: 07 Dec 2003: alloc fixes
 * 	0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 * 	0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 * 			   on close.
 * 	0.23: 26 Jan 2004: various small cleanups
 * 	0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 * 	0.25: 09 Mar 2004: wol support
 * 	0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 * 	0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 * 			   added CK804/MCP04 device IDs, code fixes
 * 			   for registers, link status and other minor fixes.
 * 	0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 * 	0.29: 31 Aug 2004: Add backup timer for link change notification.
 * 	0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 * 			   into nv_close, otherwise reenabling for wol can
 * 			   cause DMA to kfree'd memory.
 * 	0.31: 14 Nov 2004: ethtool support for getting/setting link
 * 			   capabilities.
 * 	0.32: 16 Apr 2005: RX_ERROR4 handling added.
 * 	0.33: 16 May 2005: Support for MCP51 added.
 * 	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 * 	0.35: 26 Jun 2005: Support for MCP55 added.
 * 	0.36: 28 Jun 2005: Add jumbo frame support.
 * 	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 * 	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 * 			   per-packet flags.
 * 	0.39: 18 Jul 2005: Add 64bit descriptor support.
 * 	0.40: 19 Jul 2005: Add support for mac address change.
 * 	0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 * 			   of nv_remove
 * 	0.42: 06 Aug 2005: Fix lack of link speed initialization
 * 			   in the second (and later) nv_open call
 * 	0.43: 10 Aug 2005: Add support for tx checksum.
 * 	0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 * 	0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 * 	0.46: 20 Oct 2005: Add irq optimization modes.
 * 	0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 * 	0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 * 	0.49: 10 Dec 2005: Fix tso for large buffers.
 * 	0.50: 20 Jan 2006: Add 8021pq tagging support.
 * 	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 * 	0.52: 20 Jan 2006: Add MSI/MSIX support.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION	"0.52"
#define DRV_NAME	"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif


/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and striping */
#define DEV_HAS_MSI		0x0040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x1ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR	0x0001
#define NVREG_IRQ_RX		0x0002
#define NVREG_IRQ_RX_NOBUF	0x0004
#define NVREG_IRQ_TX_ERR	0x0008
#define NVREG_IRQ_TX_OK		0x0010
#define NVREG_IRQ_TIMER		0x0020
#define NVREG_IRQ_LINK		0x0040
#define NVREG_IRQ_RX_FORCED	0x0080
#define NVREG_IRQ_TX_FORCED	0x0100
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU	0x0040
#define NVREG_IRQ_TX_ALL	(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL	(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER		(NVREG_IRQ_TIMER|NVREG_IRQ_LINK)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED	0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_ALWAYS	0x7F0008
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegUnknownSetupReg1 = 0xA0,
#define NVREG_UNKSETUP1_VAL	0x16070f
	NvRegUnknownSetupReg2 = 0xA4,
#define NVREG_UNKSETUP2_VAL	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT	0
#define NVREG_RINGSZ_RXSHIFT	16
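/* The ring sizes register is written as
 *	((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT)
 * as done in nv_change_mtu() below. */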
	NvRegUnknownTransmitterReg = 0x10c,
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegUnknownSetupReg3 = 0x13c,
#define NVREG_UNKSETUP3_VAL1	0x200010
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x02100
#define NVREG_TXRXCTL_DESC_3	0x02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegUnknownSetupReg4 = 0x184,
#define NVREG_UNKSETUP4_VAL	8

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,
};

/* Big endian: should work, but is untested */
struct ring_desc {
	u32 PacketBuffer;
	u32 FlagLen;
};

struct ring_desc_ex {
	u32 PacketBufferHigh;
	u32 PacketBufferLow;
	u32 TxVlan;
	u32 FlagLen;
};

typedef union _ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
} ring_type;

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
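/* i.e. LEN_MASK_V1 == 0x0000ffff and LEN_MASK_V2 == 0x00003fff: FlagLen packs
 * the buffer/packet length in the low bits and the NV_TX*/
/* and NV_RX* status
 * flags defined below in the high bits (see nv_descr_getlength()). */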

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT	(1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUMOK1	(0x10000000)
#define NV_RX2_CHECKSUMOK2	(0x14000000)
#define NV_RX2_CHECKSUMOK3	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ		0x270

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY	10
#define NV_MIIPHY_DELAYMAX	10000

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING		128
#define TX_RING		256
/*
 * If your nic mysteriously hangs then try to reduce the limits
 * to 1/0: It might be required to set NV_TX_LASTPACKET in the
 * last valid ring entry. But this would be impossible to
 * implement - probably a disassembly error.
 */
#define TX_LIMIT_STOP	255
#define TX_LIMIT_START	254

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3

/* PHY defines */
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHY_INIT1	0x0f000
#define PHY_INIT2	0x0e00
#define PHY_INIT3	0x01000
#define PHY_INIT4	0x0200
#define PHY_INIT5	0x0004
#define PHY_INIT6	0x02000
#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100	0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

/* FIXME: MII defines that should be added to <linux/mii.h> */
#define MII_1000BT_CR	0x09
#define MII_1000BT_SR	0x0a
#define ADVERTISE_1000FULL	0x0200
#define ADVERTISE_1000HALF	0x0100
#define LPA_1000FULL	0x0800
#define LPA_1000HALF	0x0400

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
 *   needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	u16 gigabit;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	ring_type rx_ring;
	unsigned int cur_rx, refill_rx;
	struct sk_buff *rx_skbuff[RX_RING];
	dma_addr_t rx_dma[RX_RING];
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	u32 nic_poll_irq;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	ring_type tx_ring;
	unsigned int next_tx, nic_tx;
	struct sk_buff *tx_skbuff[TX_RING];
	dma_addr_t tx_dma[TX_RING];
	unsigned int tx_dma_len[TX_RING];
	u32 tx_flags;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
#define NV_OPTIMIZATION_MODE_THROUGHPUT	0
#define NV_OPTIMIZATION_MODE_CPU	1
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is computed as [(time_in_micro_secs * 100) / (2^10)].
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
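/*
 * Worked example of the formula above: a 1 ms interval is 1000 us, and
 * (1000 * 100) / 1024 is roughly 97, the NVREG_POLL_DEFAULT=97 value quoted
 * near NvRegPollingInterval; the throughput default of 970 therefore
 * corresponds to roughly 10 ms, i.e. the ~100 interrupts/second mentioned
 * in the "Known bugs" note at the top of this file.
 */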

/*
 * Disable MSI interrupts
 */
static int disable_msi = 0;

/*
 * Disable MSIX interrupts
 */
static int disable_msix = 0;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
				int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
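/*
 * Usage as seen throughout this driver: a read passes MII_READ as the value
 * and returns the register contents (or -1 on error), e.g.
 *	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 * a write passes the new value instead and returns 0 on success, -1 on timeout.
 */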

static int phy_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	miicontrol |= BMCR_RESET;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	/* reset the phy */
	if (phy_reset(dev)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		writel(0, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	writel(0, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	writel(0, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	writel(0, base + NvRegUnknownTransmitterReg);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* It seems that the nic always generates interrupts and doesn't
	 * accumulate errors internally. Thus the current values in np->stats
	 * are already up to date.
	 */
	return &np->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int refill_rx = np->refill_rx;
	int nr;

	while (np->cur_rx != refill_rx) {
		struct sk_buff *skb;

		nr = refill_rx % RX_RING;
		if (np->rx_skbuff[nr] == NULL) {

			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
			if (!skb)
				break;

			skb->dev = dev;
			np->rx_skbuff[nr] = skb;
		} else {
			skb = np->rx_skbuff[nr];
		}
		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
					skb->end-skb->data, PCI_DMA_FROMDEVICE);
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
			wmb();
			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
		} else {
			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
			wmb();
			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
		}
		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
					dev->name, refill_rx);
		refill_rx++;
	}
	np->refill_rx = refill_rx;
	if (np->cur_rx - refill_rx == RX_RING)
		return 1;
	return 0;
}

static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);


	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
		disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (nv_alloc_rx(dev)) {
		spin_lock(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock(&np->lock);
	}
	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
		enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}

static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->cur_rx = RX_RING;
	np->refill_rx = 0;
	for (i = 0; i < RX_RING; i++)
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].FlagLen = 0;
		else
			np->rx_ring.ex[i].FlagLen = 0;
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->next_tx = np->nic_tx = 0;
	for (i = 0; i < TX_RING; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].FlagLen = 0;
		else
			np->tx_ring.ex[i].FlagLen = 0;
		np->tx_skbuff[i] = NULL;
		np->tx_dma[i] = 0;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	nv_init_tx(dev);
	nv_init_rx(dev);
	return nv_alloc_rx(dev);
}

static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
{
	struct fe_priv *np = netdev_priv(dev);

	dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
		dev->name, skbnr);

	if (np->tx_dma[skbnr]) {
		pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
			       np->tx_dma_len[skbnr],
			       PCI_DMA_TODEVICE);
		np->tx_dma[skbnr] = 0;
	}

	if (np->tx_skbuff[skbnr]) {
		dev_kfree_skb_any(np->tx_skbuff[skbnr]);
		np->tx_skbuff[skbnr] = NULL;
		return 1;
	} else {
		return 0;
	}
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < TX_RING; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].FlagLen = 0;
		else
			np->tx_ring.ex[i].FlagLen = 0;
		if (nv_release_txskb(dev, i))
			np->stats.tx_dropped++;
	}
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;
	for (i = 0; i < RX_RING; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].FlagLen = 0;
		else
			np->rx_ring.ex[i].FlagLen = 0;
		wmb();
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->rx_dma[i],
						np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
						PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
			np->rx_skbuff[i] = NULL;
		}
	}
}

static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}

/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with dev->xmit_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int nr = (np->next_tx - 1) % TX_RING;
	unsigned int start_nr = np->next_tx % TX_RING;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 tx_flags_vlan = 0;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irq(&np->lock);

	if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) {
		spin_unlock_irq(&np->lock);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	/* setup the header buffer */
	do {
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		nr = (nr + 1) % TX_RING;

		np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						PCI_DMA_TODEVICE);
		np->tx_dma_len[nr] = bcnt;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
			np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
		} else {
			np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
			np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
			np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
		}
		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
	} while(size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			nr = (nr + 1) % TX_RING;

			np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
						      PCI_DMA_TODEVICE);
			np->tx_dma_len[nr] = bcnt;

			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
				np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
			} else {
				np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
				np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
				np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
			}
			offset += bcnt;
			size -= bcnt;
		} while (size);
	}

	/* set last fragment flag */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
	} else {
		np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
	}

	np->tx_skbuff[nr] = skb;

#ifdef NETIF_F_TSO
	if (skb_shinfo(skb)->tso_size)
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
	else
#endif
	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);

	/* vlan tag */
	if (np->vlangrp && vlan_tx_tag_present(skb)) {
		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
	}

	/* set tx flags */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
	}

	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
		dev->name, np->next_tx, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	np->next_tx += entries;

	dev->trans_start = jiffies;
	spin_unlock_irq(&np->lock);
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));
	return NETDEV_TX_OK;
}

/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 Flags;
	unsigned int i;
	struct sk_buff *skb;

	while (np->nic_tx != np->next_tx) {
		i = np->nic_tx % TX_RING;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
		else
			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);

		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->nic_tx, Flags);
		if (Flags & NV_TX_VALID)
			break;
		if (np->desc_ver == DESC_VER_1) {
			if (Flags & NV_TX_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
					if (Flags & NV_TX_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (Flags & NV_TX_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		} else {
			if (Flags & NV_TX2_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
					if (Flags & NV_TX2_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (Flags & NV_TX2_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		}
		nv_release_txskb(dev, i);
		np->nic_tx++;
	}
	if (np->next_tx - np->nic_tx < TX_LIMIT_START)
		netif_wake_queue(dev);
}

/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with dev->xmit_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
				dev->name, (unsigned long)np->ring_addr,
				np->next_tx, np->nic_tx);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i=0;i<0x400;i+= 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
					i,
					readl(base + i + 0), readl(base + i + 4),
					readl(base + i + 8), readl(base + i + 12),
					readl(base + i + 16), readl(base + i + 20),
					readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i=0;i<TX_RING;i+= 4) {
			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	nv_tx_done(dev);

	/* 3) if there are dead entries: clear everything */
	if (np->next_tx != np->nic_tx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		np->next_tx = np->nic_tx = 0;
		setup_hw_rings(dev, NV_SETUP_TX_RING);
		netif_wake_queue(dev);
	}

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}

/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
		dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
				dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
				dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
			dev->name, datalen);
		return datalen;
	}
}

static void nv_rx_process(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 Flags;
	u32 vlanflags = 0;


	for (;;) {
		struct sk_buff *skb;
		int len;
		int i;
		if (np->cur_rx - np->refill_rx >= RX_RING)
			break;	/* we scanned the whole ring - do not continue */

		i = np->cur_rx % RX_RING;
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
		} else {
			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
			vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
		}

		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->cur_rx, Flags);

		if (Flags & NV_RX_AVAIL)
			break;	/* still owned by hardware, */

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->rx_dma[i],
				np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
				PCI_DMA_FROMDEVICE);

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (!(Flags & NV_RX_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & NV_RX_ERROR) {
				if (Flags & NV_RX_MISSEDFRAME) {
					np->stats.rx_missed_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_CRCERR) {
					np->stats.rx_crc_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_OVERFLOW) {
					np->stats.rx_over_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_ERROR4) {
					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
					if (len < 0) {
						np->stats.rx_errors++;
						goto next_pkt;
					}
				}
				/* framing errors are soft errors. */
				if (Flags & NV_RX_FRAMINGERR) {
					if (Flags & NV_RX_SUBSTRACT1) {
						len--;
					}
				}
			}
		} else {
			if (!(Flags & NV_RX2_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & NV_RX2_ERROR) {
				if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_CRCERR) {
					np->stats.rx_crc_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_OVERFLOW) {
					np->stats.rx_over_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_ERROR4) {
					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
					if (len < 0) {
						np->stats.rx_errors++;
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				if (Flags & NV_RX2_FRAMINGERR) {
					if (Flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
			}
			Flags &= NV_RX2_CHECKSUMMASK;
			if (Flags == NV_RX2_CHECKSUMOK1 ||
			    Flags == NV_RX2_CHECKSUMOK2 ||
			    Flags == NV_RX2_CHECKSUMOK3) {
				dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
				np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
			}
		}
		/* got a valid packet - forward it to the network core */
		skb = np->rx_skbuff[i];
		np->rx_skbuff[i] = NULL;

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
					dev->name, np->cur_rx, len, skb->protocol);
		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
			vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
		} else {
			netif_rx(skb);
		}
		dev->last_rx = jiffies;
		np->stats.rx_packets++;
		np->stats.rx_bytes += len;
next_pkt:
		np->cur_rx++;
	}
}

static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}

/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
			disable_irq(dev->irq);
		} else {
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
		spin_lock_bh(&dev->xmit_lock);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* reinit driver view of the rx queue */
		nv_init_rx(dev);
		nv_init_tx(dev);
		/* alloc new rx buffers */
		set_bufsize(dev);
		if (nv_alloc_rx(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		spin_unlock_bh(&dev->xmit_lock);
		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
			enable_irq(dev->irq);
		} else {
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
	return 0;
}

static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}

/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr*)addr;

	if(!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		spin_lock_bh(&dev->xmit_lock);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		spin_unlock_bh(&dev->xmit_lock);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}

/*
 * nv_set_multicast: dev->set_multicast function
 * Called with dev->xmit_lock held.
1746 */ 1747static void nv_set_multicast(struct net_device *dev) 1748{ 1749 struct fe_priv *np = netdev_priv(dev); 1750 u8 __iomem *base = get_hwbase(dev); 1751 u32 addr[2]; 1752 u32 mask[2]; 1753 u32 pff; 1754 1755 memset(addr, 0, sizeof(addr)); 1756 memset(mask, 0, sizeof(mask)); 1757 1758 if (dev->flags & IFF_PROMISC) { 1759 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); 1760 pff = NVREG_PFF_PROMISC; 1761 } else { 1762 pff = NVREG_PFF_MYADDR; 1763 1764 if (dev->flags & IFF_ALLMULTI || dev->mc_list) { 1765 u32 alwaysOff[2]; 1766 u32 alwaysOn[2]; 1767 1768 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; 1769 if (dev->flags & IFF_ALLMULTI) { 1770 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 1771 } else { 1772 struct dev_mc_list *walk; 1773 1774 walk = dev->mc_list; 1775 while (walk != NULL) { 1776 u32 a, b; 1777 a = le32_to_cpu(*(u32 *) walk->dmi_addr); 1778 b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4])); 1779 alwaysOn[0] &= a; 1780 alwaysOff[0] &= ~a; 1781 alwaysOn[1] &= b; 1782 alwaysOff[1] &= ~b; 1783 walk = walk->next; 1784 } 1785 } 1786 addr[0] = alwaysOn[0]; 1787 addr[1] = alwaysOn[1]; 1788 mask[0] = alwaysOn[0] | alwaysOff[0]; 1789 mask[1] = alwaysOn[1] | alwaysOff[1]; 1790 } 1791 } 1792 addr[0] |= NVREG_MCASTADDRA_FORCE; 1793 pff |= NVREG_PFF_ALWAYS; 1794 spin_lock_irq(&np->lock); 1795 nv_stop_rx(dev); 1796 writel(addr[0], base + NvRegMulticastAddrA); 1797 writel(addr[1], base + NvRegMulticastAddrB); 1798 writel(mask[0], base + NvRegMulticastMaskA); 1799 writel(mask[1], base + NvRegMulticastMaskB); 1800 writel(pff, base + NvRegPacketFilterFlags); 1801 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", 1802 dev->name); 1803 nv_start_rx(dev); 1804 spin_unlock_irq(&np->lock); 1805} 1806 1807/** 1808 * nv_update_linkspeed: Setup the MAC according to the link partner 1809 * @dev: Network device to be configured 1810 * 1811 * The function queries the PHY and checks if there is a link partner. 1812 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is 1813 * set to 10 MBit HD. 1814 * 1815 * The function returns 0 if there is no link partner and 1 if there is 1816 * a good link partner. 1817 */ 1818static int nv_update_linkspeed(struct net_device *dev) 1819{ 1820 struct fe_priv *np = netdev_priv(dev); 1821 u8 __iomem *base = get_hwbase(dev); 1822 int adv, lpa; 1823 int newls = np->linkspeed; 1824 int newdup = np->duplex; 1825 int mii_status; 1826 int retval = 0; 1827 u32 control_1000, status_1000, phyreg; 1828 1829 /* BMSR_LSTATUS is latched, read it twice: 1830 * we want the current value. 
1831 */ 1832 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 1833 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 1834 1835 if (!(mii_status & BMSR_LSTATUS)) { 1836 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", 1837 dev->name); 1838 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 1839 newdup = 0; 1840 retval = 0; 1841 goto set_speed; 1842 } 1843 1844 if (np->autoneg == 0) { 1845 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", 1846 dev->name, np->fixed_mode); 1847 if (np->fixed_mode & LPA_100FULL) { 1848 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 1849 newdup = 1; 1850 } else if (np->fixed_mode & LPA_100HALF) { 1851 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 1852 newdup = 0; 1853 } else if (np->fixed_mode & LPA_10FULL) { 1854 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 1855 newdup = 1; 1856 } else { 1857 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 1858 newdup = 0; 1859 } 1860 retval = 1; 1861 goto set_speed; 1862 } 1863 /* check auto negotiation is complete */ 1864 if (!(mii_status & BMSR_ANEGCOMPLETE)) { 1865 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ 1866 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 1867 newdup = 0; 1868 retval = 0; 1869 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); 1870 goto set_speed; 1871 } 1872 1873 retval = 1; 1874 if (np->gigabit == PHY_GIGABIT) { 1875 control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); 1876 status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ); 1877 1878 if ((control_1000 & ADVERTISE_1000FULL) && 1879 (status_1000 & LPA_1000FULL)) { 1880 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", 1881 dev->name); 1882 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 1883 newdup = 1; 1884 goto set_speed; 1885 } 1886 } 1887 1888 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 1889 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 1890 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", 1891 dev->name, adv, lpa); 1892 1893 /* FIXME: handle parallel detection properly */ 1894 lpa = lpa & adv; 1895 if (lpa & LPA_100FULL) { 1896 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 1897 newdup = 1; 1898 } else if (lpa & LPA_100HALF) { 1899 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 1900 newdup = 0; 1901 } else if (lpa & LPA_10FULL) { 1902 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 1903 newdup = 1; 1904 } else if (lpa & LPA_10HALF) { 1905 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 1906 newdup = 0; 1907 } else { 1908 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa); 1909 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 1910 newdup = 0; 1911 } 1912 1913set_speed: 1914 if (np->duplex == newdup && np->linkspeed == newls) 1915 return retval; 1916 1917 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", 1918 dev->name, np->linkspeed, np->duplex, newls, newdup); 1919 1920 np->duplex = newdup; 1921 np->linkspeed = newls; 1922 1923 if (np->gigabit == PHY_GIGABIT) { 1924 phyreg = readl(base + NvRegRandomSeed); 1925 phyreg &= ~(0x3FF00); 1926 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) 1927 phyreg |= NVREG_RNDSEED_FORCE3; 1928 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) 1929 phyreg |= NVREG_RNDSEED_FORCE2; 1930 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 1931 phyreg |= NVREG_RNDSEED_FORCE; 1932 
writel(phyreg, base + NvRegRandomSeed); 1933 } 1934 1935 phyreg = readl(base + NvRegPhyInterface); 1936 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 1937 if (np->duplex == 0) 1938 phyreg |= PHY_HALF; 1939 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 1940 phyreg |= PHY_100; 1941 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 1942 phyreg |= PHY_1000; 1943 writel(phyreg, base + NvRegPhyInterface); 1944 1945 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 1946 base + NvRegMisc1); 1947 pci_push(base); 1948 writel(np->linkspeed, base + NvRegLinkSpeed); 1949 pci_push(base); 1950 1951 return retval; 1952} 1953 1954static void nv_linkchange(struct net_device *dev) 1955{ 1956 if (nv_update_linkspeed(dev)) { 1957 if (!netif_carrier_ok(dev)) { 1958 netif_carrier_on(dev); 1959 printk(KERN_INFO "%s: link up.\n", dev->name); 1960 nv_start_rx(dev); 1961 } 1962 } else { 1963 if (netif_carrier_ok(dev)) { 1964 netif_carrier_off(dev); 1965 printk(KERN_INFO "%s: link down.\n", dev->name); 1966 nv_stop_rx(dev); 1967 } 1968 } 1969} 1970 1971static void nv_link_irq(struct net_device *dev) 1972{ 1973 u8 __iomem *base = get_hwbase(dev); 1974 u32 miistat; 1975 1976 miistat = readl(base + NvRegMIIStatus); 1977 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 1978 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); 1979 1980 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 1981 nv_linkchange(dev); 1982 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); 1983} 1984 1985static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) 1986{ 1987 struct net_device *dev = (struct net_device *) data; 1988 struct fe_priv *np = netdev_priv(dev); 1989 u8 __iomem *base = get_hwbase(dev); 1990 u32 events; 1991 int i; 1992 1993 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 1994 1995 for (i=0; ; i++) { 1996 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 1997 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 1998 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 1999 } else { 2000 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2001 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 2002 } 2003 pci_push(base); 2004 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 2005 if (!(events & np->irqmask)) 2006 break; 2007 2008 spin_lock(&np->lock); 2009 nv_tx_done(dev); 2010 spin_unlock(&np->lock); 2011 2012 nv_rx_process(dev); 2013 if (nv_alloc_rx(dev)) { 2014 spin_lock(&np->lock); 2015 if (!np->in_shutdown) 2016 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2017 spin_unlock(&np->lock); 2018 } 2019 2020 if (events & NVREG_IRQ_LINK) { 2021 spin_lock(&np->lock); 2022 nv_link_irq(dev); 2023 spin_unlock(&np->lock); 2024 } 2025 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 2026 spin_lock(&np->lock); 2027 nv_linkchange(dev); 2028 spin_unlock(&np->lock); 2029 np->link_timeout = jiffies + LINK_TIMEOUT; 2030 } 2031 if (events & (NVREG_IRQ_TX_ERR)) { 2032 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 2033 dev->name, events); 2034 } 2035 if (events & (NVREG_IRQ_UNKNOWN)) { 2036 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. 
Please report\n", 2037 dev->name, events); 2038 } 2039 if (i > max_interrupt_work) { 2040 spin_lock(&np->lock); 2041 /* disable interrupts on the nic */ 2042 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 2043 writel(0, base + NvRegIrqMask); 2044 else 2045 writel(np->irqmask, base + NvRegIrqMask); 2046 pci_push(base); 2047 2048 if (!np->in_shutdown) { 2049 np->nic_poll_irq = np->irqmask; 2050 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2051 } 2052 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); 2053 spin_unlock(&np->lock); 2054 break; 2055 } 2056 2057 } 2058 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 2059 2060 return IRQ_RETVAL(i); 2061} 2062 2063static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) 2064{ 2065 struct net_device *dev = (struct net_device *) data; 2066 struct fe_priv *np = netdev_priv(dev); 2067 u8 __iomem *base = get_hwbase(dev); 2068 u32 events; 2069 int i; 2070 2071 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 2072 2073 for (i=0; ; i++) { 2074 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 2075 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 2076 pci_push(base); 2077 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 2078 if (!(events & np->irqmask)) 2079 break; 2080 2081 spin_lock(&np->lock); 2082 nv_tx_done(dev); 2083 spin_unlock(&np->lock); 2084 2085 if (events & (NVREG_IRQ_TX_ERR)) { 2086 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 2087 dev->name, events); 2088 } 2089 if (i > max_interrupt_work) { 2090 spin_lock(&np->lock); 2091 /* disable interrupts on the nic */ 2092 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 2093 pci_push(base); 2094 2095 if (!np->in_shutdown) { 2096 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 2097 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2098 } 2099 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 2100 spin_unlock(&np->lock); 2101 break; 2102 } 2103 2104 } 2105 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); 2106 2107 return IRQ_RETVAL(i); 2108} 2109 2110static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) 2111{ 2112 struct net_device *dev = (struct net_device *) data; 2113 struct fe_priv *np = netdev_priv(dev); 2114 u8 __iomem *base = get_hwbase(dev); 2115 u32 events; 2116 int i; 2117 2118 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 2119 2120 for (i=0; ; i++) { 2121 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 2122 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 2123 pci_push(base); 2124 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 2125 if (!(events & np->irqmask)) 2126 break; 2127 2128 nv_rx_process(dev); 2129 if (nv_alloc_rx(dev)) { 2130 spin_lock(&np->lock); 2131 if (!np->in_shutdown) 2132 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2133 spin_unlock(&np->lock); 2134 } 2135 2136 if (i > max_interrupt_work) { 2137 spin_lock(&np->lock); 2138 /* disable interrupts on the nic */ 2139 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 2140 pci_push(base); 2141 2142 if (!np->in_shutdown) { 2143 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 2144 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2145 } 2146 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 2147 spin_unlock(&np->lock); 2148 break; 2149 } 2150 2151 } 2152 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); 2153 2154 return IRQ_RETVAL(i); 2155} 2156 2157static irqreturn_t 
nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) 2158{ 2159 struct net_device *dev = (struct net_device *) data; 2160 struct fe_priv *np = netdev_priv(dev); 2161 u8 __iomem *base = get_hwbase(dev); 2162 u32 events; 2163 int i; 2164 2165 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 2166 2167 for (i=0; ; i++) { 2168 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 2169 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 2170 pci_push(base); 2171 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 2172 if (!(events & np->irqmask)) 2173 break; 2174 2175 if (events & NVREG_IRQ_LINK) { 2176 spin_lock(&np->lock); 2177 nv_link_irq(dev); 2178 spin_unlock(&np->lock); 2179 } 2180 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 2181 spin_lock(&np->lock); 2182 nv_linkchange(dev); 2183 spin_unlock(&np->lock); 2184 np->link_timeout = jiffies + LINK_TIMEOUT; 2185 } 2186 if (events & (NVREG_IRQ_UNKNOWN)) { 2187 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 2188 dev->name, events); 2189 } 2190 if (i > max_interrupt_work) { 2191 spin_lock(&np->lock); 2192 /* disable interrupts on the nic */ 2193 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 2194 pci_push(base); 2195 2196 if (!np->in_shutdown) { 2197 np->nic_poll_irq |= NVREG_IRQ_OTHER; 2198 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2199 } 2200 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 2201 spin_unlock(&np->lock); 2202 break; 2203 } 2204 2205 } 2206 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); 2207 2208 return IRQ_RETVAL(i); 2209} 2210 2211static void nv_do_nic_poll(unsigned long data) 2212{ 2213 struct net_device *dev = (struct net_device *) data; 2214 struct fe_priv *np = netdev_priv(dev); 2215 u8 __iomem *base = get_hwbase(dev); 2216 u32 mask = 0; 2217 2218 /* 2219 * First disable irq(s) and then 2220 * reenable interrupts on the nic, we have to do this before calling 2221 * nv_nic_irq because that may decide to do otherwise 2222 */ 2223 2224 if (!(np->msi_flags & NV_MSI_X_ENABLED) || 2225 ((np->msi_flags & NV_MSI_X_ENABLED) && 2226 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { 2227 disable_irq(dev->irq); 2228 mask = np->irqmask; 2229 } else { 2230 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 2231 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 2232 mask |= NVREG_IRQ_RX_ALL; 2233 } 2234 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 2235 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 2236 mask |= NVREG_IRQ_TX_ALL; 2237 } 2238 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 2239 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 2240 mask |= NVREG_IRQ_OTHER; 2241 } 2242 } 2243 np->nic_poll_irq = 0; 2244 2245 /* FIXME: Do we need synchronize_irq(dev->irq) here? 
*/ 2246 2247 writel(mask, base + NvRegIrqMask); 2248 pci_push(base); 2249 2250 if (!(np->msi_flags & NV_MSI_X_ENABLED) || 2251 ((np->msi_flags & NV_MSI_X_ENABLED) && 2252 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { 2253 nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); 2254 enable_irq(dev->irq); 2255 } else { 2256 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 2257 nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL); 2258 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 2259 } 2260 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 2261 nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL); 2262 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 2263 } 2264 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 2265 nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL); 2266 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 2267 } 2268 } 2269} 2270 2271#ifdef CONFIG_NET_POLL_CONTROLLER 2272static void nv_poll_controller(struct net_device *dev) 2273{ 2274 nv_do_nic_poll((unsigned long) dev); 2275} 2276#endif 2277 2278static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2279{ 2280 struct fe_priv *np = netdev_priv(dev); 2281 strcpy(info->driver, "forcedeth"); 2282 strcpy(info->version, FORCEDETH_VERSION); 2283 strcpy(info->bus_info, pci_name(np->pci_dev)); 2284} 2285 2286static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 2287{ 2288 struct fe_priv *np = netdev_priv(dev); 2289 wolinfo->supported = WAKE_MAGIC; 2290 2291 spin_lock_irq(&np->lock); 2292 if (np->wolenabled) 2293 wolinfo->wolopts = WAKE_MAGIC; 2294 spin_unlock_irq(&np->lock); 2295} 2296 2297static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 2298{ 2299 struct fe_priv *np = netdev_priv(dev); 2300 u8 __iomem *base = get_hwbase(dev); 2301 2302 spin_lock_irq(&np->lock); 2303 if (wolinfo->wolopts == 0) { 2304 writel(0, base + NvRegWakeUpFlags); 2305 np->wolenabled = 0; 2306 } 2307 if (wolinfo->wolopts & WAKE_MAGIC) { 2308 writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags); 2309 np->wolenabled = 1; 2310 } 2311 spin_unlock_irq(&np->lock); 2312 return 0; 2313} 2314 2315static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 2316{ 2317 struct fe_priv *np = netdev_priv(dev); 2318 int adv; 2319 2320 spin_lock_irq(&np->lock); 2321 ecmd->port = PORT_MII; 2322 if (!netif_running(dev)) { 2323 /* We do not track link speed / duplex setting if the 2324 * interface is disabled. 
Force a link check */ 2325 nv_update_linkspeed(dev); 2326 } 2327 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 2328 case NVREG_LINKSPEED_10: 2329 ecmd->speed = SPEED_10; 2330 break; 2331 case NVREG_LINKSPEED_100: 2332 ecmd->speed = SPEED_100; 2333 break; 2334 case NVREG_LINKSPEED_1000: 2335 ecmd->speed = SPEED_1000; 2336 break; 2337 } 2338 ecmd->duplex = DUPLEX_HALF; 2339 if (np->duplex) 2340 ecmd->duplex = DUPLEX_FULL; 2341 2342 ecmd->autoneg = np->autoneg; 2343 2344 ecmd->advertising = ADVERTISED_MII; 2345 if (np->autoneg) { 2346 ecmd->advertising |= ADVERTISED_Autoneg; 2347 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 2348 } else { 2349 adv = np->fixed_mode; 2350 } 2351 if (adv & ADVERTISE_10HALF) 2352 ecmd->advertising |= ADVERTISED_10baseT_Half; 2353 if (adv & ADVERTISE_10FULL) 2354 ecmd->advertising |= ADVERTISED_10baseT_Full; 2355 if (adv & ADVERTISE_100HALF) 2356 ecmd->advertising |= ADVERTISED_100baseT_Half; 2357 if (adv & ADVERTISE_100FULL) 2358 ecmd->advertising |= ADVERTISED_100baseT_Full; 2359 if (np->autoneg && np->gigabit == PHY_GIGABIT) { 2360 adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); 2361 if (adv & ADVERTISE_1000FULL) 2362 ecmd->advertising |= ADVERTISED_1000baseT_Full; 2363 } 2364 2365 ecmd->supported = (SUPPORTED_Autoneg | 2366 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 2367 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 2368 SUPPORTED_MII); 2369 if (np->gigabit == PHY_GIGABIT) 2370 ecmd->supported |= SUPPORTED_1000baseT_Full; 2371 2372 ecmd->phy_address = np->phyaddr; 2373 ecmd->transceiver = XCVR_EXTERNAL; 2374 2375 /* ignore maxtxpkt, maxrxpkt for now */ 2376 spin_unlock_irq(&np->lock); 2377 return 0; 2378} 2379 2380static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 2381{ 2382 struct fe_priv *np = netdev_priv(dev); 2383 2384 if (ecmd->port != PORT_MII) 2385 return -EINVAL; 2386 if (ecmd->transceiver != XCVR_EXTERNAL) 2387 return -EINVAL; 2388 if (ecmd->phy_address != np->phyaddr) { 2389 /* TODO: support switching between multiple phys. Should be 2390 * trivial, but not enabled due to lack of test hardware. */ 2391 return -EINVAL; 2392 } 2393 if (ecmd->autoneg == AUTONEG_ENABLE) { 2394 u32 mask; 2395 2396 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 2397 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 2398 if (np->gigabit == PHY_GIGABIT) 2399 mask |= ADVERTISED_1000baseT_Full; 2400 2401 if ((ecmd->advertising & mask) == 0) 2402 return -EINVAL; 2403 2404 } else if (ecmd->autoneg == AUTONEG_DISABLE) { 2405 /* Note: autonegotiation disable, speed 1000 intentionally 2406 * forbidden - noone should need that. 
 */

		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	spin_lock_irq(&np->lock);
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
		}

		/* enable autonegotiation and (re)start it */
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);

	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
		}

		/* clear the autoneg/speed/duplex bits, then force the
		 * requested fixed mode */
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
		if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);

		if (netif_running(dev)) {
			/* Wait a bit and then reconfigure the nic. */
			udelay(10);
			nv_linkchange(dev);
		}
	}
	spin_unlock_irq(&np->lock);

	return 0;
}

#define FORCEDETH_REGS_VER	1
#define FORCEDETH_REGS_SIZE	0x400 /* 256 32-bit registers */

static int nv_get_regs_len(struct net_device *dev)
{
	return FORCEDETH_REGS_SIZE;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	for (i = 0; i < FORCEDETH_REGS_SIZE/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}

static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	spin_lock_irq(&np->lock);
	if (np->autoneg) {
		int bmcr;

		/* restart autonegotiation on the phy */
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);

		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irq(&np->lock);

	return ret;
}

static struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.get_perm_addr = ethtool_op_get_perm_addr,
};

static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}

static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	/* nothing to do */
}

static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
2584 */ 2585 for (i = 0; i < 8; i++) { 2586 if ((irqmask >> i) & 0x1) { 2587 msixmap |= vector << (i << 2); 2588 } 2589 } 2590 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 2591 2592 msixmap = 0; 2593 for (i = 0; i < 8; i++) { 2594 if ((irqmask >> (i + 8)) & 0x1) { 2595 msixmap |= vector << (i << 2); 2596 } 2597 } 2598 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 2599} 2600 2601static int nv_open(struct net_device *dev) 2602{ 2603 struct fe_priv *np = netdev_priv(dev); 2604 u8 __iomem *base = get_hwbase(dev); 2605 int ret = 1; 2606 int oom, i; 2607 2608 dprintk(KERN_DEBUG "nv_open: begin\n"); 2609 2610 /* 1) erase previous misconfiguration */ 2611 /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */ 2612 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 2613 writel(0, base + NvRegMulticastAddrB); 2614 writel(0, base + NvRegMulticastMaskA); 2615 writel(0, base + NvRegMulticastMaskB); 2616 writel(0, base + NvRegPacketFilterFlags); 2617 2618 writel(0, base + NvRegTransmitterControl); 2619 writel(0, base + NvRegReceiverControl); 2620 2621 writel(0, base + NvRegAdapterControl); 2622 2623 /* 2) initialize descriptor rings */ 2624 set_bufsize(dev); 2625 oom = nv_init_ring(dev); 2626 2627 writel(0, base + NvRegLinkSpeed); 2628 writel(0, base + NvRegUnknownTransmitterReg); 2629 nv_txrx_reset(dev); 2630 writel(0, base + NvRegUnknownSetupReg6); 2631 2632 np->in_shutdown = 0; 2633 2634 /* 3) set mac address */ 2635 nv_copy_mac_to_hw(dev); 2636 2637 /* 4) give hw rings */ 2638 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2639 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), 2640 base + NvRegRingSizes); 2641 2642 /* 5) continue setup */ 2643 writel(np->linkspeed, base + NvRegLinkSpeed); 2644 writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3); 2645 writel(np->txrxctl_bits, base + NvRegTxRxControl); 2646 writel(np->vlanctl_bits, base + NvRegVlanControl); 2647 pci_push(base); 2648 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 2649 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 2650 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 2651 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 2652 2653 writel(0, base + NvRegUnknownSetupReg4); 2654 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 2655 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 2656 2657 /* 6) continue setup */ 2658 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 2659 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 2660 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 2661 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2662 2663 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 2664 get_random_bytes(&i, sizeof(i)); 2665 writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); 2666 writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1); 2667 writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2); 2668 if (poll_interval == -1) { 2669 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 2670 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 2671 else 2672 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 2673 } 2674 else 2675 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 2676 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 2677 writel((np->phyaddr << 
NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 2678 base + NvRegAdapterControl); 2679 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); 2680 writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4); 2681 writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags); 2682 2683 i = readl(base + NvRegPowerState); 2684 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 2685 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 2686 2687 pci_push(base); 2688 udelay(10); 2689 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); 2690 2691 writel(0, base + NvRegIrqMask); 2692 pci_push(base); 2693 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 2694 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 2695 pci_push(base); 2696 2697 if (np->msi_flags & NV_MSI_X_CAPABLE) { 2698 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 2699 np->msi_x_entry[i].entry = i; 2700 } 2701 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 2702 np->msi_flags |= NV_MSI_X_ENABLED; 2703 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { 2704 /* Request irq for rx handling */ 2705 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) { 2706 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 2707 pci_disable_msix(np->pci_dev); 2708 np->msi_flags &= ~NV_MSI_X_ENABLED; 2709 goto out_drain; 2710 } 2711 /* Request irq for tx handling */ 2712 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) { 2713 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 2714 pci_disable_msix(np->pci_dev); 2715 np->msi_flags &= ~NV_MSI_X_ENABLED; 2716 goto out_drain; 2717 } 2718 /* Request irq for link and timer handling */ 2719 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) { 2720 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 2721 pci_disable_msix(np->pci_dev); 2722 np->msi_flags &= ~NV_MSI_X_ENABLED; 2723 goto out_drain; 2724 } 2725 2726 /* map interrupts to their respective vector */ 2727 writel(0, base + NvRegMSIXMap0); 2728 writel(0, base + NvRegMSIXMap1); 2729 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 2730 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 2731 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 2732 } else { 2733 /* Request irq for all interrupts */ 2734 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { 2735 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 2736 pci_disable_msix(np->pci_dev); 2737 np->msi_flags &= ~NV_MSI_X_ENABLED; 2738 goto out_drain; 2739 } 2740 2741 /* map interrupts to vector 0 */ 2742 writel(0, base + NvRegMSIXMap0); 2743 writel(0, base + NvRegMSIXMap1); 2744 } 2745 } 2746 } 2747 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 2748 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 2749 np->msi_flags |= NV_MSI_ENABLED; 2750 if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { 2751 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 2752 pci_disable_msi(np->pci_dev); 2753 np->msi_flags &= ~NV_MSI_ENABLED; 2754 goto out_drain; 2755 } 2756 2757 /* map interrupts to vector 0 */ 2758 writel(0, base + NvRegMSIMap0); 2759 writel(0, base + NvRegMSIMap1); 2760 /* enable msi 
vector 0 */ 2761 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 2762 } 2763 } 2764 if (ret != 0) { 2765 if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) 2766 goto out_drain; 2767 } 2768 2769 /* ask for interrupts */ 2770 writel(np->irqmask, base + NvRegIrqMask); 2771 2772 spin_lock_irq(&np->lock); 2773 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 2774 writel(0, base + NvRegMulticastAddrB); 2775 writel(0, base + NvRegMulticastMaskA); 2776 writel(0, base + NvRegMulticastMaskB); 2777 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 2778 /* One manual link speed update: Interrupts are enabled, future link 2779 * speed changes cause interrupts and are handled by nv_link_irq(). 2780 */ 2781 { 2782 u32 miistat; 2783 miistat = readl(base + NvRegMIIStatus); 2784 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 2785 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); 2786 } 2787 /* set linkspeed to invalid value, thus force nv_update_linkspeed 2788 * to init hw */ 2789 np->linkspeed = 0; 2790 ret = nv_update_linkspeed(dev); 2791 nv_start_rx(dev); 2792 nv_start_tx(dev); 2793 netif_start_queue(dev); 2794 if (ret) { 2795 netif_carrier_on(dev); 2796 } else { 2797 printk("%s: no link during initialization.\n", dev->name); 2798 netif_carrier_off(dev); 2799 } 2800 if (oom) 2801 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2802 spin_unlock_irq(&np->lock); 2803 2804 return 0; 2805out_drain: 2806 drain_ring(dev); 2807 return ret; 2808} 2809 2810static int nv_close(struct net_device *dev) 2811{ 2812 struct fe_priv *np = netdev_priv(dev); 2813 u8 __iomem *base; 2814 int i; 2815 2816 spin_lock_irq(&np->lock); 2817 np->in_shutdown = 1; 2818 spin_unlock_irq(&np->lock); 2819 synchronize_irq(dev->irq); 2820 2821 del_timer_sync(&np->oom_kick); 2822 del_timer_sync(&np->nic_poll); 2823 2824 netif_stop_queue(dev); 2825 spin_lock_irq(&np->lock); 2826 nv_stop_tx(dev); 2827 nv_stop_rx(dev); 2828 nv_txrx_reset(dev); 2829 2830 /* disable interrupts on the nic or we will lock up */ 2831 base = get_hwbase(dev); 2832 if (np->msi_flags & NV_MSI_X_ENABLED) { 2833 writel(np->irqmask, base + NvRegIrqMask); 2834 } else { 2835 if (np->msi_flags & NV_MSI_ENABLED) 2836 writel(0, base + NvRegMSIIrqMask); 2837 writel(0, base + NvRegIrqMask); 2838 } 2839 pci_push(base); 2840 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); 2841 2842 spin_unlock_irq(&np->lock); 2843 2844 if (np->msi_flags & NV_MSI_X_ENABLED) { 2845 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 2846 free_irq(np->msi_x_entry[i].vector, dev); 2847 } 2848 pci_disable_msix(np->pci_dev); 2849 np->msi_flags &= ~NV_MSI_X_ENABLED; 2850 } else { 2851 free_irq(np->pci_dev->irq, dev); 2852 if (np->msi_flags & NV_MSI_ENABLED) { 2853 pci_disable_msi(np->pci_dev); 2854 np->msi_flags &= ~NV_MSI_ENABLED; 2855 } 2856 } 2857 2858 drain_ring(dev); 2859 2860 if (np->wolenabled) 2861 nv_start_rx(dev); 2862 2863 /* special op: write back the misordered MAC address - otherwise 2864 * the next nv_probe would see a wrong address. 
2865 */ 2866 writel(np->orig_mac[0], base + NvRegMacAddrA); 2867 writel(np->orig_mac[1], base + NvRegMacAddrB); 2868 2869 /* FIXME: power down nic */ 2870 2871 return 0; 2872} 2873 2874static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 2875{ 2876 struct net_device *dev; 2877 struct fe_priv *np; 2878 unsigned long addr; 2879 u8 __iomem *base; 2880 int err, i; 2881 2882 dev = alloc_etherdev(sizeof(struct fe_priv)); 2883 err = -ENOMEM; 2884 if (!dev) 2885 goto out; 2886 2887 np = netdev_priv(dev); 2888 np->pci_dev = pci_dev; 2889 spin_lock_init(&np->lock); 2890 SET_MODULE_OWNER(dev); 2891 SET_NETDEV_DEV(dev, &pci_dev->dev); 2892 2893 init_timer(&np->oom_kick); 2894 np->oom_kick.data = (unsigned long) dev; 2895 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ 2896 init_timer(&np->nic_poll); 2897 np->nic_poll.data = (unsigned long) dev; 2898 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ 2899 2900 err = pci_enable_device(pci_dev); 2901 if (err) { 2902 printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n", 2903 err, pci_name(pci_dev)); 2904 goto out_free; 2905 } 2906 2907 pci_set_master(pci_dev); 2908 2909 err = pci_request_regions(pci_dev, DRV_NAME); 2910 if (err < 0) 2911 goto out_disable; 2912 2913 err = -EINVAL; 2914 addr = 0; 2915 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 2916 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", 2917 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), 2918 pci_resource_len(pci_dev, i), 2919 pci_resource_flags(pci_dev, i)); 2920 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 2921 pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) { 2922 addr = pci_resource_start(pci_dev, i); 2923 break; 2924 } 2925 } 2926 if (i == DEVICE_COUNT_RESOURCE) { 2927 printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n", 2928 pci_name(pci_dev)); 2929 goto out_relreg; 2930 } 2931 2932 /* handle different descriptor versions */ 2933 if (id->driver_data & DEV_HAS_HIGH_DMA) { 2934 /* packet format 3: supports 40-bit addressing */ 2935 np->desc_ver = DESC_VER_3; 2936 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { 2937 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", 2938 pci_name(pci_dev)); 2939 } else { 2940 if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) { 2941 printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n", 2942 pci_name(pci_dev)); 2943 goto out_relreg; 2944 } else { 2945 dev->features |= NETIF_F_HIGHDMA; 2946 printk(KERN_INFO "forcedeth: using HIGHDMA\n"); 2947 } 2948 } 2949 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 2950 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 2951 /* packet format 2: supports jumbo frames */ 2952 np->desc_ver = DESC_VER_2; 2953 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; 2954 } else { 2955 /* original packet format */ 2956 np->desc_ver = DESC_VER_1; 2957 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; 2958 } 2959 2960 np->pkt_limit = NV_PKTLIMIT_1; 2961 if (id->driver_data & DEV_HAS_LARGEDESC) 2962 np->pkt_limit = NV_PKTLIMIT_2; 2963 2964 if (id->driver_data & DEV_HAS_CHECKSUM) { 2965 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 2966 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 2967#ifdef NETIF_F_TSO 2968 dev->features |= NETIF_F_TSO; 2969#endif 2970 } 2971 2972 np->vlanctl_bits = 0; 2973 if (id->driver_data & DEV_HAS_VLAN) { 2974 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; 2975 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; 
2976 dev->vlan_rx_register = nv_vlan_rx_register; 2977 dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid; 2978 } 2979 2980 np->msi_flags = 0; 2981 if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) { 2982 np->msi_flags |= NV_MSI_CAPABLE; 2983 } 2984 if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) { 2985 np->msi_flags |= NV_MSI_X_CAPABLE; 2986 } 2987 2988 err = -ENOMEM; 2989 np->base = ioremap(addr, NV_PCI_REGSZ); 2990 if (!np->base) 2991 goto out_relreg; 2992 dev->base_addr = (unsigned long)np->base; 2993 2994 dev->irq = pci_dev->irq; 2995 2996 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 2997 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 2998 sizeof(struct ring_desc) * (RX_RING + TX_RING), 2999 &np->ring_addr); 3000 if (!np->rx_ring.orig) 3001 goto out_unmap; 3002 np->tx_ring.orig = &np->rx_ring.orig[RX_RING]; 3003 } else { 3004 np->rx_ring.ex = pci_alloc_consistent(pci_dev, 3005 sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), 3006 &np->ring_addr); 3007 if (!np->rx_ring.ex) 3008 goto out_unmap; 3009 np->tx_ring.ex = &np->rx_ring.ex[RX_RING]; 3010 } 3011 3012 dev->open = nv_open; 3013 dev->stop = nv_close; 3014 dev->hard_start_xmit = nv_start_xmit; 3015 dev->get_stats = nv_get_stats; 3016 dev->change_mtu = nv_change_mtu; 3017 dev->set_mac_address = nv_set_mac_address; 3018 dev->set_multicast_list = nv_set_multicast; 3019#ifdef CONFIG_NET_POLL_CONTROLLER 3020 dev->poll_controller = nv_poll_controller; 3021#endif 3022 SET_ETHTOOL_OPS(dev, &ops); 3023 dev->tx_timeout = nv_tx_timeout; 3024 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 3025 3026 pci_set_drvdata(pci_dev, dev); 3027 3028 /* read the mac address */ 3029 base = get_hwbase(dev); 3030 np->orig_mac[0] = readl(base + NvRegMacAddrA); 3031 np->orig_mac[1] = readl(base + NvRegMacAddrB); 3032 3033 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; 3034 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; 3035 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; 3036 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; 3037 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 3038 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 3039 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 3040 3041 if (!is_valid_ether_addr(dev->perm_addr)) { 3042 /* 3043 * Bad mac address. At least one bios sets the mac address 3044 * to 01:23:45:67:89:ab 3045 */ 3046 printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n", 3047 pci_name(pci_dev), 3048 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 3049 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 3050 printk(KERN_ERR "Please complain to your hardware vendor. 
Switching to a random MAC.\n"); 3051 dev->dev_addr[0] = 0x00; 3052 dev->dev_addr[1] = 0x00; 3053 dev->dev_addr[2] = 0x6c; 3054 get_random_bytes(&dev->dev_addr[3], 3); 3055 } 3056 3057 dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev), 3058 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 3059 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 3060 3061 /* disable WOL */ 3062 writel(0, base + NvRegWakeUpFlags); 3063 np->wolenabled = 0; 3064 3065 if (np->desc_ver == DESC_VER_1) { 3066 np->tx_flags = NV_TX_VALID; 3067 } else { 3068 np->tx_flags = NV_TX2_VALID; 3069 } 3070 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { 3071 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 3072 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 3073 np->msi_flags |= 0x0003; 3074 } else { 3075 np->irqmask = NVREG_IRQMASK_CPU; 3076 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 3077 np->msi_flags |= 0x0001; 3078 } 3079 3080 if (id->driver_data & DEV_NEED_TIMERIRQ) 3081 np->irqmask |= NVREG_IRQ_TIMER; 3082 if (id->driver_data & DEV_NEED_LINKTIMER) { 3083 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev)); 3084 np->need_linktimer = 1; 3085 np->link_timeout = jiffies + LINK_TIMEOUT; 3086 } else { 3087 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev)); 3088 np->need_linktimer = 0; 3089 } 3090 3091 /* find a suitable phy */ 3092 for (i = 1; i <= 32; i++) { 3093 int id1, id2; 3094 int phyaddr = i & 0x1F; 3095 3096 spin_lock_irq(&np->lock); 3097 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ); 3098 spin_unlock_irq(&np->lock); 3099 if (id1 < 0 || id1 == 0xffff) 3100 continue; 3101 spin_lock_irq(&np->lock); 3102 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ); 3103 spin_unlock_irq(&np->lock); 3104 if (id2 < 0 || id2 == 0xffff) 3105 continue; 3106 3107 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; 3108 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; 3109 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", 3110 pci_name(pci_dev), id1, id2, phyaddr); 3111 np->phyaddr = phyaddr; 3112 np->phy_oui = id1 | id2; 3113 break; 3114 } 3115 if (i == 33) { 3116 printk(KERN_INFO "%s: open: Could not find a valid PHY.\n", 3117 pci_name(pci_dev)); 3118 goto out_freering; 3119 } 3120 3121 /* reset it */ 3122 phy_init(dev); 3123 3124 /* set default link speed settings */ 3125 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3126 np->duplex = 0; 3127 np->autoneg = 1; 3128 3129 err = register_netdev(dev); 3130 if (err) { 3131 printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err); 3132 goto out_freering; 3133 } 3134 printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n", 3135 dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device, 3136 pci_name(pci_dev)); 3137 3138 return 0; 3139 3140out_freering: 3141 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 3142 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), 3143 np->rx_ring.orig, np->ring_addr); 3144 else 3145 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), 3146 np->rx_ring.ex, np->ring_addr); 3147 pci_set_drvdata(pci_dev, NULL); 3148out_unmap: 3149 iounmap(get_hwbase(dev)); 3150out_relreg: 3151 pci_release_regions(pci_dev); 3152out_disable: 3153 pci_disable_device(pci_dev); 3154out_free: 3155 free_netdev(dev); 3156out: 3157 return err; 3158} 3159 3160static void __devexit nv_remove(struct pci_dev *pci_dev) 3161{ 3162 struct net_device 
	*dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);

	unregister_netdev(dev);

	/* free all structures */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
	else
		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}

static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
	},
	{0,},
};

static struct pci_driver driver = {
	.name = "forcedeth",
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
};


static int __init init_nic(void)
{
	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
	return pci_module_init(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
module_param(disable_msix, int, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1.");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);
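
/*
 * Usage sketch: the module parameters declared above can be set when the
 * module is loaded. A purely illustrative invocation, with made-up values,
 * might look like:
 *
 *   modprobe forcedeth optimization_mode=1 poll_interval=100 disable_msi=1
 *
 * optimization_mode=1 selects the timer-driven CPU mode; poll_interval then
 * scales the timer interrupt frequency as described in its parameter
 * description above. The values shown are examples only.
 */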