/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,5,6 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Changelog:
 * 	0.01: 05 Oct 2003: First release that compiles without warnings.
 * 	0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 * 			   Check all PCI BARs for the register window.
 * 			   udelay added to mii_rw.
 * 	0.03: 06 Oct 2003: Initialize dev->irq.
 * 	0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 * 	0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 * 	0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 * 			   irq mask updated
 * 	0.07: 14 Oct 2003: Further irq mask updates.
 * 	0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 * 			   added into irq handler, NULL check for drain_ring.
 * 	0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 * 			   requested interrupt sources.
 * 	0.10: 20 Oct 2003: First cleanup for release.
 * 	0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 * 			   MAC Address init fix, set_multicast cleanup.
 * 	0.12: 23 Oct 2003: Cleanups for release.
 * 	0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 * 			   Set link speed correctly. start rx before starting
 * 			   tx (nv_start_rx sets the link speed).
 * 	0.14: 25 Oct 2003: Nic dependent irq mask.
 * 	0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 * 			   open.
 * 	0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 * 			   increased to 1628 bytes.
 * 	0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 * 			   the tx length.
 * 	0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 * 	0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 * 			   addresses, really stop rx if already running
 * 			   in nv_start_rx, clean up a bit.
 * 	0.20: 07 Dec 2003: alloc fixes
 * 	0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 * 	0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 * 			   on close.
 * 	0.23: 26 Jan 2004: various small cleanups
 * 	0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 * 	0.25: 09 Mar 2004: wol support
 * 	0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 * 	0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 * 			   added CK804/MCP04 device IDs, code fixes
 * 			   for registers, link status and other minor fixes.
 * 	0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 * 	0.29: 31 Aug 2004: Add backup timer for link change notification.
 * 	0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 * 			   into nv_close, otherwise reenabling for wol can
 * 			   cause DMA to kfree'd memory.
 * 	0.31: 14 Nov 2004: ethtool support for getting/setting link
 * 			   capabilities.
 * 	0.32: 16 Apr 2005: RX_ERROR4 handling added.
 * 	0.33: 16 May 2005: Support for MCP51 added.
 * 	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 * 	0.35: 26 Jun 2005: Support for MCP55 added.
 * 	0.36: 28 Jun 2005: Add jumbo frame support.
 * 	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 * 	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 * 			   per-packet flags.
 * 	0.39: 18 Jul 2005: Add 64bit descriptor support.
 * 	0.40: 19 Jul 2005: Add support for mac address change.
 * 	0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 * 			   of nv_remove
 * 	0.42: 06 Aug 2005: Fix lack of link speed initialization
 * 			   in the second (and later) nv_open call
 * 	0.43: 10 Aug 2005: Add support for tx checksum.
 * 	0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 * 	0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 * 	0.46: 20 Oct 2005: Add irq optimization modes.
 * 	0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 * 	0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 * 	0.49: 10 Dec 2005: Fix tso for large buffers.
 * 	0.50: 20 Jan 2006: Add 8021pq tagging support.
 * 	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 * 	0.52: 20 Jan 2006: Add MSI/MSIX support.
 * 	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 * 	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 * 	0.55: 22 Mar 2006: Add flow control (pause frame).
 * 	0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
 * 	0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
 * 	0.58: 30 Oct 2006: Added support for sideband management unit.
 * 	0.59: 30 Oct 2006: Added support for recoverable error.
 * 	0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
125 */ 126#ifdef CONFIG_FORCEDETH_NAPI 127#define DRIVERNAPI "-NAPI" 128#else 129#define DRIVERNAPI 130#endif 131#define FORCEDETH_VERSION "0.60" 132#define DRV_NAME "forcedeth" 133 134#include <linux/module.h> 135#include <linux/types.h> 136#include <linux/pci.h> 137#include <linux/interrupt.h> 138#include <linux/netdevice.h> 139#include <linux/etherdevice.h> 140#include <linux/delay.h> 141#include <linux/spinlock.h> 142#include <linux/ethtool.h> 143#include <linux/timer.h> 144#include <linux/skbuff.h> 145#include <linux/mii.h> 146#include <linux/random.h> 147#include <linux/init.h> 148#include <linux/if_vlan.h> 149#include <linux/dma-mapping.h> 150 151#include <asm/irq.h> 152#include <asm/io.h> 153#include <asm/uaccess.h> 154#include <asm/system.h> 155 156#if 0 157#define dprintk printk 158#else 159#define dprintk(x...) do { } while (0) 160#endif 161 162 163/* 164 * Hardware access: 165 */ 166 167#define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */ 168#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */ 169#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */ 170#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */ 171#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */ 172#define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */ 173#define DEV_HAS_MSI 0x0040 /* device supports MSI */ 174#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ 175#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ 176#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ 177#define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */ 178#define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */ 179#define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */ 180#define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */ 181 182enum { 183 NvRegIrqStatus = 0x000, 184#define NVREG_IRQSTAT_MIIEVENT 0x040 185#define NVREG_IRQSTAT_MASK 0x81ff 186 NvRegIrqMask = 0x004, 187#define NVREG_IRQ_RX_ERROR 0x0001 188#define NVREG_IRQ_RX 0x0002 189#define NVREG_IRQ_RX_NOBUF 0x0004 190#define NVREG_IRQ_TX_ERR 0x0008 191#define NVREG_IRQ_TX_OK 0x0010 192#define NVREG_IRQ_TIMER 0x0020 193#define NVREG_IRQ_LINK 0x0040 194#define NVREG_IRQ_RX_FORCED 0x0080 195#define NVREG_IRQ_TX_FORCED 0x0100 196#define NVREG_IRQ_RECOVER_ERROR 0x8000 197#define NVREG_IRQMASK_THROUGHPUT 0x00df 198#define NVREG_IRQMASK_CPU 0x0040 199#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) 200#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED) 201#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR) 202 203#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \ 204 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \ 205 NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR)) 206 207 NvRegUnknownSetupReg6 = 0x008, 208#define NVREG_UNKSETUP6_VAL 3 209 210/* 211 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic 212 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms 213 */ 214 NvRegPollingInterval = 0x00c, 215#define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */ 216#define NVREG_POLL_DEFAULT_CPU 13 217 NvRegMSIMap0 = 0x020, 218 NvRegMSIMap1 = 0x024, 219 
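/*
 * Rough real-time equivalents of the NvRegPollingInterval defaults above,
 * using the [(time_in_micro_secs * 100) / (2^10)] conversion documented at
 * the poll_interval module parameter further down:
 *	NVREG_POLL_DEFAULT_THROUGHPUT	970 -> ~9.9 ms (about 100 interrupts/s)
 *	NVREG_POLL_DEFAULT_CPU		 13 -> ~133 us
 * and the value 97 mentioned above comes out to ~1 ms.
 */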
NvRegMSIIrqMask = 0x030, 220#define NVREG_MSI_VECTOR_0_ENABLED 0x01 221 NvRegMisc1 = 0x080, 222#define NVREG_MISC1_PAUSE_TX 0x01 223#define NVREG_MISC1_HD 0x02 224#define NVREG_MISC1_FORCE 0x3b0f3c 225 226 NvRegMacReset = 0x3c, 227#define NVREG_MAC_RESET_ASSERT 0x0F3 228 NvRegTransmitterControl = 0x084, 229#define NVREG_XMITCTL_START 0x01 230#define NVREG_XMITCTL_MGMT_ST 0x40000000 231#define NVREG_XMITCTL_SYNC_MASK 0x000f0000 232#define NVREG_XMITCTL_SYNC_NOT_READY 0x0 233#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000 234#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00 235#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0 236#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000 237#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000 238#define NVREG_XMITCTL_HOST_LOADED 0x00004000 239#define NVREG_XMITCTL_TX_PATH_EN 0x01000000 240 NvRegTransmitterStatus = 0x088, 241#define NVREG_XMITSTAT_BUSY 0x01 242 243 NvRegPacketFilterFlags = 0x8c, 244#define NVREG_PFF_PAUSE_RX 0x08 245#define NVREG_PFF_ALWAYS 0x7F0000 246#define NVREG_PFF_PROMISC 0x80 247#define NVREG_PFF_MYADDR 0x20 248#define NVREG_PFF_LOOPBACK 0x10 249 250 NvRegOffloadConfig = 0x90, 251#define NVREG_OFFLOAD_HOMEPHY 0x601 252#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE 253 NvRegReceiverControl = 0x094, 254#define NVREG_RCVCTL_START 0x01 255#define NVREG_RCVCTL_RX_PATH_EN 0x01000000 256 NvRegReceiverStatus = 0x98, 257#define NVREG_RCVSTAT_BUSY 0x01 258 259 NvRegRandomSeed = 0x9c, 260#define NVREG_RNDSEED_MASK 0x00ff 261#define NVREG_RNDSEED_FORCE 0x7f00 262#define NVREG_RNDSEED_FORCE2 0x2d00 263#define NVREG_RNDSEED_FORCE3 0x7400 264 265 NvRegTxDeferral = 0xA0, 266#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f 267#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f 268#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f 269 NvRegRxDeferral = 0xA4, 270#define NVREG_RX_DEFERRAL_DEFAULT 0x16 271 NvRegMacAddrA = 0xA8, 272 NvRegMacAddrB = 0xAC, 273 NvRegMulticastAddrA = 0xB0, 274#define NVREG_MCASTADDRA_FORCE 0x01 275 NvRegMulticastAddrB = 0xB4, 276 NvRegMulticastMaskA = 0xB8, 277 NvRegMulticastMaskB = 0xBC, 278 279 NvRegPhyInterface = 0xC0, 280#define PHY_RGMII 0x10000000 281 282 NvRegTxRingPhysAddr = 0x100, 283 NvRegRxRingPhysAddr = 0x104, 284 NvRegRingSizes = 0x108, 285#define NVREG_RINGSZ_TXSHIFT 0 286#define NVREG_RINGSZ_RXSHIFT 16 287 NvRegTransmitPoll = 0x10c, 288#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000 289 NvRegLinkSpeed = 0x110, 290#define NVREG_LINKSPEED_FORCE 0x10000 291#define NVREG_LINKSPEED_10 1000 292#define NVREG_LINKSPEED_100 100 293#define NVREG_LINKSPEED_1000 50 294#define NVREG_LINKSPEED_MASK (0xFFF) 295 NvRegUnknownSetupReg5 = 0x130, 296#define NVREG_UNKSETUP5_BIT31 (1<<31) 297 NvRegTxWatermark = 0x13c, 298#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010 299#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000 300#define NVREG_TX_WM_DESC2_3_1000 0xfe08000 301 NvRegTxRxControl = 0x144, 302#define NVREG_TXRXCTL_KICK 0x0001 303#define NVREG_TXRXCTL_BIT1 0x0002 304#define NVREG_TXRXCTL_BIT2 0x0004 305#define NVREG_TXRXCTL_IDLE 0x0008 306#define NVREG_TXRXCTL_RESET 0x0010 307#define NVREG_TXRXCTL_RXCHECK 0x0400 308#define NVREG_TXRXCTL_DESC_1 0 309#define NVREG_TXRXCTL_DESC_2 0x002100 310#define NVREG_TXRXCTL_DESC_3 0xc02200 311#define NVREG_TXRXCTL_VLANSTRIP 0x00040 312#define NVREG_TXRXCTL_VLANINS 0x00080 313 NvRegTxRingPhysAddrHigh = 0x148, 314 NvRegRxRingPhysAddrHigh = 0x14C, 315 NvRegTxPauseFrame = 0x170, 316#define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080 317#define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030 318 NvRegMIIStatus = 0x180, 319#define NVREG_MIISTAT_ERROR 
0x0001 320#define NVREG_MIISTAT_LINKCHANGE 0x0008 321#define NVREG_MIISTAT_MASK 0x000f 322#define NVREG_MIISTAT_MASK2 0x000f 323 NvRegMIIMask = 0x184, 324#define NVREG_MII_LINKCHANGE 0x0008 325 326 NvRegAdapterControl = 0x188, 327#define NVREG_ADAPTCTL_START 0x02 328#define NVREG_ADAPTCTL_LINKUP 0x04 329#define NVREG_ADAPTCTL_PHYVALID 0x40000 330#define NVREG_ADAPTCTL_RUNNING 0x100000 331#define NVREG_ADAPTCTL_PHYSHIFT 24 332 NvRegMIISpeed = 0x18c, 333#define NVREG_MIISPEED_BIT8 (1<<8) 334#define NVREG_MIIDELAY 5 335 NvRegMIIControl = 0x190, 336#define NVREG_MIICTL_INUSE 0x08000 337#define NVREG_MIICTL_WRITE 0x00400 338#define NVREG_MIICTL_ADDRSHIFT 5 339 NvRegMIIData = 0x194, 340 NvRegWakeUpFlags = 0x200, 341#define NVREG_WAKEUPFLAGS_VAL 0x7770 342#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 343#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16 344#define NVREG_WAKEUPFLAGS_D3SHIFT 12 345#define NVREG_WAKEUPFLAGS_D2SHIFT 8 346#define NVREG_WAKEUPFLAGS_D1SHIFT 4 347#define NVREG_WAKEUPFLAGS_D0SHIFT 0 348#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01 349#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02 350#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04 351#define NVREG_WAKEUPFLAGS_ENABLE 0x1111 352 353 NvRegPatternCRC = 0x204, 354 NvRegPatternMask = 0x208, 355 NvRegPowerCap = 0x268, 356#define NVREG_POWERCAP_D3SUPP (1<<30) 357#define NVREG_POWERCAP_D2SUPP (1<<26) 358#define NVREG_POWERCAP_D1SUPP (1<<25) 359 NvRegPowerState = 0x26c, 360#define NVREG_POWERSTATE_POWEREDUP 0x8000 361#define NVREG_POWERSTATE_VALID 0x0100 362#define NVREG_POWERSTATE_MASK 0x0003 363#define NVREG_POWERSTATE_D0 0x0000 364#define NVREG_POWERSTATE_D1 0x0001 365#define NVREG_POWERSTATE_D2 0x0002 366#define NVREG_POWERSTATE_D3 0x0003 367 NvRegTxCnt = 0x280, 368 NvRegTxZeroReXmt = 0x284, 369 NvRegTxOneReXmt = 0x288, 370 NvRegTxManyReXmt = 0x28c, 371 NvRegTxLateCol = 0x290, 372 NvRegTxUnderflow = 0x294, 373 NvRegTxLossCarrier = 0x298, 374 NvRegTxExcessDef = 0x29c, 375 NvRegTxRetryErr = 0x2a0, 376 NvRegRxFrameErr = 0x2a4, 377 NvRegRxExtraByte = 0x2a8, 378 NvRegRxLateCol = 0x2ac, 379 NvRegRxRunt = 0x2b0, 380 NvRegRxFrameTooLong = 0x2b4, 381 NvRegRxOverflow = 0x2b8, 382 NvRegRxFCSErr = 0x2bc, 383 NvRegRxFrameAlignErr = 0x2c0, 384 NvRegRxLenErr = 0x2c4, 385 NvRegRxUnicast = 0x2c8, 386 NvRegRxMulticast = 0x2cc, 387 NvRegRxBroadcast = 0x2d0, 388 NvRegTxDef = 0x2d4, 389 NvRegTxFrame = 0x2d8, 390 NvRegRxCnt = 0x2dc, 391 NvRegTxPause = 0x2e0, 392 NvRegRxPause = 0x2e4, 393 NvRegRxDropFrame = 0x2e8, 394 NvRegVlanControl = 0x300, 395#define NVREG_VLANCONTROL_ENABLE 0x2000 396 NvRegMSIXMap0 = 0x3e0, 397 NvRegMSIXMap1 = 0x3e4, 398 NvRegMSIXIrqStatus = 0x3f0, 399 400 NvRegPowerState2 = 0x600, 401#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11 402#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001 403}; 404 405/* Big endian: should work, but is untested */ 406struct ring_desc { 407 __le32 buf; 408 __le32 flaglen; 409}; 410 411struct ring_desc_ex { 412 __le32 bufhigh; 413 __le32 buflow; 414 __le32 txvlan; 415 __le32 flaglen; 416}; 417 418union ring_type { 419 struct ring_desc* orig; 420 struct ring_desc_ex* ex; 421}; 422 423#define FLAG_MASK_V1 0xffff0000 424#define FLAG_MASK_V2 0xffffc000 425#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1) 426#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2) 427 428#define NV_TX_LASTPACKET (1<<16) 429#define NV_TX_RETRYERROR (1<<19) 430#define NV_TX_FORCED_INTERRUPT (1<<24) 431#define NV_TX_DEFERRED (1<<26) 432#define NV_TX_CARRIERLOST (1<<27) 433#define NV_TX_LATECOLLISION (1<<28) 434#define NV_TX_UNDERFLOW (1<<29) 435#define 
NV_TX_ERROR (1<<30) 436#define NV_TX_VALID (1<<31) 437 438#define NV_TX2_LASTPACKET (1<<29) 439#define NV_TX2_RETRYERROR (1<<18) 440#define NV_TX2_FORCED_INTERRUPT (1<<30) 441#define NV_TX2_DEFERRED (1<<25) 442#define NV_TX2_CARRIERLOST (1<<26) 443#define NV_TX2_LATECOLLISION (1<<27) 444#define NV_TX2_UNDERFLOW (1<<28) 445/* error and valid are the same for both */ 446#define NV_TX2_ERROR (1<<30) 447#define NV_TX2_VALID (1<<31) 448#define NV_TX2_TSO (1<<28) 449#define NV_TX2_TSO_SHIFT 14 450#define NV_TX2_TSO_MAX_SHIFT 14 451#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT) 452#define NV_TX2_CHECKSUM_L3 (1<<27) 453#define NV_TX2_CHECKSUM_L4 (1<<26) 454 455#define NV_TX3_VLAN_TAG_PRESENT (1<<18) 456 457#define NV_RX_DESCRIPTORVALID (1<<16) 458#define NV_RX_MISSEDFRAME (1<<17) 459#define NV_RX_SUBSTRACT1 (1<<18) 460#define NV_RX_ERROR1 (1<<23) 461#define NV_RX_ERROR2 (1<<24) 462#define NV_RX_ERROR3 (1<<25) 463#define NV_RX_ERROR4 (1<<26) 464#define NV_RX_CRCERR (1<<27) 465#define NV_RX_OVERFLOW (1<<28) 466#define NV_RX_FRAMINGERR (1<<29) 467#define NV_RX_ERROR (1<<30) 468#define NV_RX_AVAIL (1<<31) 469 470#define NV_RX2_CHECKSUMMASK (0x1C000000) 471#define NV_RX2_CHECKSUMOK1 (0x10000000) 472#define NV_RX2_CHECKSUMOK2 (0x14000000) 473#define NV_RX2_CHECKSUMOK3 (0x18000000) 474#define NV_RX2_DESCRIPTORVALID (1<<29) 475#define NV_RX2_SUBSTRACT1 (1<<25) 476#define NV_RX2_ERROR1 (1<<18) 477#define NV_RX2_ERROR2 (1<<19) 478#define NV_RX2_ERROR3 (1<<20) 479#define NV_RX2_ERROR4 (1<<21) 480#define NV_RX2_CRCERR (1<<22) 481#define NV_RX2_OVERFLOW (1<<23) 482#define NV_RX2_FRAMINGERR (1<<24) 483/* error and avail are the same for both */ 484#define NV_RX2_ERROR (1<<30) 485#define NV_RX2_AVAIL (1<<31) 486 487#define NV_RX3_VLAN_TAG_PRESENT (1<<16) 488#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) 489 490/* Miscelaneous hardware related defines: */ 491#define NV_PCI_REGSZ_VER1 0x270 492#define NV_PCI_REGSZ_VER2 0x2d4 493#define NV_PCI_REGSZ_VER3 0x604 494 495/* various timeout delays: all in usec */ 496#define NV_TXRX_RESET_DELAY 4 497#define NV_TXSTOP_DELAY1 10 498#define NV_TXSTOP_DELAY1MAX 500000 499#define NV_TXSTOP_DELAY2 100 500#define NV_RXSTOP_DELAY1 10 501#define NV_RXSTOP_DELAY1MAX 500000 502#define NV_RXSTOP_DELAY2 100 503#define NV_SETUP5_DELAY 5 504#define NV_SETUP5_DELAYMAX 50000 505#define NV_POWERUP_DELAY 5 506#define NV_POWERUP_DELAYMAX 5000 507#define NV_MIIBUSY_DELAY 50 508#define NV_MIIPHY_DELAY 10 509#define NV_MIIPHY_DELAYMAX 10000 510#define NV_MAC_RESET_DELAY 64 511 512#define NV_WAKEUPPATTERNS 5 513#define NV_WAKEUPMASKENTRIES 4 514 515/* General driver defaults */ 516#define NV_WATCHDOG_TIMEO (5*HZ) 517 518#define RX_RING_DEFAULT 128 519#define TX_RING_DEFAULT 256 520#define RX_RING_MIN 128 521#define TX_RING_MIN 64 522#define RING_MAX_DESC_VER_1 1024 523#define RING_MAX_DESC_VER_2_3 16384 524 525/* rx/tx mac addr + type + vlan + align + slack*/ 526#define NV_RX_HEADERS (64) 527/* even more slack. */ 528#define NV_RX_ALLOC_PAD (64) 529 530/* maximum mtu size */ 531#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */ 532#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */ 533 534#define OOM_REFILL (1+HZ/20) 535#define POLL_WAIT (1+HZ/100) 536#define LINK_TIMEOUT (3*HZ) 537#define STATS_INTERVAL (10*HZ) 538 539/* 540 * desc_ver values: 541 * The nic supports three different descriptor types: 542 * - DESC_VER_1: Original 543 * - DESC_VER_2: support for jumbo frames. 544 * - DESC_VER_3: 64-bit format. 
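 *
 * DESC_VER_1 and DESC_VER_2 rings are made of struct ring_desc (the .orig
 * member of union ring_type); DESC_VER_3 rings use struct ring_desc_ex
 * (the .ex member), which splits the 64-bit buffer address into
 * bufhigh/buflow and adds a vlan word.  The driver picks the layout with
 * the pattern used throughout this file:
 *
 *	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 *		... operate on np->rx_ring.orig / np->tx_ring.orig ...
 *	else
 *		... operate on np->rx_ring.ex / np->tx_ring.ex ...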
545 */ 546#define DESC_VER_1 1 547#define DESC_VER_2 2 548#define DESC_VER_3 3 549 550/* PHY defines */ 551#define PHY_OUI_MARVELL 0x5043 552#define PHY_OUI_CICADA 0x03f1 553#define PHYID1_OUI_MASK 0x03ff 554#define PHYID1_OUI_SHFT 6 555#define PHYID2_OUI_MASK 0xfc00 556#define PHYID2_OUI_SHFT 10 557#define PHYID2_MODEL_MASK 0x03f0 558#define PHY_MODEL_MARVELL_E3016 0x220 559#define PHY_MARVELL_E3016_INITMASK 0x0300 560#define PHY_INIT1 0x0f000 561#define PHY_INIT2 0x0e00 562#define PHY_INIT3 0x01000 563#define PHY_INIT4 0x0200 564#define PHY_INIT5 0x0004 565#define PHY_INIT6 0x02000 566#define PHY_GIGABIT 0x0100 567 568#define PHY_TIMEOUT 0x1 569#define PHY_ERROR 0x2 570 571#define PHY_100 0x1 572#define PHY_1000 0x2 573#define PHY_HALF 0x100 574 575#define NV_PAUSEFRAME_RX_CAPABLE 0x0001 576#define NV_PAUSEFRAME_TX_CAPABLE 0x0002 577#define NV_PAUSEFRAME_RX_ENABLE 0x0004 578#define NV_PAUSEFRAME_TX_ENABLE 0x0008 579#define NV_PAUSEFRAME_RX_REQ 0x0010 580#define NV_PAUSEFRAME_TX_REQ 0x0020 581#define NV_PAUSEFRAME_AUTONEG 0x0040 582 583/* MSI/MSI-X defines */ 584#define NV_MSI_X_MAX_VECTORS 8 585#define NV_MSI_X_VECTORS_MASK 0x000f 586#define NV_MSI_CAPABLE 0x0010 587#define NV_MSI_X_CAPABLE 0x0020 588#define NV_MSI_ENABLED 0x0040 589#define NV_MSI_X_ENABLED 0x0080 590 591#define NV_MSI_X_VECTOR_ALL 0x0 592#define NV_MSI_X_VECTOR_RX 0x0 593#define NV_MSI_X_VECTOR_TX 0x1 594#define NV_MSI_X_VECTOR_OTHER 0x2 595 596/* statistics */ 597struct nv_ethtool_str { 598 char name[ETH_GSTRING_LEN]; 599}; 600 601static const struct nv_ethtool_str nv_estats_str[] = { 602 { "tx_bytes" }, 603 { "tx_zero_rexmt" }, 604 { "tx_one_rexmt" }, 605 { "tx_many_rexmt" }, 606 { "tx_late_collision" }, 607 { "tx_fifo_errors" }, 608 { "tx_carrier_errors" }, 609 { "tx_excess_deferral" }, 610 { "tx_retry_error" }, 611 { "rx_frame_error" }, 612 { "rx_extra_byte" }, 613 { "rx_late_collision" }, 614 { "rx_runt" }, 615 { "rx_frame_too_long" }, 616 { "rx_over_errors" }, 617 { "rx_crc_errors" }, 618 { "rx_frame_align_error" }, 619 { "rx_length_error" }, 620 { "rx_unicast" }, 621 { "rx_multicast" }, 622 { "rx_broadcast" }, 623 { "rx_packets" }, 624 { "rx_errors_total" }, 625 { "tx_errors_total" }, 626 627 /* version 2 stats */ 628 { "tx_deferral" }, 629 { "tx_packets" }, 630 { "rx_bytes" }, 631 { "tx_pause" }, 632 { "rx_pause" }, 633 { "rx_drop_frame" } 634}; 635 636struct nv_ethtool_stats { 637 u64 tx_bytes; 638 u64 tx_zero_rexmt; 639 u64 tx_one_rexmt; 640 u64 tx_many_rexmt; 641 u64 tx_late_collision; 642 u64 tx_fifo_errors; 643 u64 tx_carrier_errors; 644 u64 tx_excess_deferral; 645 u64 tx_retry_error; 646 u64 rx_frame_error; 647 u64 rx_extra_byte; 648 u64 rx_late_collision; 649 u64 rx_runt; 650 u64 rx_frame_too_long; 651 u64 rx_over_errors; 652 u64 rx_crc_errors; 653 u64 rx_frame_align_error; 654 u64 rx_length_error; 655 u64 rx_unicast; 656 u64 rx_multicast; 657 u64 rx_broadcast; 658 u64 rx_packets; 659 u64 rx_errors_total; 660 u64 tx_errors_total; 661 662 /* version 2 stats */ 663 u64 tx_deferral; 664 u64 tx_packets; 665 u64 rx_bytes; 666 u64 tx_pause; 667 u64 rx_pause; 668 u64 rx_drop_frame; 669}; 670 671#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) 672#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) 673 674/* diagnostics */ 675#define NV_TEST_COUNT_BASE 3 676#define NV_TEST_COUNT_EXTENDED 4 677 678static const struct nv_ethtool_str nv_etests_str[] = { 679 { "link (online/offline)" }, 680 { "register (offline) " }, 681 { "interrupt (offline) " }, 682 { "loopback 
(offline) " }
};

struct register_test {
	__le32 reg;
	__le32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0,0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
};

/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	u16 gigabit;
	int intr_test;
	int recover_error;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_DISABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
				int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags &
NV_MSI_X_ENABLED) || 949 ((np->msi_flags & NV_MSI_X_ENABLED) && 950 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) 951 return 0; 952 else 953 return 1; 954} 955 956static void nv_enable_irq(struct net_device *dev) 957{ 958 struct fe_priv *np = get_nvpriv(dev); 959 960 if (!using_multi_irqs(dev)) { 961 if (np->msi_flags & NV_MSI_X_ENABLED) 962 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 963 else 964 enable_irq(dev->irq); 965 } else { 966 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 967 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 968 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 969 } 970} 971 972static void nv_disable_irq(struct net_device *dev) 973{ 974 struct fe_priv *np = get_nvpriv(dev); 975 976 if (!using_multi_irqs(dev)) { 977 if (np->msi_flags & NV_MSI_X_ENABLED) 978 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 979 else 980 disable_irq(dev->irq); 981 } else { 982 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 983 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 984 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 985 } 986} 987 988/* In MSIX mode, a write to irqmask behaves as XOR */ 989static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask) 990{ 991 u8 __iomem *base = get_hwbase(dev); 992 993 writel(mask, base + NvRegIrqMask); 994} 995 996static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask) 997{ 998 struct fe_priv *np = get_nvpriv(dev); 999 u8 __iomem *base = get_hwbase(dev); 1000 1001 if (np->msi_flags & NV_MSI_X_ENABLED) { 1002 writel(mask, base + NvRegIrqMask); 1003 } else { 1004 if (np->msi_flags & NV_MSI_ENABLED) 1005 writel(0, base + NvRegMSIIrqMask); 1006 writel(0, base + NvRegIrqMask); 1007 } 1008} 1009 1010#define MII_READ (-1) 1011/* mii_rw: read/write a register on the PHY. 
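 * Pass MII_READ (-1) as the value to read a register; any other value is
 * written to the register.  Returns the register contents for a successful
 * read, 0 for a successful write, and -1 on timeout or a flagged MII error.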
1012 * 1013 * Caller must guarantee serialization 1014 */ 1015static int mii_rw(struct net_device *dev, int addr, int miireg, int value) 1016{ 1017 u8 __iomem *base = get_hwbase(dev); 1018 u32 reg; 1019 int retval; 1020 1021 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 1022 1023 reg = readl(base + NvRegMIIControl); 1024 if (reg & NVREG_MIICTL_INUSE) { 1025 writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl); 1026 udelay(NV_MIIBUSY_DELAY); 1027 } 1028 1029 reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg; 1030 if (value != MII_READ) { 1031 writel(value, base + NvRegMIIData); 1032 reg |= NVREG_MIICTL_WRITE; 1033 } 1034 writel(reg, base + NvRegMIIControl); 1035 1036 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0, 1037 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) { 1038 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n", 1039 dev->name, miireg, addr); 1040 retval = -1; 1041 } else if (value != MII_READ) { 1042 /* it was a write operation - fewer failures are detectable */ 1043 dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n", 1044 dev->name, value, miireg, addr); 1045 retval = 0; 1046 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) { 1047 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n", 1048 dev->name, miireg, addr); 1049 retval = -1; 1050 } else { 1051 retval = readl(base + NvRegMIIData); 1052 dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n", 1053 dev->name, miireg, addr, retval); 1054 } 1055 1056 return retval; 1057} 1058 1059static int phy_reset(struct net_device *dev, u32 bmcr_setup) 1060{ 1061 struct fe_priv *np = netdev_priv(dev); 1062 u32 miicontrol; 1063 unsigned int tries = 0; 1064 1065 miicontrol = BMCR_RESET | bmcr_setup; 1066 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { 1067 return -1; 1068 } 1069 1070 /* wait for 500ms */ 1071 msleep(500); 1072 1073 /* must wait till reset is deasserted */ 1074 while (miicontrol & BMCR_RESET) { 1075 msleep(10); 1076 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1077 /* FIXME: 100 tries seem excessive */ 1078 if (tries++ > 100) 1079 return -1; 1080 } 1081 return 0; 1082} 1083 1084static int phy_init(struct net_device *dev) 1085{ 1086 struct fe_priv *np = get_nvpriv(dev); 1087 u8 __iomem *base = get_hwbase(dev); 1088 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; 1089 1090 /* phy errata for E3016 phy */ 1091 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 1092 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1093 reg &= ~PHY_MARVELL_E3016_INITMASK; 1094 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { 1095 printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev)); 1096 return PHY_ERROR; 1097 } 1098 } 1099 1100 /* set advertise register */ 1101 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 1102 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); 1103 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { 1104 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); 1105 return PHY_ERROR; 1106 } 1107 1108 /* get phy interface type */ 1109 phyinterface = readl(base + NvRegPhyInterface); 1110 1111 /* see if gigabit phy */ 1112 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 1113 if (mii_status & PHY_GIGABIT) { 1114 np->gigabit = PHY_GIGABIT; 1115 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 1116 mii_control_1000 &= ~ADVERTISE_1000HALF; 1117 if 
(phyinterface & PHY_RGMII) 1118 mii_control_1000 |= ADVERTISE_1000FULL; 1119 else 1120 mii_control_1000 &= ~ADVERTISE_1000FULL; 1121 1122 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { 1123 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1124 return PHY_ERROR; 1125 } 1126 } 1127 else 1128 np->gigabit = 0; 1129 1130 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1131 mii_control |= BMCR_ANENABLE; 1132 1133 /* reset the phy 1134 * (certain phys need bmcr to be setup with reset) 1135 */ 1136 if (phy_reset(dev, mii_control)) { 1137 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); 1138 return PHY_ERROR; 1139 } 1140 1141 /* phy vendor specific configuration */ 1142 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { 1143 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); 1144 phy_reserved &= ~(PHY_INIT1 | PHY_INIT2); 1145 phy_reserved |= (PHY_INIT3 | PHY_INIT4); 1146 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) { 1147 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1148 return PHY_ERROR; 1149 } 1150 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1151 phy_reserved |= PHY_INIT5; 1152 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) { 1153 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1154 return PHY_ERROR; 1155 } 1156 } 1157 if (np->phy_oui == PHY_OUI_CICADA) { 1158 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); 1159 phy_reserved |= PHY_INIT6; 1160 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) { 1161 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1162 return PHY_ERROR; 1163 } 1164 } 1165 /* some phys clear out pause advertisment on reset, set it back */ 1166 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); 1167 1168 /* restart auto negotiation */ 1169 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1170 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 1171 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1172 return PHY_ERROR; 1173 } 1174 1175 return 0; 1176} 1177 1178static void nv_start_rx(struct net_device *dev) 1179{ 1180 struct fe_priv *np = netdev_priv(dev); 1181 u8 __iomem *base = get_hwbase(dev); 1182 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1183 1184 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); 1185 /* Already running? Stop it. 
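	 * The receiver is stopped here (unless the management unit owns the
	 * MAC, i.e. np->mac_in_use is set) and restarted further down, after
	 * NvRegLinkSpeed has been rewritten.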
*/ 1186 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { 1187 rx_ctrl &= ~NVREG_RCVCTL_START; 1188 writel(rx_ctrl, base + NvRegReceiverControl); 1189 pci_push(base); 1190 } 1191 writel(np->linkspeed, base + NvRegLinkSpeed); 1192 pci_push(base); 1193 rx_ctrl |= NVREG_RCVCTL_START; 1194 if (np->mac_in_use) 1195 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; 1196 writel(rx_ctrl, base + NvRegReceiverControl); 1197 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", 1198 dev->name, np->duplex, np->linkspeed); 1199 pci_push(base); 1200} 1201 1202static void nv_stop_rx(struct net_device *dev) 1203{ 1204 struct fe_priv *np = netdev_priv(dev); 1205 u8 __iomem *base = get_hwbase(dev); 1206 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1207 1208 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); 1209 if (!np->mac_in_use) 1210 rx_ctrl &= ~NVREG_RCVCTL_START; 1211 else 1212 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; 1213 writel(rx_ctrl, base + NvRegReceiverControl); 1214 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, 1215 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, 1216 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); 1217 1218 udelay(NV_RXSTOP_DELAY2); 1219 if (!np->mac_in_use) 1220 writel(0, base + NvRegLinkSpeed); 1221} 1222 1223static void nv_start_tx(struct net_device *dev) 1224{ 1225 struct fe_priv *np = netdev_priv(dev); 1226 u8 __iomem *base = get_hwbase(dev); 1227 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1228 1229 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name); 1230 tx_ctrl |= NVREG_XMITCTL_START; 1231 if (np->mac_in_use) 1232 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; 1233 writel(tx_ctrl, base + NvRegTransmitterControl); 1234 pci_push(base); 1235} 1236 1237static void nv_stop_tx(struct net_device *dev) 1238{ 1239 struct fe_priv *np = netdev_priv(dev); 1240 u8 __iomem *base = get_hwbase(dev); 1241 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1242 1243 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); 1244 if (!np->mac_in_use) 1245 tx_ctrl &= ~NVREG_XMITCTL_START; 1246 else 1247 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; 1248 writel(tx_ctrl, base + NvRegTransmitterControl); 1249 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, 1250 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, 1251 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); 1252 1253 udelay(NV_TXSTOP_DELAY2); 1254 if (!np->mac_in_use) 1255 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, 1256 base + NvRegTransmitPoll); 1257} 1258 1259static void nv_txrx_reset(struct net_device *dev) 1260{ 1261 struct fe_priv *np = netdev_priv(dev); 1262 u8 __iomem *base = get_hwbase(dev); 1263 1264 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name); 1265 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1266 pci_push(base); 1267 udelay(NV_TXRX_RESET_DELAY); 1268 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1269 pci_push(base); 1270} 1271 1272static void nv_mac_reset(struct net_device *dev) 1273{ 1274 struct fe_priv *np = netdev_priv(dev); 1275 u8 __iomem *base = get_hwbase(dev); 1276 1277 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name); 1278 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1279 pci_push(base); 1280 writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset); 1281 pci_push(base); 1282 udelay(NV_MAC_RESET_DELAY); 1283 writel(0, base + NvRegMacReset); 1284 pci_push(base); 1285 udelay(NV_MAC_RESET_DELAY); 1286 
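	/* MAC reset is complete: rewrite NvRegTxRxControl without the
	 * NVREG_TXRXCTL_RESET bit that was set at the top of this function. */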
writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1287 pci_push(base); 1288} 1289 1290static void nv_get_hw_stats(struct net_device *dev) 1291{ 1292 struct fe_priv *np = netdev_priv(dev); 1293 u8 __iomem *base = get_hwbase(dev); 1294 1295 np->estats.tx_bytes += readl(base + NvRegTxCnt); 1296 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); 1297 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); 1298 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); 1299 np->estats.tx_late_collision += readl(base + NvRegTxLateCol); 1300 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); 1301 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); 1302 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); 1303 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); 1304 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); 1305 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); 1306 np->estats.rx_late_collision += readl(base + NvRegRxLateCol); 1307 np->estats.rx_runt += readl(base + NvRegRxRunt); 1308 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); 1309 np->estats.rx_over_errors += readl(base + NvRegRxOverflow); 1310 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); 1311 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); 1312 np->estats.rx_length_error += readl(base + NvRegRxLenErr); 1313 np->estats.rx_unicast += readl(base + NvRegRxUnicast); 1314 np->estats.rx_multicast += readl(base + NvRegRxMulticast); 1315 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); 1316 np->estats.rx_packets = 1317 np->estats.rx_unicast + 1318 np->estats.rx_multicast + 1319 np->estats.rx_broadcast; 1320 np->estats.rx_errors_total = 1321 np->estats.rx_crc_errors + 1322 np->estats.rx_over_errors + 1323 np->estats.rx_frame_error + 1324 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + 1325 np->estats.rx_late_collision + 1326 np->estats.rx_runt + 1327 np->estats.rx_frame_too_long; 1328 np->estats.tx_errors_total = 1329 np->estats.tx_late_collision + 1330 np->estats.tx_fifo_errors + 1331 np->estats.tx_carrier_errors + 1332 np->estats.tx_excess_deferral + 1333 np->estats.tx_retry_error; 1334 1335 if (np->driver_data & DEV_HAS_STATISTICS_V2) { 1336 np->estats.tx_deferral += readl(base + NvRegTxDef); 1337 np->estats.tx_packets += readl(base + NvRegTxFrame); 1338 np->estats.rx_bytes += readl(base + NvRegRxCnt); 1339 np->estats.tx_pause += readl(base + NvRegTxPause); 1340 np->estats.rx_pause += readl(base + NvRegRxPause); 1341 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 1342 } 1343} 1344 1345/* 1346 * nv_get_stats: dev->get_stats function 1347 * Get latest stats value from the nic. 1348 * Called with read_lock(&dev_base_lock) held for read - 1349 * only synchronized against unregister_netdevice. 
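 * If the nic exposes hardware counters (DEV_HAS_STATISTICS_V1/V2), they are
 * first folded into np->estats by nv_get_hw_stats() and then copied into the
 * classic net_device_stats fields below.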
1350 */ 1351static struct net_device_stats *nv_get_stats(struct net_device *dev) 1352{ 1353 struct fe_priv *np = netdev_priv(dev); 1354 1355 /* If the nic supports hw counters then retrieve latest values */ 1356 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) { 1357 nv_get_hw_stats(dev); 1358 1359 /* copy to net_device stats */ 1360 np->stats.tx_bytes = np->estats.tx_bytes; 1361 np->stats.tx_fifo_errors = np->estats.tx_fifo_errors; 1362 np->stats.tx_carrier_errors = np->estats.tx_carrier_errors; 1363 np->stats.rx_crc_errors = np->estats.rx_crc_errors; 1364 np->stats.rx_over_errors = np->estats.rx_over_errors; 1365 np->stats.rx_errors = np->estats.rx_errors_total; 1366 np->stats.tx_errors = np->estats.tx_errors_total; 1367 } 1368 return &np->stats; 1369} 1370 1371/* 1372 * nv_alloc_rx: fill rx ring entries. 1373 * Return 1 if the allocations for the skbs failed and the 1374 * rx engine is without Available descriptors 1375 */ 1376static int nv_alloc_rx(struct net_device *dev) 1377{ 1378 struct fe_priv *np = netdev_priv(dev); 1379 struct ring_desc* less_rx; 1380 1381 less_rx = np->get_rx.orig; 1382 if (less_rx-- == np->first_rx.orig) 1383 less_rx = np->last_rx.orig; 1384 1385 while (np->put_rx.orig != less_rx) { 1386 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1387 if (skb) { 1388 skb->dev = dev; 1389 np->put_rx_ctx->skb = skb; 1390 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data, 1391 skb->end-skb->data, PCI_DMA_FROMDEVICE); 1392 np->put_rx_ctx->dma_len = skb->end-skb->data; 1393 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); 1394 wmb(); 1395 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); 1396 if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) 1397 np->put_rx.orig = np->first_rx.orig; 1398 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1399 np->put_rx_ctx = np->first_rx_ctx; 1400 } else { 1401 return 1; 1402 } 1403 } 1404 return 0; 1405} 1406 1407static int nv_alloc_rx_optimized(struct net_device *dev) 1408{ 1409 struct fe_priv *np = netdev_priv(dev); 1410 struct ring_desc_ex* less_rx; 1411 1412 less_rx = np->get_rx.ex; 1413 if (less_rx-- == np->first_rx.ex) 1414 less_rx = np->last_rx.ex; 1415 1416 while (np->put_rx.ex != less_rx) { 1417 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1418 if (skb) { 1419 skb->dev = dev; 1420 np->put_rx_ctx->skb = skb; 1421 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data, 1422 skb->end-skb->data, PCI_DMA_FROMDEVICE); 1423 np->put_rx_ctx->dma_len = skb->end-skb->data; 1424 np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32; 1425 np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF; 1426 wmb(); 1427 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); 1428 if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) 1429 np->put_rx.ex = np->first_rx.ex; 1430 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1431 np->put_rx_ctx = np->first_rx_ctx; 1432 } else { 1433 return 1; 1434 } 1435 } 1436 return 0; 1437} 1438 1439/* If rx bufs are exhausted called after 50ms to attempt to refresh */ 1440#ifdef CONFIG_FORCEDETH_NAPI 1441static void nv_do_rx_refill(unsigned long data) 1442{ 1443 struct net_device *dev = (struct net_device *) data; 1444 1445 /* Just reschedule NAPI rx processing */ 1446 netif_rx_schedule(dev); 1447} 1448#else 1449static void nv_do_rx_refill(unsigned long data) 1450{ 1451 struct net_device *dev = (struct net_device *) data; 1452 struct fe_priv *np = netdev_priv(dev); 1453 int 
retcode; 1454 1455 if (!using_multi_irqs(dev)) { 1456 if (np->msi_flags & NV_MSI_X_ENABLED) 1457 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1458 else 1459 disable_irq(dev->irq); 1460 } else { 1461 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1462 } 1463 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1464 retcode = nv_alloc_rx(dev); 1465 else 1466 retcode = nv_alloc_rx_optimized(dev); 1467 if (retcode) { 1468 spin_lock_irq(&np->lock); 1469 if (!np->in_shutdown) 1470 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 1471 spin_unlock_irq(&np->lock); 1472 } 1473 if (!using_multi_irqs(dev)) { 1474 if (np->msi_flags & NV_MSI_X_ENABLED) 1475 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1476 else 1477 enable_irq(dev->irq); 1478 } else { 1479 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1480 } 1481} 1482#endif 1483 1484static void nv_init_rx(struct net_device *dev) 1485{ 1486 struct fe_priv *np = netdev_priv(dev); 1487 int i; 1488 np->get_rx = np->put_rx = np->first_rx = np->rx_ring; 1489 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1490 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; 1491 else 1492 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; 1493 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb; 1494 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; 1495 1496 for (i = 0; i < np->rx_ring_size; i++) { 1497 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1498 np->rx_ring.orig[i].flaglen = 0; 1499 np->rx_ring.orig[i].buf = 0; 1500 } else { 1501 np->rx_ring.ex[i].flaglen = 0; 1502 np->rx_ring.ex[i].txvlan = 0; 1503 np->rx_ring.ex[i].bufhigh = 0; 1504 np->rx_ring.ex[i].buflow = 0; 1505 } 1506 np->rx_skb[i].skb = NULL; 1507 np->rx_skb[i].dma = 0; 1508 } 1509} 1510 1511static void nv_init_tx(struct net_device *dev) 1512{ 1513 struct fe_priv *np = netdev_priv(dev); 1514 int i; 1515 np->get_tx = np->put_tx = np->first_tx = np->tx_ring; 1516 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1517 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; 1518 else 1519 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; 1520 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; 1521 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; 1522 1523 for (i = 0; i < np->tx_ring_size; i++) { 1524 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1525 np->tx_ring.orig[i].flaglen = 0; 1526 np->tx_ring.orig[i].buf = 0; 1527 } else { 1528 np->tx_ring.ex[i].flaglen = 0; 1529 np->tx_ring.ex[i].txvlan = 0; 1530 np->tx_ring.ex[i].bufhigh = 0; 1531 np->tx_ring.ex[i].buflow = 0; 1532 } 1533 np->tx_skb[i].skb = NULL; 1534 np->tx_skb[i].dma = 0; 1535 } 1536} 1537 1538static int nv_init_ring(struct net_device *dev) 1539{ 1540 struct fe_priv *np = netdev_priv(dev); 1541 1542 nv_init_tx(dev); 1543 nv_init_rx(dev); 1544 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1545 return nv_alloc_rx(dev); 1546 else 1547 return nv_alloc_rx_optimized(dev); 1548} 1549 1550static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb) 1551{ 1552 struct fe_priv *np = netdev_priv(dev); 1553 1554 if (tx_skb->dma) { 1555 pci_unmap_page(np->pci_dev, tx_skb->dma, 1556 tx_skb->dma_len, 1557 PCI_DMA_TODEVICE); 1558 tx_skb->dma = 0; 1559 } 1560 if (tx_skb->skb) { 1561 dev_kfree_skb_any(tx_skb->skb); 1562 tx_skb->skb = NULL; 1563 return 1; 1564 } else { 1565 return 0; 1566 } 1567} 1568 1569static void nv_drain_tx(struct net_device *dev) 1570{ 1571 struct 
fe_priv *np = netdev_priv(dev); 1572 unsigned int i; 1573 1574 for (i = 0; i < np->tx_ring_size; i++) { 1575 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1576 np->tx_ring.orig[i].flaglen = 0; 1577 np->tx_ring.orig[i].buf = 0; 1578 } else { 1579 np->tx_ring.ex[i].flaglen = 0; 1580 np->tx_ring.ex[i].txvlan = 0; 1581 np->tx_ring.ex[i].bufhigh = 0; 1582 np->tx_ring.ex[i].buflow = 0; 1583 } 1584 if (nv_release_txskb(dev, &np->tx_skb[i])) 1585 np->stats.tx_dropped++; 1586 } 1587} 1588 1589static void nv_drain_rx(struct net_device *dev) 1590{ 1591 struct fe_priv *np = netdev_priv(dev); 1592 int i; 1593 1594 for (i = 0; i < np->rx_ring_size; i++) { 1595 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1596 np->rx_ring.orig[i].flaglen = 0; 1597 np->rx_ring.orig[i].buf = 0; 1598 } else { 1599 np->rx_ring.ex[i].flaglen = 0; 1600 np->rx_ring.ex[i].txvlan = 0; 1601 np->rx_ring.ex[i].bufhigh = 0; 1602 np->rx_ring.ex[i].buflow = 0; 1603 } 1604 wmb(); 1605 if (np->rx_skb[i].skb) { 1606 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, 1607 np->rx_skb[i].skb->end-np->rx_skb[i].skb->data, 1608 PCI_DMA_FROMDEVICE); 1609 dev_kfree_skb(np->rx_skb[i].skb); 1610 np->rx_skb[i].skb = NULL; 1611 } 1612 } 1613} 1614 1615static void drain_ring(struct net_device *dev) 1616{ 1617 nv_drain_tx(dev); 1618 nv_drain_rx(dev); 1619} 1620 1621static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) 1622{ 1623 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); 1624} 1625 1626/* 1627 * nv_start_xmit: dev->hard_start_xmit function 1628 * Called with netif_tx_lock held. 1629 */ 1630static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 1631{ 1632 struct fe_priv *np = netdev_priv(dev); 1633 u32 tx_flags = 0; 1634 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 1635 unsigned int fragments = skb_shinfo(skb)->nr_frags; 1636 unsigned int i; 1637 u32 offset = 0; 1638 u32 bcnt; 1639 u32 size = skb->len-skb->data_len; 1640 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1641 u32 empty_slots; 1642 struct ring_desc* put_tx; 1643 struct ring_desc* start_tx; 1644 struct ring_desc* prev_tx; 1645 struct nv_skb_map* prev_tx_ctx; 1646 1647 /* add fragments to entries count */ 1648 for (i = 0; i < fragments; i++) { 1649 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 1650 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1651 } 1652 1653 empty_slots = nv_get_empty_tx_slots(np); 1654 if (unlikely(empty_slots <= entries)) { 1655 spin_lock_irq(&np->lock); 1656 netif_stop_queue(dev); 1657 np->tx_stop = 1; 1658 spin_unlock_irq(&np->lock); 1659 return NETDEV_TX_BUSY; 1660 } 1661 1662 start_tx = put_tx = np->put_tx.orig; 1663 1664 /* setup the header buffer */ 1665 do { 1666 prev_tx = put_tx; 1667 prev_tx_ctx = np->put_tx_ctx; 1668 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; 1669 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 1670 PCI_DMA_TODEVICE); 1671 np->put_tx_ctx->dma_len = bcnt; 1672 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 1673 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1674 1675 tx_flags = np->tx_flags; 1676 offset += bcnt; 1677 size -= bcnt; 1678 if (unlikely(put_tx++ == np->last_tx.orig)) 1679 put_tx = np->first_tx.orig; 1680 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 1681 np->put_tx_ctx = np->first_tx_ctx; 1682 } while (size); 1683 1684 /* setup the fragments */ 1685 for (i = 0; i < fragments; i++) { 1686 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1687 u32 size = frag->size; 1688 offset = 0; 1689 1690 do { 1691 prev_tx = put_tx; 1692 prev_tx_ctx = np->put_tx_ctx; 1693 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1694 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 1695 PCI_DMA_TODEVICE); 1696 np->put_tx_ctx->dma_len = bcnt; 1697 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 1698 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1699 1700 offset += bcnt; 1701 size -= bcnt; 1702 if (unlikely(put_tx++ == np->last_tx.orig)) 1703 put_tx = np->first_tx.orig; 1704 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 1705 np->put_tx_ctx = np->first_tx_ctx; 1706 } while (size); 1707 } 1708 1709 /* set last fragment flag */ 1710 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); 1711 1712 /* save skb in this slot's context area */ 1713 prev_tx_ctx->skb = skb; 1714 1715 if (skb_is_gso(skb)) 1716 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 1717 else 1718 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 1719 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 1720 1721 spin_lock_irq(&np->lock); 1722 1723 /* set tx flags */ 1724 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 1725 np->put_tx.orig = put_tx; 1726 1727 spin_unlock_irq(&np->lock); 1728 1729 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n", 1730 dev->name, entries, tx_flags_extra); 1731 { 1732 int j; 1733 for (j=0; j<64; j++) { 1734 if ((j%16) == 0) 1735 dprintk("\n%03x:", j); 1736 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 1737 } 1738 dprintk("\n"); 1739 } 1740 1741 dev->trans_start = jiffies; 1742 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 1743 return NETDEV_TX_OK; 1744} 1745 1746static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev) 1747{ 1748 struct fe_priv *np = netdev_priv(dev); 1749 u32 tx_flags = 0; 1750 u32 tx_flags_extra; 1751 unsigned int fragments = skb_shinfo(skb)->nr_frags; 1752 unsigned int i; 1753 u32 offset = 0; 1754 u32 bcnt; 1755 u32 size = skb->len-skb->data_len; 1756 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1757 u32 empty_slots; 1758 struct ring_desc_ex* put_tx; 1759 struct ring_desc_ex* start_tx; 1760 struct ring_desc_ex* prev_tx; 1761 struct nv_skb_map* prev_tx_ctx; 1762 1763 /* add fragments to entries count */ 1764 for (i = 0; i < fragments; i++) { 1765 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 1766 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 
1 : 0); 1767 } 1768 1769 empty_slots = nv_get_empty_tx_slots(np); 1770 if (unlikely(empty_slots <= entries)) { 1771 spin_lock_irq(&np->lock); 1772 netif_stop_queue(dev); 1773 np->tx_stop = 1; 1774 spin_unlock_irq(&np->lock); 1775 return NETDEV_TX_BUSY; 1776 } 1777 1778 start_tx = put_tx = np->put_tx.ex; 1779 1780 /* setup the header buffer */ 1781 do { 1782 prev_tx = put_tx; 1783 prev_tx_ctx = np->put_tx_ctx; 1784 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1785 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 1786 PCI_DMA_TODEVICE); 1787 np->put_tx_ctx->dma_len = bcnt; 1788 put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32; 1789 put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF; 1790 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1791 1792 tx_flags = NV_TX2_VALID; 1793 offset += bcnt; 1794 size -= bcnt; 1795 if (unlikely(put_tx++ == np->last_tx.ex)) 1796 put_tx = np->first_tx.ex; 1797 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 1798 np->put_tx_ctx = np->first_tx_ctx; 1799 } while (size); 1800 1801 /* setup the fragments */ 1802 for (i = 0; i < fragments; i++) { 1803 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1804 u32 size = frag->size; 1805 offset = 0; 1806 1807 do { 1808 prev_tx = put_tx; 1809 prev_tx_ctx = np->put_tx_ctx; 1810 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1811 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 1812 PCI_DMA_TODEVICE); 1813 np->put_tx_ctx->dma_len = bcnt; 1814 put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32; 1815 put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF; 1816 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1817 1818 offset += bcnt; 1819 size -= bcnt; 1820 if (unlikely(put_tx++ == np->last_tx.ex)) 1821 put_tx = np->first_tx.ex; 1822 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 1823 np->put_tx_ctx = np->first_tx_ctx; 1824 } while (size); 1825 } 1826 1827 /* set last fragment flag */ 1828 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); 1829 1830 /* save skb in this slot's context area */ 1831 prev_tx_ctx->skb = skb; 1832 1833 if (skb_is_gso(skb)) 1834 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 1835 else 1836 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 1837 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 1838 1839 /* vlan tag */ 1840 if (likely(!np->vlangrp)) { 1841 start_tx->txvlan = 0; 1842 } else { 1843 if (vlan_tx_tag_present(skb)) 1844 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb)); 1845 else 1846 start_tx->txvlan = 0; 1847 } 1848 1849 spin_lock_irq(&np->lock); 1850 1851 /* set tx flags */ 1852 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 1853 np->put_tx.ex = put_tx; 1854 1855 spin_unlock_irq(&np->lock); 1856 1857 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n", 1858 dev->name, entries, tx_flags_extra); 1859 { 1860 int j; 1861 for (j=0; j<64; j++) { 1862 if ((j%16) == 0) 1863 dprintk("\n%03x:", j); 1864 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 1865 } 1866 dprintk("\n"); 1867 } 1868 1869 dev->trans_start = jiffies; 1870 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 1871 return NETDEV_TX_OK; 1872} 1873 1874/* 1875 * nv_tx_done: check for completed packets, release the skbs. 1876 * 1877 * Caller must own np->lock. 
1878 */ 1879static void nv_tx_done(struct net_device *dev) 1880{ 1881 struct fe_priv *np = netdev_priv(dev); 1882 u32 flags; 1883 struct ring_desc* orig_get_tx = np->get_tx.orig; 1884 1885 while ((np->get_tx.orig != np->put_tx.orig) && 1886 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) { 1887 1888 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n", 1889 dev->name, flags); 1890 1891 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 1892 np->get_tx_ctx->dma_len, 1893 PCI_DMA_TODEVICE); 1894 np->get_tx_ctx->dma = 0; 1895 1896 if (np->desc_ver == DESC_VER_1) { 1897 if (flags & NV_TX_LASTPACKET) { 1898 if (flags & NV_TX_ERROR) { 1899 if (flags & NV_TX_UNDERFLOW) 1900 np->stats.tx_fifo_errors++; 1901 if (flags & NV_TX_CARRIERLOST) 1902 np->stats.tx_carrier_errors++; 1903 np->stats.tx_errors++; 1904 } else { 1905 np->stats.tx_packets++; 1906 np->stats.tx_bytes += np->get_tx_ctx->skb->len; 1907 } 1908 dev_kfree_skb_any(np->get_tx_ctx->skb); 1909 np->get_tx_ctx->skb = NULL; 1910 } 1911 } else { 1912 if (flags & NV_TX2_LASTPACKET) { 1913 if (flags & NV_TX2_ERROR) { 1914 if (flags & NV_TX2_UNDERFLOW) 1915 np->stats.tx_fifo_errors++; 1916 if (flags & NV_TX2_CARRIERLOST) 1917 np->stats.tx_carrier_errors++; 1918 np->stats.tx_errors++; 1919 } else { 1920 np->stats.tx_packets++; 1921 np->stats.tx_bytes += np->get_tx_ctx->skb->len; 1922 } 1923 dev_kfree_skb_any(np->get_tx_ctx->skb); 1924 np->get_tx_ctx->skb = NULL; 1925 } 1926 } 1927 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) 1928 np->get_tx.orig = np->first_tx.orig; 1929 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 1930 np->get_tx_ctx = np->first_tx_ctx; 1931 } 1932 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { 1933 np->tx_stop = 0; 1934 netif_wake_queue(dev); 1935 } 1936} 1937 1938static void nv_tx_done_optimized(struct net_device *dev, int limit) 1939{ 1940 struct fe_priv *np = netdev_priv(dev); 1941 u32 flags; 1942 struct ring_desc_ex* orig_get_tx = np->get_tx.ex; 1943 1944 while ((np->get_tx.ex != np->put_tx.ex) && 1945 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) && 1946 (limit-- > 0)) { 1947 1948 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", 1949 dev->name, flags); 1950 1951 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 1952 np->get_tx_ctx->dma_len, 1953 PCI_DMA_TODEVICE); 1954 np->get_tx_ctx->dma = 0; 1955 1956 if (flags & NV_TX2_LASTPACKET) { 1957 if (!(flags & NV_TX2_ERROR)) 1958 np->stats.tx_packets++; 1959 dev_kfree_skb_any(np->get_tx_ctx->skb); 1960 np->get_tx_ctx->skb = NULL; 1961 } 1962 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) 1963 np->get_tx.ex = np->first_tx.ex; 1964 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 1965 np->get_tx_ctx = np->first_tx_ctx; 1966 } 1967 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { 1968 np->tx_stop = 0; 1969 netif_wake_queue(dev); 1970 } 1971} 1972 1973/* 1974 * nv_tx_timeout: dev->tx_timeout function 1975 * Called with netif_tx_lock held. 1976 */ 1977static void nv_tx_timeout(struct net_device *dev) 1978{ 1979 struct fe_priv *np = netdev_priv(dev); 1980 u8 __iomem *base = get_hwbase(dev); 1981 u32 status; 1982 1983 if (np->msi_flags & NV_MSI_X_ENABLED) 1984 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 1985 else 1986 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 1987 1988 printk(KERN_INFO "%s: Got tx_timeout. 
irq: %08x\n", dev->name, status); 1989 1990 { 1991 int i; 1992 1993 printk(KERN_INFO "%s: Ring at %lx\n", 1994 dev->name, (unsigned long)np->ring_addr); 1995 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 1996 for (i=0;i<=np->register_size;i+= 32) { 1997 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 1998 i, 1999 readl(base + i + 0), readl(base + i + 4), 2000 readl(base + i + 8), readl(base + i + 12), 2001 readl(base + i + 16), readl(base + i + 20), 2002 readl(base + i + 24), readl(base + i + 28)); 2003 } 2004 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 2005 for (i=0;i<np->tx_ring_size;i+= 4) { 2006 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 2007 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 2008 i, 2009 le32_to_cpu(np->tx_ring.orig[i].buf), 2010 le32_to_cpu(np->tx_ring.orig[i].flaglen), 2011 le32_to_cpu(np->tx_ring.orig[i+1].buf), 2012 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), 2013 le32_to_cpu(np->tx_ring.orig[i+2].buf), 2014 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), 2015 le32_to_cpu(np->tx_ring.orig[i+3].buf), 2016 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); 2017 } else { 2018 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", 2019 i, 2020 le32_to_cpu(np->tx_ring.ex[i].bufhigh), 2021 le32_to_cpu(np->tx_ring.ex[i].buflow), 2022 le32_to_cpu(np->tx_ring.ex[i].flaglen), 2023 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), 2024 le32_to_cpu(np->tx_ring.ex[i+1].buflow), 2025 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), 2026 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), 2027 le32_to_cpu(np->tx_ring.ex[i+2].buflow), 2028 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), 2029 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), 2030 le32_to_cpu(np->tx_ring.ex[i+3].buflow), 2031 le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); 2032 } 2033 } 2034 } 2035 2036 spin_lock_irq(&np->lock); 2037 2038 /* 1) stop tx engine */ 2039 nv_stop_tx(dev); 2040 2041 /* 2) check that the packets were not sent already: */ 2042 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 2043 nv_tx_done(dev); 2044 else 2045 nv_tx_done_optimized(dev, np->tx_ring_size); 2046 2047 /* 3) if there are dead entries: clear everything */ 2048 if (np->get_tx_ctx != np->put_tx_ctx) { 2049 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 2050 nv_drain_tx(dev); 2051 nv_init_tx(dev); 2052 setup_hw_rings(dev, NV_SETUP_TX_RING); 2053 } 2054 2055 netif_wake_queue(dev); 2056 2057 /* 4) restart tx engine */ 2058 nv_start_tx(dev); 2059 spin_unlock_irq(&np->lock); 2060} 2061 2062/* 2063 * Called when the nic notices a mismatch between the actual data len on the 2064 * wire and the len indicated in the 802 header 2065 */ 2066static int nv_getlen(struct net_device *dev, void *packet, int datalen) 2067{ 2068 int hdrlen; /* length of the 802 header */ 2069 int protolen; /* length as stored in the proto field */ 2070 2071 /* 1) calculate len according to header */ 2072 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { 2073 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); 2074 hdrlen = VLAN_HLEN; 2075 } else { 2076 protolen = ntohs( ((struct ethhdr *)packet)->h_proto); 2077 hdrlen = ETH_HLEN; 2078 } 2079 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n", 2080 dev->name, datalen, protolen, hdrlen); 2081 if (protolen > ETH_DATA_LEN) 2082 return datalen; /* Value in proto field not a len, no checks possible */ 2083 2084 protolen += hdrlen; 2085 /* 
consistency checks: */ 2086 if (datalen > ETH_ZLEN) { 2087 if (datalen >= protolen) { 2088 /* more data on wire than in 802 header, trim of 2089 * additional data. 2090 */ 2091 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2092 dev->name, protolen); 2093 return protolen; 2094 } else { 2095 /* less data on wire than mentioned in header. 2096 * Discard the packet. 2097 */ 2098 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n", 2099 dev->name); 2100 return -1; 2101 } 2102 } else { 2103 /* short packet. Accept only if 802 values are also short */ 2104 if (protolen > ETH_ZLEN) { 2105 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n", 2106 dev->name); 2107 return -1; 2108 } 2109 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2110 dev->name, datalen); 2111 return datalen; 2112 } 2113} 2114 2115static int nv_rx_process(struct net_device *dev, int limit) 2116{ 2117 struct fe_priv *np = netdev_priv(dev); 2118 u32 flags; 2119 u32 rx_processed_cnt = 0; 2120 struct sk_buff *skb; 2121 int len; 2122 2123 while((np->get_rx.orig != np->put_rx.orig) && 2124 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && 2125 (rx_processed_cnt++ < limit)) { 2126 2127 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n", 2128 dev->name, flags); 2129 2130 /* 2131 * the packet is for us - immediately tear down the pci mapping. 2132 * TODO: check if a prefetch of the first cacheline improves 2133 * the performance. 2134 */ 2135 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2136 np->get_rx_ctx->dma_len, 2137 PCI_DMA_FROMDEVICE); 2138 skb = np->get_rx_ctx->skb; 2139 np->get_rx_ctx->skb = NULL; 2140 2141 { 2142 int j; 2143 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2144 for (j=0; j<64; j++) { 2145 if ((j%16) == 0) 2146 dprintk("\n%03x:", j); 2147 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2148 } 2149 dprintk("\n"); 2150 } 2151 /* look at what we actually got: */ 2152 if (np->desc_ver == DESC_VER_1) { 2153 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2154 len = flags & LEN_MASK_V1; 2155 if (unlikely(flags & NV_RX_ERROR)) { 2156 if (flags & NV_RX_ERROR4) { 2157 len = nv_getlen(dev, skb->data, len); 2158 if (len < 0) { 2159 np->stats.rx_errors++; 2160 dev_kfree_skb(skb); 2161 goto next_pkt; 2162 } 2163 } 2164 /* framing errors are soft errors */ 2165 else if (flags & NV_RX_FRAMINGERR) { 2166 if (flags & NV_RX_SUBSTRACT1) { 2167 len--; 2168 } 2169 } 2170 /* the rest are hard errors */ 2171 else { 2172 if (flags & NV_RX_MISSEDFRAME) 2173 np->stats.rx_missed_errors++; 2174 if (flags & NV_RX_CRCERR) 2175 np->stats.rx_crc_errors++; 2176 if (flags & NV_RX_OVERFLOW) 2177 np->stats.rx_over_errors++; 2178 np->stats.rx_errors++; 2179 dev_kfree_skb(skb); 2180 goto next_pkt; 2181 } 2182 } 2183 } else { 2184 dev_kfree_skb(skb); 2185 goto next_pkt; 2186 } 2187 } else { 2188 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2189 len = flags & LEN_MASK_V2; 2190 if (unlikely(flags & NV_RX2_ERROR)) { 2191 if (flags & NV_RX2_ERROR4) { 2192 len = nv_getlen(dev, skb->data, len); 2193 if (len < 0) { 2194 np->stats.rx_errors++; 2195 dev_kfree_skb(skb); 2196 goto next_pkt; 2197 } 2198 } 2199 /* framing errors are soft errors */ 2200 else if (flags & NV_RX2_FRAMINGERR) { 2201 if (flags & NV_RX2_SUBSTRACT1) { 2202 len--; 2203 } 2204 } 2205 /* the rest are hard errors */ 2206 else { 2207 if (flags & NV_RX2_CRCERR) 2208 np->stats.rx_crc_errors++; 2209 if (flags & NV_RX2_OVERFLOW) 2210 np->stats.rx_over_errors++; 2211 np->stats.rx_errors++; 2212 dev_kfree_skb(skb); 2213 
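					/* error counters updated and the skb freed; skip ahead to the next rx descriptor */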
goto next_pkt; 2214 } 2215 } 2216 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { 2217 skb->ip_summed = CHECKSUM_UNNECESSARY; 2218 } else { 2219 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || 2220 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { 2221 skb->ip_summed = CHECKSUM_UNNECESSARY; 2222 } 2223 } 2224 } else { 2225 dev_kfree_skb(skb); 2226 goto next_pkt; 2227 } 2228 } 2229 /* got a valid packet - forward it to the network core */ 2230 skb_put(skb, len); 2231 skb->protocol = eth_type_trans(skb, dev); 2232 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n", 2233 dev->name, len, skb->protocol); 2234#ifdef CONFIG_FORCEDETH_NAPI 2235 netif_receive_skb(skb); 2236#else 2237 netif_rx(skb); 2238#endif 2239 dev->last_rx = jiffies; 2240 np->stats.rx_packets++; 2241 np->stats.rx_bytes += len; 2242next_pkt: 2243 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2244 np->get_rx.orig = np->first_rx.orig; 2245 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2246 np->get_rx_ctx = np->first_rx_ctx; 2247 } 2248 2249 return rx_processed_cnt; 2250} 2251 2252static int nv_rx_process_optimized(struct net_device *dev, int limit) 2253{ 2254 struct fe_priv *np = netdev_priv(dev); 2255 u32 flags; 2256 u32 vlanflags = 0; 2257 u32 rx_processed_cnt = 0; 2258 struct sk_buff *skb; 2259 int len; 2260 2261 while((np->get_rx.ex != np->put_rx.ex) && 2262 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && 2263 (rx_processed_cnt++ < limit)) { 2264 2265 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n", 2266 dev->name, flags); 2267 2268 /* 2269 * the packet is for us - immediately tear down the pci mapping. 2270 * TODO: check if a prefetch of the first cacheline improves 2271 * the performance. 
2272 */ 2273 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2274 np->get_rx_ctx->dma_len, 2275 PCI_DMA_FROMDEVICE); 2276 skb = np->get_rx_ctx->skb; 2277 np->get_rx_ctx->skb = NULL; 2278 2279 { 2280 int j; 2281 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2282 for (j=0; j<64; j++) { 2283 if ((j%16) == 0) 2284 dprintk("\n%03x:", j); 2285 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2286 } 2287 dprintk("\n"); 2288 } 2289 /* look at what we actually got: */ 2290 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2291 len = flags & LEN_MASK_V2; 2292 if (unlikely(flags & NV_RX2_ERROR)) { 2293 if (flags & NV_RX2_ERROR4) { 2294 len = nv_getlen(dev, skb->data, len); 2295 if (len < 0) { 2296 dev_kfree_skb(skb); 2297 goto next_pkt; 2298 } 2299 } 2300 /* framing errors are soft errors */ 2301 else if (flags & NV_RX2_FRAMINGERR) { 2302 if (flags & NV_RX2_SUBSTRACT1) { 2303 len--; 2304 } 2305 } 2306 /* the rest are hard errors */ 2307 else { 2308 dev_kfree_skb(skb); 2309 goto next_pkt; 2310 } 2311 } 2312 2313 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { 2314 skb->ip_summed = CHECKSUM_UNNECESSARY; 2315 } else { 2316 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || 2317 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { 2318 skb->ip_summed = CHECKSUM_UNNECESSARY; 2319 } 2320 } 2321 2322 /* got a valid packet - forward it to the network core */ 2323 skb_put(skb, len); 2324 skb->protocol = eth_type_trans(skb, dev); 2325 prefetch(skb->data); 2326 2327 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n", 2328 dev->name, len, skb->protocol); 2329 2330 if (likely(!np->vlangrp)) { 2331#ifdef CONFIG_FORCEDETH_NAPI 2332 netif_receive_skb(skb); 2333#else 2334 netif_rx(skb); 2335#endif 2336 } else { 2337 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); 2338 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { 2339#ifdef CONFIG_FORCEDETH_NAPI 2340 vlan_hwaccel_receive_skb(skb, np->vlangrp, 2341 vlanflags & NV_RX3_VLAN_TAG_MASK); 2342#else 2343 vlan_hwaccel_rx(skb, np->vlangrp, 2344 vlanflags & NV_RX3_VLAN_TAG_MASK); 2345#endif 2346 } else { 2347#ifdef CONFIG_FORCEDETH_NAPI 2348 netif_receive_skb(skb); 2349#else 2350 netif_rx(skb); 2351#endif 2352 } 2353 } 2354 2355 dev->last_rx = jiffies; 2356 np->stats.rx_packets++; 2357 np->stats.rx_bytes += len; 2358 } else { 2359 dev_kfree_skb(skb); 2360 } 2361next_pkt: 2362 if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) 2363 np->get_rx.ex = np->first_rx.ex; 2364 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2365 np->get_rx_ctx = np->first_rx_ctx; 2366 } 2367 2368 return rx_processed_cnt; 2369} 2370 2371static void set_bufsize(struct net_device *dev) 2372{ 2373 struct fe_priv *np = netdev_priv(dev); 2374 2375 if (dev->mtu <= ETH_DATA_LEN) 2376 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; 2377 else 2378 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; 2379} 2380 2381/* 2382 * nv_change_mtu: dev->change_mtu function 2383 * Called with dev_base_lock held for read. 
2384 */ 2385static int nv_change_mtu(struct net_device *dev, int new_mtu) 2386{ 2387 struct fe_priv *np = netdev_priv(dev); 2388 int old_mtu; 2389 2390 if (new_mtu < 64 || new_mtu > np->pkt_limit) 2391 return -EINVAL; 2392 2393 old_mtu = dev->mtu; 2394 dev->mtu = new_mtu; 2395 2396 /* return early if the buffer sizes will not change */ 2397 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) 2398 return 0; 2399 if (old_mtu == new_mtu) 2400 return 0; 2401 2402 /* synchronized against open : rtnl_lock() held by caller */ 2403 if (netif_running(dev)) { 2404 u8 __iomem *base = get_hwbase(dev); 2405 /* 2406 * It seems that the nic preloads valid ring entries into an 2407 * internal buffer. The procedure for flushing everything is 2408 * guessed, there is probably a simpler approach. 2409 * Changing the MTU is a rare event, it shouldn't matter. 2410 */ 2411 nv_disable_irq(dev); 2412 netif_tx_lock_bh(dev); 2413 spin_lock(&np->lock); 2414 /* stop engines */ 2415 nv_stop_rx(dev); 2416 nv_stop_tx(dev); 2417 nv_txrx_reset(dev); 2418 /* drain rx queue */ 2419 nv_drain_rx(dev); 2420 nv_drain_tx(dev); 2421 /* reinit driver view of the rx queue */ 2422 set_bufsize(dev); 2423 if (nv_init_ring(dev)) { 2424 if (!np->in_shutdown) 2425 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2426 } 2427 /* reinit nic view of the rx queue */ 2428 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2429 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2430 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 2431 base + NvRegRingSizes); 2432 pci_push(base); 2433 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2434 pci_push(base); 2435 2436 /* restart rx engine */ 2437 nv_start_rx(dev); 2438 nv_start_tx(dev); 2439 spin_unlock(&np->lock); 2440 netif_tx_unlock_bh(dev); 2441 nv_enable_irq(dev); 2442 } 2443 return 0; 2444} 2445 2446static void nv_copy_mac_to_hw(struct net_device *dev) 2447{ 2448 u8 __iomem *base = get_hwbase(dev); 2449 u32 mac[2]; 2450 2451 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 2452 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 2453 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 2454 2455 writel(mac[0], base + NvRegMacAddrA); 2456 writel(mac[1], base + NvRegMacAddrB); 2457} 2458 2459/* 2460 * nv_set_mac_address: dev->set_mac_address function 2461 * Called with rtnl_lock() held. 2462 */ 2463static int nv_set_mac_address(struct net_device *dev, void *addr) 2464{ 2465 struct fe_priv *np = netdev_priv(dev); 2466 struct sockaddr *macaddr = (struct sockaddr*)addr; 2467 2468 if (!is_valid_ether_addr(macaddr->sa_data)) 2469 return -EADDRNOTAVAIL; 2470 2471 /* synchronized against open : rtnl_lock() held by caller */ 2472 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); 2473 2474 if (netif_running(dev)) { 2475 netif_tx_lock_bh(dev); 2476 spin_lock_irq(&np->lock); 2477 2478 /* stop rx engine */ 2479 nv_stop_rx(dev); 2480 2481 /* set mac address */ 2482 nv_copy_mac_to_hw(dev); 2483 2484 /* restart rx engine */ 2485 nv_start_rx(dev); 2486 spin_unlock_irq(&np->lock); 2487 netif_tx_unlock_bh(dev); 2488 } else { 2489 nv_copy_mac_to_hw(dev); 2490 } 2491 return 0; 2492} 2493 2494/* 2495 * nv_set_multicast: dev->set_multicast function 2496 * Called with netif_tx_lock held. 
2497 */ 2498static void nv_set_multicast(struct net_device *dev) 2499{ 2500 struct fe_priv *np = netdev_priv(dev); 2501 u8 __iomem *base = get_hwbase(dev); 2502 u32 addr[2]; 2503 u32 mask[2]; 2504 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; 2505 2506 memset(addr, 0, sizeof(addr)); 2507 memset(mask, 0, sizeof(mask)); 2508 2509 if (dev->flags & IFF_PROMISC) { 2510 pff |= NVREG_PFF_PROMISC; 2511 } else { 2512 pff |= NVREG_PFF_MYADDR; 2513 2514 if (dev->flags & IFF_ALLMULTI || dev->mc_list) { 2515 u32 alwaysOff[2]; 2516 u32 alwaysOn[2]; 2517 2518 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; 2519 if (dev->flags & IFF_ALLMULTI) { 2520 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 2521 } else { 2522 struct dev_mc_list *walk; 2523 2524 walk = dev->mc_list; 2525 while (walk != NULL) { 2526 u32 a, b; 2527 a = le32_to_cpu(*(u32 *) walk->dmi_addr); 2528 b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4])); 2529 alwaysOn[0] &= a; 2530 alwaysOff[0] &= ~a; 2531 alwaysOn[1] &= b; 2532 alwaysOff[1] &= ~b; 2533 walk = walk->next; 2534 } 2535 } 2536 addr[0] = alwaysOn[0]; 2537 addr[1] = alwaysOn[1]; 2538 mask[0] = alwaysOn[0] | alwaysOff[0]; 2539 mask[1] = alwaysOn[1] | alwaysOff[1]; 2540 } 2541 } 2542 addr[0] |= NVREG_MCASTADDRA_FORCE; 2543 pff |= NVREG_PFF_ALWAYS; 2544 spin_lock_irq(&np->lock); 2545 nv_stop_rx(dev); 2546 writel(addr[0], base + NvRegMulticastAddrA); 2547 writel(addr[1], base + NvRegMulticastAddrB); 2548 writel(mask[0], base + NvRegMulticastMaskA); 2549 writel(mask[1], base + NvRegMulticastMaskB); 2550 writel(pff, base + NvRegPacketFilterFlags); 2551 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", 2552 dev->name); 2553 nv_start_rx(dev); 2554 spin_unlock_irq(&np->lock); 2555} 2556 2557static void nv_update_pause(struct net_device *dev, u32 pause_flags) 2558{ 2559 struct fe_priv *np = netdev_priv(dev); 2560 u8 __iomem *base = get_hwbase(dev); 2561 2562 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); 2563 2564 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { 2565 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; 2566 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { 2567 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); 2568 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2569 } else { 2570 writel(pff, base + NvRegPacketFilterFlags); 2571 } 2572 } 2573 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 2574 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 2575 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 2576 writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame); 2577 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 2578 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2579 } else { 2580 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 2581 writel(regmisc, base + NvRegMisc1); 2582 } 2583 } 2584} 2585 2586/** 2587 * nv_update_linkspeed: Setup the MAC according to the link partner 2588 * @dev: Network device to be configured 2589 * 2590 * The function queries the PHY and checks if there is a link partner. 2591 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is 2592 * set to 10 MBit HD. 2593 * 2594 * The function returns 0 if there is no link partner and 1 if there is 2595 * a good link partner. 
2596 */ 2597static int nv_update_linkspeed(struct net_device *dev) 2598{ 2599 struct fe_priv *np = netdev_priv(dev); 2600 u8 __iomem *base = get_hwbase(dev); 2601 int adv = 0; 2602 int lpa = 0; 2603 int adv_lpa, adv_pause, lpa_pause; 2604 int newls = np->linkspeed; 2605 int newdup = np->duplex; 2606 int mii_status; 2607 int retval = 0; 2608 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 2609 2610 /* BMSR_LSTATUS is latched, read it twice: 2611 * we want the current value. 2612 */ 2613 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 2614 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 2615 2616 if (!(mii_status & BMSR_LSTATUS)) { 2617 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", 2618 dev->name); 2619 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2620 newdup = 0; 2621 retval = 0; 2622 goto set_speed; 2623 } 2624 2625 if (np->autoneg == 0) { 2626 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", 2627 dev->name, np->fixed_mode); 2628 if (np->fixed_mode & LPA_100FULL) { 2629 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2630 newdup = 1; 2631 } else if (np->fixed_mode & LPA_100HALF) { 2632 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2633 newdup = 0; 2634 } else if (np->fixed_mode & LPA_10FULL) { 2635 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2636 newdup = 1; 2637 } else { 2638 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2639 newdup = 0; 2640 } 2641 retval = 1; 2642 goto set_speed; 2643 } 2644 /* check auto negotiation is complete */ 2645 if (!(mii_status & BMSR_ANEGCOMPLETE)) { 2646 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ 2647 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2648 newdup = 0; 2649 retval = 0; 2650 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); 2651 goto set_speed; 2652 } 2653 2654 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 2655 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 2656 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", 2657 dev->name, adv, lpa); 2658 2659 retval = 1; 2660 if (np->gigabit == PHY_GIGABIT) { 2661 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 2662 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); 2663 2664 if ((control_1000 & ADVERTISE_1000FULL) && 2665 (status_1000 & LPA_1000FULL)) { 2666 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", 2667 dev->name); 2668 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 2669 newdup = 1; 2670 goto set_speed; 2671 } 2672 } 2673 2674 /* FIXME: handle parallel detection properly */ 2675 adv_lpa = lpa & adv; 2676 if (adv_lpa & LPA_100FULL) { 2677 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2678 newdup = 1; 2679 } else if (adv_lpa & LPA_100HALF) { 2680 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2681 newdup = 0; 2682 } else if (adv_lpa & LPA_10FULL) { 2683 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2684 newdup = 1; 2685 } else if (adv_lpa & LPA_10HALF) { 2686 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2687 newdup = 0; 2688 } else { 2689 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); 2690 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2691 newdup = 0; 2692 } 2693 2694set_speed: 2695 if (np->duplex == newdup && np->linkspeed == newls) 2696 return retval; 2697 2698 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", 2699 dev->name, np->linkspeed, 
np->duplex, newls, newdup); 2700 2701 np->duplex = newdup; 2702 np->linkspeed = newls; 2703 2704 if (np->gigabit == PHY_GIGABIT) { 2705 phyreg = readl(base + NvRegRandomSeed); 2706 phyreg &= ~(0x3FF00); 2707 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) 2708 phyreg |= NVREG_RNDSEED_FORCE3; 2709 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) 2710 phyreg |= NVREG_RNDSEED_FORCE2; 2711 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 2712 phyreg |= NVREG_RNDSEED_FORCE; 2713 writel(phyreg, base + NvRegRandomSeed); 2714 } 2715 2716 phyreg = readl(base + NvRegPhyInterface); 2717 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 2718 if (np->duplex == 0) 2719 phyreg |= PHY_HALF; 2720 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 2721 phyreg |= PHY_100; 2722 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2723 phyreg |= PHY_1000; 2724 writel(phyreg, base + NvRegPhyInterface); 2725 2726 if (phyreg & PHY_RGMII) { 2727 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2728 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 2729 else 2730 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 2731 } else { 2732 txreg = NVREG_TX_DEFERRAL_DEFAULT; 2733 } 2734 writel(txreg, base + NvRegTxDeferral); 2735 2736 if (np->desc_ver == DESC_VER_1) { 2737 txreg = NVREG_TX_WM_DESC1_DEFAULT; 2738 } else { 2739 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2740 txreg = NVREG_TX_WM_DESC2_3_1000; 2741 else 2742 txreg = NVREG_TX_WM_DESC2_3_DEFAULT; 2743 } 2744 writel(txreg, base + NvRegTxWatermark); 2745 2746 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 2747 base + NvRegMisc1); 2748 pci_push(base); 2749 writel(np->linkspeed, base + NvRegLinkSpeed); 2750 pci_push(base); 2751 2752 pause_flags = 0; 2753 /* setup pause frame */ 2754 if (np->duplex != 0) { 2755 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 2756 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); 2757 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 2758 2759 switch (adv_pause) { 2760 case ADVERTISE_PAUSE_CAP: 2761 if (lpa_pause & LPA_PAUSE_CAP) { 2762 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2763 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 2764 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2765 } 2766 break; 2767 case ADVERTISE_PAUSE_ASYM: 2768 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 2769 { 2770 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2771 } 2772 break; 2773 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: 2774 if (lpa_pause & LPA_PAUSE_CAP) 2775 { 2776 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2777 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 2778 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2779 } 2780 if (lpa_pause == LPA_PAUSE_ASYM) 2781 { 2782 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2783 } 2784 break; 2785 } 2786 } else { 2787 pause_flags = np->pause_flags; 2788 } 2789 } 2790 nv_update_pause(dev, pause_flags); 2791 2792 return retval; 2793} 2794 2795static void nv_linkchange(struct net_device *dev) 2796{ 2797 if (nv_update_linkspeed(dev)) { 2798 if (!netif_carrier_ok(dev)) { 2799 netif_carrier_on(dev); 2800 printk(KERN_INFO "%s: link up.\n", dev->name); 2801 nv_start_rx(dev); 2802 } 2803 } else { 2804 if (netif_carrier_ok(dev)) { 2805 netif_carrier_off(dev); 2806 printk(KERN_INFO "%s: link down.\n", dev->name); 2807 nv_stop_rx(dev); 2808 } 2809 } 2810} 2811 2812static void nv_link_irq(struct net_device *dev) 2813{ 2814 u8 __iomem *base = get_hwbase(dev); 2815 u32 miistat; 2816 2817 miistat = readl(base + NvRegMIIStatus); 2818 
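	/* Writing the mask back to the MII status register presumably
	 * acknowledges the latched link-change bits we just sampled, so the
	 * interrupt is not raised again for the same event. */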
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}

static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	for (i=0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done(dev);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev);

			/* Disable further receive irqs */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process(dev, dev->weight)) {
			if (unlikely(nv_alloc_rx(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
				dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
				dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			spin_unlock(&np->lock);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_RETVAL(i);
}

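/*
 * nv_nic_irq_optimized and the per-vector MSI-X handlers below follow the
 * same basic structure as nv_nic_irq above: read the relevant irq status
 * bits, write the same bits back (which appears to acknowledge them on this
 * hardware), do their share of the tx/rx/link work, and give up after
 * max_interrupt_work iterations by masking the source on the nic and
 * deferring further processing to the nic_poll timer.
 */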
#define TX_WORK_PER_LOOP 64
#define RX_WORK_PER_LOOP 64
/**
 * All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);

	for (i=0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev);

			/* Disable further receive irqs */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process_optimized(dev, dev->weight)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x.
Probably TX fail.\n", 3006 dev->name, events); 3007 } 3008 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) { 3009 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 3010 dev->name, events); 3011 } 3012 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { 3013 spin_lock(&np->lock); 3014 /* disable interrupts on the nic */ 3015 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3016 writel(0, base + NvRegIrqMask); 3017 else 3018 writel(np->irqmask, base + NvRegIrqMask); 3019 pci_push(base); 3020 3021 if (!np->in_shutdown) { 3022 np->nic_poll_irq = np->irqmask; 3023 np->recover_error = 1; 3024 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3025 } 3026 spin_unlock(&np->lock); 3027 break; 3028 } 3029 3030 if (unlikely(i > max_interrupt_work)) { 3031 spin_lock(&np->lock); 3032 /* disable interrupts on the nic */ 3033 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3034 writel(0, base + NvRegIrqMask); 3035 else 3036 writel(np->irqmask, base + NvRegIrqMask); 3037 pci_push(base); 3038 3039 if (!np->in_shutdown) { 3040 np->nic_poll_irq = np->irqmask; 3041 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3042 } 3043 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); 3044 spin_unlock(&np->lock); 3045 break; 3046 } 3047 3048 } 3049 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name); 3050 3051 return IRQ_RETVAL(i); 3052} 3053 3054static irqreturn_t nv_nic_irq_tx(int foo, void *data) 3055{ 3056 struct net_device *dev = (struct net_device *) data; 3057 struct fe_priv *np = netdev_priv(dev); 3058 u8 __iomem *base = get_hwbase(dev); 3059 u32 events; 3060 int i; 3061 unsigned long flags; 3062 3063 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 3064 3065 for (i=0; ; i++) { 3066 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3067 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3068 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 3069 if (!(events & np->irqmask)) 3070 break; 3071 3072 spin_lock_irqsave(&np->lock, flags); 3073 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3074 spin_unlock_irqrestore(&np->lock, flags); 3075 3076 if (unlikely(events & (NVREG_IRQ_TX_ERR))) { 3077 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. 
Probably TX fail.\n", 3078 dev->name, events); 3079 } 3080 if (unlikely(i > max_interrupt_work)) { 3081 spin_lock_irqsave(&np->lock, flags); 3082 /* disable interrupts on the nic */ 3083 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3084 pci_push(base); 3085 3086 if (!np->in_shutdown) { 3087 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 3088 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3089 } 3090 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 3091 spin_unlock_irqrestore(&np->lock, flags); 3092 break; 3093 } 3094 3095 } 3096 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); 3097 3098 return IRQ_RETVAL(i); 3099} 3100 3101#ifdef CONFIG_FORCEDETH_NAPI 3102static int nv_napi_poll(struct net_device *dev, int *budget) 3103{ 3104 int pkts, limit = min(*budget, dev->quota); 3105 struct fe_priv *np = netdev_priv(dev); 3106 u8 __iomem *base = get_hwbase(dev); 3107 unsigned long flags; 3108 int retcode; 3109 3110 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3111 pkts = nv_rx_process(dev, limit); 3112 retcode = nv_alloc_rx(dev); 3113 } else { 3114 pkts = nv_rx_process_optimized(dev, limit); 3115 retcode = nv_alloc_rx_optimized(dev); 3116 } 3117 3118 if (retcode) { 3119 spin_lock_irqsave(&np->lock, flags); 3120 if (!np->in_shutdown) 3121 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3122 spin_unlock_irqrestore(&np->lock, flags); 3123 } 3124 3125 if (pkts < limit) { 3126 /* all done, no more packets present */ 3127 netif_rx_complete(dev); 3128 3129 /* re-enable receive interrupts */ 3130 spin_lock_irqsave(&np->lock, flags); 3131 3132 np->irqmask |= NVREG_IRQ_RX_ALL; 3133 if (np->msi_flags & NV_MSI_X_ENABLED) 3134 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3135 else 3136 writel(np->irqmask, base + NvRegIrqMask); 3137 3138 spin_unlock_irqrestore(&np->lock, flags); 3139 return 0; 3140 } else { 3141 /* used up our quantum, so reschedule */ 3142 dev->quota -= pkts; 3143 *budget -= pkts; 3144 return 1; 3145 } 3146} 3147#endif 3148 3149#ifdef CONFIG_FORCEDETH_NAPI 3150static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3151{ 3152 struct net_device *dev = (struct net_device *) data; 3153 u8 __iomem *base = get_hwbase(dev); 3154 u32 events; 3155 3156 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3157 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3158 3159 if (events) { 3160 netif_rx_schedule(dev); 3161 /* disable receive interrupts on the nic */ 3162 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3163 pci_push(base); 3164 } 3165 return IRQ_HANDLED; 3166} 3167#else 3168static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3169{ 3170 struct net_device *dev = (struct net_device *) data; 3171 struct fe_priv *np = netdev_priv(dev); 3172 u8 __iomem *base = get_hwbase(dev); 3173 u32 events; 3174 int i; 3175 unsigned long flags; 3176 3177 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 3178 3179 for (i=0; ; i++) { 3180 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3181 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3182 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 3183 if (!(events & np->irqmask)) 3184 break; 3185 3186 if (nv_rx_process_optimized(dev, dev->weight)) { 3187 if (unlikely(nv_alloc_rx_optimized(dev))) { 3188 spin_lock_irqsave(&np->lock, flags); 3189 if (!np->in_shutdown) 3190 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3191 spin_unlock_irqrestore(&np->lock, flags); 3192 } 3193 } 3194 3195 if (unlikely(i > max_interrupt_work)) { 3196 spin_lock_irqsave(&np->lock, 
flags); 3197 /* disable interrupts on the nic */ 3198 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3199 pci_push(base); 3200 3201 if (!np->in_shutdown) { 3202 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 3203 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3204 } 3205 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 3206 spin_unlock_irqrestore(&np->lock, flags); 3207 break; 3208 } 3209 } 3210 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); 3211 3212 return IRQ_RETVAL(i); 3213} 3214#endif 3215 3216static irqreturn_t nv_nic_irq_other(int foo, void *data) 3217{ 3218 struct net_device *dev = (struct net_device *) data; 3219 struct fe_priv *np = netdev_priv(dev); 3220 u8 __iomem *base = get_hwbase(dev); 3221 u32 events; 3222 int i; 3223 unsigned long flags; 3224 3225 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 3226 3227 for (i=0; ; i++) { 3228 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3229 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3230 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3231 if (!(events & np->irqmask)) 3232 break; 3233 3234 /* check tx in case we reached max loop limit in tx isr */ 3235 spin_lock_irqsave(&np->lock, flags); 3236 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3237 spin_unlock_irqrestore(&np->lock, flags); 3238 3239 if (events & NVREG_IRQ_LINK) { 3240 spin_lock_irqsave(&np->lock, flags); 3241 nv_link_irq(dev); 3242 spin_unlock_irqrestore(&np->lock, flags); 3243 } 3244 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 3245 spin_lock_irqsave(&np->lock, flags); 3246 nv_linkchange(dev); 3247 spin_unlock_irqrestore(&np->lock, flags); 3248 np->link_timeout = jiffies + LINK_TIMEOUT; 3249 } 3250 if (events & NVREG_IRQ_RECOVER_ERROR) { 3251 spin_lock_irq(&np->lock); 3252 /* disable interrupts on the nic */ 3253 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3254 pci_push(base); 3255 3256 if (!np->in_shutdown) { 3257 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3258 np->recover_error = 1; 3259 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3260 } 3261 spin_unlock_irq(&np->lock); 3262 break; 3263 } 3264 if (events & (NVREG_IRQ_UNKNOWN)) { 3265 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. 
Please report\n", 3266 dev->name, events); 3267 } 3268 if (unlikely(i > max_interrupt_work)) { 3269 spin_lock_irqsave(&np->lock, flags); 3270 /* disable interrupts on the nic */ 3271 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3272 pci_push(base); 3273 3274 if (!np->in_shutdown) { 3275 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3276 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3277 } 3278 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 3279 spin_unlock_irqrestore(&np->lock, flags); 3280 break; 3281 } 3282 3283 } 3284 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); 3285 3286 return IRQ_RETVAL(i); 3287} 3288 3289static irqreturn_t nv_nic_irq_test(int foo, void *data) 3290{ 3291 struct net_device *dev = (struct net_device *) data; 3292 struct fe_priv *np = netdev_priv(dev); 3293 u8 __iomem *base = get_hwbase(dev); 3294 u32 events; 3295 3296 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name); 3297 3298 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3299 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3300 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3301 } else { 3302 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3303 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3304 } 3305 pci_push(base); 3306 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3307 if (!(events & NVREG_IRQ_TIMER)) 3308 return IRQ_RETVAL(0); 3309 3310 spin_lock(&np->lock); 3311 np->intr_test = 1; 3312 spin_unlock(&np->lock); 3313 3314 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); 3315 3316 return IRQ_RETVAL(1); 3317} 3318 3319static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) 3320{ 3321 u8 __iomem *base = get_hwbase(dev); 3322 int i; 3323 u32 msixmap = 0; 3324 3325 /* Each interrupt bit can be mapped to a MSIX vector (4 bits). 3326 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents 3327 * the remaining 8 interrupts. 
3328 */ 3329 for (i = 0; i < 8; i++) { 3330 if ((irqmask >> i) & 0x1) { 3331 msixmap |= vector << (i << 2); 3332 } 3333 } 3334 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3335 3336 msixmap = 0; 3337 for (i = 0; i < 8; i++) { 3338 if ((irqmask >> (i + 8)) & 0x1) { 3339 msixmap |= vector << (i << 2); 3340 } 3341 } 3342 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3343} 3344 3345static int nv_request_irq(struct net_device *dev, int intr_test) 3346{ 3347 struct fe_priv *np = get_nvpriv(dev); 3348 u8 __iomem *base = get_hwbase(dev); 3349 int ret = 1; 3350 int i; 3351 irqreturn_t (*handler)(int foo, void *data); 3352 3353 if (intr_test) { 3354 handler = nv_nic_irq_test; 3355 } else { 3356 if (np->desc_ver == DESC_VER_3) 3357 handler = nv_nic_irq_optimized; 3358 else 3359 handler = nv_nic_irq; 3360 } 3361 3362 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3363 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3364 np->msi_x_entry[i].entry = i; 3365 } 3366 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 3367 np->msi_flags |= NV_MSI_X_ENABLED; 3368 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3369 /* Request irq for rx handling */ 3370 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) { 3371 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 3372 pci_disable_msix(np->pci_dev); 3373 np->msi_flags &= ~NV_MSI_X_ENABLED; 3374 goto out_err; 3375 } 3376 /* Request irq for tx handling */ 3377 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) { 3378 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 3379 pci_disable_msix(np->pci_dev); 3380 np->msi_flags &= ~NV_MSI_X_ENABLED; 3381 goto out_free_rx; 3382 } 3383 /* Request irq for link and timer handling */ 3384 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) { 3385 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 3386 pci_disable_msix(np->pci_dev); 3387 np->msi_flags &= ~NV_MSI_X_ENABLED; 3388 goto out_free_tx; 3389 } 3390 /* map interrupts to their respective vector */ 3391 writel(0, base + NvRegMSIXMap0); 3392 writel(0, base + NvRegMSIXMap1); 3393 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 3394 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 3395 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 3396 } else { 3397 /* Request irq for all interrupts */ 3398 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 3399 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3400 pci_disable_msix(np->pci_dev); 3401 np->msi_flags &= ~NV_MSI_X_ENABLED; 3402 goto out_err; 3403 } 3404 3405 /* map interrupts to vector 0 */ 3406 writel(0, base + NvRegMSIXMap0); 3407 writel(0, base + NvRegMSIXMap1); 3408 } 3409 } 3410 } 3411 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3412 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3413 np->msi_flags |= NV_MSI_ENABLED; 3414 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3415 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3416 pci_disable_msi(np->pci_dev); 3417 np->msi_flags &= ~NV_MSI_ENABLED; 3418 goto out_err; 3419 } 3420 3421 /* map interrupts to vector 0 */ 3422 writel(0, base + 
NvRegMSIMap0); 3423 writel(0, base + NvRegMSIMap1); 3424 /* enable msi vector 0 */ 3425 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3426 } 3427 } 3428 if (ret != 0) { 3429 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) 3430 goto out_err; 3431 3432 } 3433 3434 return 0; 3435out_free_tx: 3436 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); 3437out_free_rx: 3438 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); 3439out_err: 3440 return 1; 3441} 3442 3443static void nv_free_irq(struct net_device *dev) 3444{ 3445 struct fe_priv *np = get_nvpriv(dev); 3446 int i; 3447 3448 if (np->msi_flags & NV_MSI_X_ENABLED) { 3449 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3450 free_irq(np->msi_x_entry[i].vector, dev); 3451 } 3452 pci_disable_msix(np->pci_dev); 3453 np->msi_flags &= ~NV_MSI_X_ENABLED; 3454 } else { 3455 free_irq(np->pci_dev->irq, dev); 3456 if (np->msi_flags & NV_MSI_ENABLED) { 3457 pci_disable_msi(np->pci_dev); 3458 np->msi_flags &= ~NV_MSI_ENABLED; 3459 } 3460 } 3461} 3462 3463static void nv_do_nic_poll(unsigned long data) 3464{ 3465 struct net_device *dev = (struct net_device *) data; 3466 struct fe_priv *np = netdev_priv(dev); 3467 u8 __iomem *base = get_hwbase(dev); 3468 u32 mask = 0; 3469 3470 /* 3471 * First disable irq(s) and then 3472 * reenable interrupts on the nic, we have to do this before calling 3473 * nv_nic_irq because that may decide to do otherwise 3474 */ 3475 3476 if (!using_multi_irqs(dev)) { 3477 if (np->msi_flags & NV_MSI_X_ENABLED) 3478 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 3479 else 3480 disable_irq_lockdep(dev->irq); 3481 mask = np->irqmask; 3482 } else { 3483 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 3484 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 3485 mask |= NVREG_IRQ_RX_ALL; 3486 } 3487 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 3488 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 3489 mask |= NVREG_IRQ_TX_ALL; 3490 } 3491 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 3492 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 3493 mask |= NVREG_IRQ_OTHER; 3494 } 3495 } 3496 np->nic_poll_irq = 0; 3497 3498 if (np->recover_error) { 3499 np->recover_error = 0; 3500 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n"); 3501 if (netif_running(dev)) { 3502 netif_tx_lock_bh(dev); 3503 spin_lock(&np->lock); 3504 /* stop engines */ 3505 nv_stop_rx(dev); 3506 nv_stop_tx(dev); 3507 nv_txrx_reset(dev); 3508 /* drain rx queue */ 3509 nv_drain_rx(dev); 3510 nv_drain_tx(dev); 3511 /* reinit driver view of the rx queue */ 3512 set_bufsize(dev); 3513 if (nv_init_ring(dev)) { 3514 if (!np->in_shutdown) 3515 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3516 } 3517 /* reinit nic view of the rx queue */ 3518 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3519 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3520 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3521 base + NvRegRingSizes); 3522 pci_push(base); 3523 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3524 pci_push(base); 3525 3526 /* restart rx engine */ 3527 nv_start_rx(dev); 3528 nv_start_tx(dev); 3529 spin_unlock(&np->lock); 3530 netif_tx_unlock_bh(dev); 3531 } 3532 } 3533 3534 /* FIXME: Do we need synchronize_irq(dev->irq) here? 
*/ 3535 3536 writel(mask, base + NvRegIrqMask); 3537 pci_push(base); 3538 3539 if (!using_multi_irqs(dev)) { 3540 if (np->desc_ver == DESC_VER_3) 3541 nv_nic_irq_optimized(0, dev); 3542 else 3543 nv_nic_irq(0, dev); 3544 if (np->msi_flags & NV_MSI_X_ENABLED) 3545 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 3546 else 3547 enable_irq_lockdep(dev->irq); 3548 } else { 3549 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 3550 nv_nic_irq_rx(0, dev); 3551 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 3552 } 3553 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 3554 nv_nic_irq_tx(0, dev); 3555 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 3556 } 3557 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 3558 nv_nic_irq_other(0, dev); 3559 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 3560 } 3561 } 3562} 3563 3564#ifdef CONFIG_NET_POLL_CONTROLLER 3565static void nv_poll_controller(struct net_device *dev) 3566{ 3567 nv_do_nic_poll((unsigned long) dev); 3568} 3569#endif 3570 3571static void nv_do_stats_poll(unsigned long data) 3572{ 3573 struct net_device *dev = (struct net_device *) data; 3574 struct fe_priv *np = netdev_priv(dev); 3575 3576 nv_get_hw_stats(dev); 3577 3578 if (!np->in_shutdown) 3579 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 3580} 3581 3582static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 3583{ 3584 struct fe_priv *np = netdev_priv(dev); 3585 strcpy(info->driver, "forcedeth"); 3586 strcpy(info->version, FORCEDETH_VERSION); 3587 strcpy(info->bus_info, pci_name(np->pci_dev)); 3588} 3589 3590static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 3591{ 3592 struct fe_priv *np = netdev_priv(dev); 3593 wolinfo->supported = WAKE_MAGIC; 3594 3595 spin_lock_irq(&np->lock); 3596 if (np->wolenabled) 3597 wolinfo->wolopts = WAKE_MAGIC; 3598 spin_unlock_irq(&np->lock); 3599} 3600 3601static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 3602{ 3603 struct fe_priv *np = netdev_priv(dev); 3604 u8 __iomem *base = get_hwbase(dev); 3605 u32 flags = 0; 3606 3607 if (wolinfo->wolopts == 0) { 3608 np->wolenabled = 0; 3609 } else if (wolinfo->wolopts & WAKE_MAGIC) { 3610 np->wolenabled = 1; 3611 flags = NVREG_WAKEUPFLAGS_ENABLE; 3612 } 3613 if (netif_running(dev)) { 3614 spin_lock_irq(&np->lock); 3615 writel(flags, base + NvRegWakeUpFlags); 3616 spin_unlock_irq(&np->lock); 3617 } 3618 return 0; 3619} 3620 3621static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 3622{ 3623 struct fe_priv *np = netdev_priv(dev); 3624 int adv; 3625 3626 spin_lock_irq(&np->lock); 3627 ecmd->port = PORT_MII; 3628 if (!netif_running(dev)) { 3629 /* We do not track link speed / duplex setting if the 3630 * interface is disabled. 
Force a link check */ 3631 if (nv_update_linkspeed(dev)) { 3632 if (!netif_carrier_ok(dev)) 3633 netif_carrier_on(dev); 3634 } else { 3635 if (netif_carrier_ok(dev)) 3636 netif_carrier_off(dev); 3637 } 3638 } 3639 3640 if (netif_carrier_ok(dev)) { 3641 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 3642 case NVREG_LINKSPEED_10: 3643 ecmd->speed = SPEED_10; 3644 break; 3645 case NVREG_LINKSPEED_100: 3646 ecmd->speed = SPEED_100; 3647 break; 3648 case NVREG_LINKSPEED_1000: 3649 ecmd->speed = SPEED_1000; 3650 break; 3651 } 3652 ecmd->duplex = DUPLEX_HALF; 3653 if (np->duplex) 3654 ecmd->duplex = DUPLEX_FULL; 3655 } else { 3656 ecmd->speed = -1; 3657 ecmd->duplex = -1; 3658 } 3659 3660 ecmd->autoneg = np->autoneg; 3661 3662 ecmd->advertising = ADVERTISED_MII; 3663 if (np->autoneg) { 3664 ecmd->advertising |= ADVERTISED_Autoneg; 3665 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3666 if (adv & ADVERTISE_10HALF) 3667 ecmd->advertising |= ADVERTISED_10baseT_Half; 3668 if (adv & ADVERTISE_10FULL) 3669 ecmd->advertising |= ADVERTISED_10baseT_Full; 3670 if (adv & ADVERTISE_100HALF) 3671 ecmd->advertising |= ADVERTISED_100baseT_Half; 3672 if (adv & ADVERTISE_100FULL) 3673 ecmd->advertising |= ADVERTISED_100baseT_Full; 3674 if (np->gigabit == PHY_GIGABIT) { 3675 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3676 if (adv & ADVERTISE_1000FULL) 3677 ecmd->advertising |= ADVERTISED_1000baseT_Full; 3678 } 3679 } 3680 ecmd->supported = (SUPPORTED_Autoneg | 3681 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 3682 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 3683 SUPPORTED_MII); 3684 if (np->gigabit == PHY_GIGABIT) 3685 ecmd->supported |= SUPPORTED_1000baseT_Full; 3686 3687 ecmd->phy_address = np->phyaddr; 3688 ecmd->transceiver = XCVR_EXTERNAL; 3689 3690 /* ignore maxtxpkt, maxrxpkt for now */ 3691 spin_unlock_irq(&np->lock); 3692 return 0; 3693} 3694 3695static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 3696{ 3697 struct fe_priv *np = netdev_priv(dev); 3698 3699 if (ecmd->port != PORT_MII) 3700 return -EINVAL; 3701 if (ecmd->transceiver != XCVR_EXTERNAL) 3702 return -EINVAL; 3703 if (ecmd->phy_address != np->phyaddr) { 3704 /* TODO: support switching between multiple phys. Should be 3705 * trivial, but not enabled due to lack of test hardware. */ 3706 return -EINVAL; 3707 } 3708 if (ecmd->autoneg == AUTONEG_ENABLE) { 3709 u32 mask; 3710 3711 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 3712 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 3713 if (np->gigabit == PHY_GIGABIT) 3714 mask |= ADVERTISED_1000baseT_Full; 3715 3716 if ((ecmd->advertising & mask) == 0) 3717 return -EINVAL; 3718 3719 } else if (ecmd->autoneg == AUTONEG_DISABLE) { 3720 /* Note: autonegotiation disable, speed 1000 intentionally 3721 * forbidden - noone should need that. 
*/ 3722 3723 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) 3724 return -EINVAL; 3725 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 3726 return -EINVAL; 3727 } else { 3728 return -EINVAL; 3729 } 3730 3731 netif_carrier_off(dev); 3732 if (netif_running(dev)) { 3733 nv_disable_irq(dev); 3734 netif_tx_lock_bh(dev); 3735 spin_lock(&np->lock); 3736 /* stop engines */ 3737 nv_stop_rx(dev); 3738 nv_stop_tx(dev); 3739 spin_unlock(&np->lock); 3740 netif_tx_unlock_bh(dev); 3741 } 3742 3743 if (ecmd->autoneg == AUTONEG_ENABLE) { 3744 int adv, bmcr; 3745 3746 np->autoneg = 1; 3747 3748 /* advertise only what has been requested */ 3749 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3750 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3751 if (ecmd->advertising & ADVERTISED_10baseT_Half) 3752 adv |= ADVERTISE_10HALF; 3753 if (ecmd->advertising & ADVERTISED_10baseT_Full) 3754 adv |= ADVERTISE_10FULL; 3755 if (ecmd->advertising & ADVERTISED_100baseT_Half) 3756 adv |= ADVERTISE_100HALF; 3757 if (ecmd->advertising & ADVERTISED_100baseT_Full) 3758 adv |= ADVERTISE_100FULL; 3759 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */ 3760 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 3761 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3762 adv |= ADVERTISE_PAUSE_ASYM; 3763 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 3764 3765 if (np->gigabit == PHY_GIGABIT) { 3766 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3767 adv &= ~ADVERTISE_1000FULL; 3768 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 3769 adv |= ADVERTISE_1000FULL; 3770 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 3771 } 3772 3773 if (netif_running(dev)) 3774 printk(KERN_INFO "%s: link down.\n", dev->name); 3775 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3776 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 3777 bmcr |= BMCR_ANENABLE; 3778 /* reset the phy in order for settings to stick, 3779 * and cause autoneg to start */ 3780 if (phy_reset(dev, bmcr)) { 3781 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 3782 return -EINVAL; 3783 } 3784 } else { 3785 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 3786 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3787 } 3788 } else { 3789 int adv, bmcr; 3790 3791 np->autoneg = 0; 3792 3793 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3794 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3795 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) 3796 adv |= ADVERTISE_10HALF; 3797 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) 3798 adv |= ADVERTISE_10FULL; 3799 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) 3800 adv |= ADVERTISE_100HALF; 3801 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) 3802 adv |= ADVERTISE_100FULL; 3803 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 3804 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */ 3805 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 3806 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3807 } 3808 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { 3809 adv |= ADVERTISE_PAUSE_ASYM; 3810 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3811 } 3812 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 3813 np->fixed_mode = adv; 3814 3815 if (np->gigabit == PHY_GIGABIT) { 3816 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3817 adv &= 
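/* forced (autoneg off) mode never advertises gigabit; see the note above
 * that rejects autoneg-off + SPEED_1000 outright */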
~ADVERTISE_1000FULL; 3818 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 3819 } 3820 3821 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3822 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); 3823 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) 3824 bmcr |= BMCR_FULLDPLX; 3825 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) 3826 bmcr |= BMCR_SPEED100; 3827 if (np->phy_oui == PHY_OUI_MARVELL) { 3828 /* reset the phy in order for forced mode settings to stick */ 3829 if (phy_reset(dev, bmcr)) { 3830 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 3831 return -EINVAL; 3832 } 3833 } else { 3834 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3835 if (netif_running(dev)) { 3836 /* Wait a bit and then reconfigure the nic. */ 3837 udelay(10); 3838 nv_linkchange(dev); 3839 } 3840 } 3841 } 3842 3843 if (netif_running(dev)) { 3844 nv_start_rx(dev); 3845 nv_start_tx(dev); 3846 nv_enable_irq(dev); 3847 } 3848 3849 return 0; 3850} 3851 3852#define FORCEDETH_REGS_VER 1 3853 3854static int nv_get_regs_len(struct net_device *dev) 3855{ 3856 struct fe_priv *np = netdev_priv(dev); 3857 return np->register_size; 3858} 3859 3860static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 3861{ 3862 struct fe_priv *np = netdev_priv(dev); 3863 u8 __iomem *base = get_hwbase(dev); 3864 u32 *rbuf = buf; 3865 int i; 3866 3867 regs->version = FORCEDETH_REGS_VER; 3868 spin_lock_irq(&np->lock); 3869 for (i = 0; i < np->register_size/sizeof(u32); i++) /* '<', not '<=': the buffer ethtool allocates is exactly register_size bytes */ 3870 rbuf[i] = readl(base + i*sizeof(u32)); 3871 spin_unlock_irq(&np->lock); 3872} 3873 3874static int nv_nway_reset(struct net_device *dev) 3875{ 3876 struct fe_priv *np = netdev_priv(dev); 3877 int ret; 3878 3879 if (np->autoneg) { 3880 int bmcr; 3881 3882 netif_carrier_off(dev); 3883 if (netif_running(dev)) { 3884 nv_disable_irq(dev); 3885 netif_tx_lock_bh(dev); 3886 spin_lock(&np->lock); 3887 /* stop engines */ 3888 nv_stop_rx(dev); 3889 nv_stop_tx(dev); 3890 spin_unlock(&np->lock); 3891 netif_tx_unlock_bh(dev); 3892 printk(KERN_INFO "%s: link down.\n", dev->name); 3893 } 3894 3895 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3896 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 3897 bmcr |= BMCR_ANENABLE; 3898 /* reset the phy in order for settings to stick */ 3899 if (phy_reset(dev, bmcr)) { 3900 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 3901 return -EINVAL; 3902 } 3903 } else { 3904 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 3905 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3906 } 3907 3908 if (netif_running(dev)) { 3909 nv_start_rx(dev); 3910 nv_start_tx(dev); 3911 nv_enable_irq(dev); 3912 } 3913 ret = 0; 3914 } else { 3915 ret = -EINVAL; 3916 } 3917 3918 return ret; 3919} 3920 3921static int nv_set_tso(struct net_device *dev, u32 value) 3922{ 3923 struct fe_priv *np = netdev_priv(dev); 3924 3925 if ((np->driver_data & DEV_HAS_CHECKSUM)) 3926 return ethtool_op_set_tso(dev, value); 3927 else 3928 return -EOPNOTSUPP; 3929} 3930 3931static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 3932{ 3933 struct fe_priv *np = netdev_priv(dev); 3934 3935 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 3936 ring->rx_mini_max_pending = 0; 3937 ring->rx_jumbo_max_pending = 0; 3938 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ?
RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 3939 3940 ring->rx_pending = np->rx_ring_size; 3941 ring->rx_mini_pending = 0; 3942 ring->rx_jumbo_pending = 0; 3943 ring->tx_pending = np->tx_ring_size; 3944} 3945 3946static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 3947{ 3948 struct fe_priv *np = netdev_priv(dev); 3949 u8 __iomem *base = get_hwbase(dev); 3950 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; 3951 dma_addr_t ring_addr; 3952 3953 if (ring->rx_pending < RX_RING_MIN || 3954 ring->tx_pending < TX_RING_MIN || 3955 ring->rx_mini_pending != 0 || 3956 ring->rx_jumbo_pending != 0 || 3957 (np->desc_ver == DESC_VER_1 && 3958 (ring->rx_pending > RING_MAX_DESC_VER_1 || 3959 ring->tx_pending > RING_MAX_DESC_VER_1)) || 3960 (np->desc_ver != DESC_VER_1 && 3961 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 3962 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { 3963 return -EINVAL; 3964 } 3965 3966 /* allocate new rings */ 3967 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3968 rxtx_ring = pci_alloc_consistent(np->pci_dev, 3969 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 3970 &ring_addr); 3971 } else { 3972 rxtx_ring = pci_alloc_consistent(np->pci_dev, 3973 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 3974 &ring_addr); 3975 } 3976 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); 3977 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); 3978 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { 3979 /* fall back to old rings */ 3980 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3981 if (rxtx_ring) 3982 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 3983 rxtx_ring, ring_addr); 3984 } else { 3985 if (rxtx_ring) 3986 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 3987 rxtx_ring, ring_addr); 3988 } 3989 if (rx_skbuff) 3990 kfree(rx_skbuff); 3991 if (tx_skbuff) 3992 kfree(tx_skbuff); 3993 goto exit; 3994 } 3995 3996 if (netif_running(dev)) { 3997 nv_disable_irq(dev); 3998 netif_tx_lock_bh(dev); 3999 spin_lock(&np->lock); 4000 /* stop engines */ 4001 nv_stop_rx(dev); 4002 nv_stop_tx(dev); 4003 nv_txrx_reset(dev); 4004 /* drain queues */ 4005 nv_drain_rx(dev); 4006 nv_drain_tx(dev); 4007 /* delete queues */ 4008 free_rings(dev); 4009 } 4010 4011 /* set new values */ 4012 np->rx_ring_size = ring->rx_pending; 4013 np->tx_ring_size = ring->tx_pending; 4014 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4015 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4016 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4017 } else { 4018 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4019 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4020 } 4021 np->rx_skb = (struct nv_skb_map*)rx_skbuff; 4022 np->tx_skb = (struct nv_skb_map*)tx_skbuff; 4023 np->ring_addr = ring_addr; 4024 4025 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4026 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); 4027 4028 if (netif_running(dev)) { 4029 /* reinit driver view of the queues */ 4030 set_bufsize(dev); 4031 if (nv_init_ring(dev)) { 4032 if (!np->in_shutdown) 4033 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4034 } 4035 4036 /* reinit nic view of the queues */ 4037 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4038 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4039 writel( ((np->rx_ring_size-1) << 
NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4040 base + NvRegRingSizes); 4041 pci_push(base); 4042 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4043 pci_push(base); 4044 4045 /* restart engines */ 4046 nv_start_rx(dev); 4047 nv_start_tx(dev); 4048 spin_unlock(&np->lock); 4049 netif_tx_unlock_bh(dev); 4050 nv_enable_irq(dev); 4051 } 4052 return 0; 4053exit: 4054 return -ENOMEM; 4055} 4056 4057static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4058{ 4059 struct fe_priv *np = netdev_priv(dev); 4060 4061 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; 4062 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; 4063 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; 4064} 4065 4066static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4067{ 4068 struct fe_priv *np = netdev_priv(dev); 4069 int adv, bmcr; 4070 4071 if ((!np->autoneg && np->duplex == 0) || 4072 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4073 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", 4074 dev->name); 4075 return -EINVAL; 4076 } 4077 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4078 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 4079 return -EINVAL; 4080 } 4081 4082 netif_carrier_off(dev); 4083 if (netif_running(dev)) { 4084 nv_disable_irq(dev); 4085 netif_tx_lock_bh(dev); 4086 spin_lock(&np->lock); 4087 /* stop engines */ 4088 nv_stop_rx(dev); 4089 nv_stop_tx(dev); 4090 spin_unlock(&np->lock); 4091 netif_tx_unlock_bh(dev); 4092 } 4093 4094 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); 4095 if (pause->rx_pause) 4096 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; 4097 if (pause->tx_pause) 4098 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; 4099 4100 if (np->autoneg && pause->autoneg) { 4101 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; 4102 4103 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4104 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4105 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */ 4106 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4107 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4108 adv |= ADVERTISE_PAUSE_ASYM; 4109 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4110 4111 if (netif_running(dev)) 4112 printk(KERN_INFO "%s: link down.\n", dev->name); 4113 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4114 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4115 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4116 } else { 4117 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4118 if (pause->rx_pause) 4119 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4120 if (pause->tx_pause) 4121 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4122 4123 if (!netif_running(dev)) 4124 nv_update_linkspeed(dev); 4125 else 4126 nv_update_pause(dev, np->pause_flags); 4127 } 4128 4129 if (netif_running(dev)) { 4130 nv_start_rx(dev); 4131 nv_start_tx(dev); 4132 nv_enable_irq(dev); 4133 } 4134 return 0; 4135} 4136 4137static u32 nv_get_rx_csum(struct net_device *dev) 4138{ 4139 struct fe_priv *np = netdev_priv(dev); 4140 return (np->rx_csum) != 0; 4141} 4142 4143static int nv_set_rx_csum(struct net_device *dev, u32 data) 4144{ 4145 struct fe_priv *np = netdev_priv(dev); 4146 u8 __iomem *base = get_hwbase(dev); 4147 int retcode = 0; 4148 4149 if 
(np->driver_data & DEV_HAS_CHECKSUM) { 4150 if (data) { 4151 np->rx_csum = 1; 4152 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4153 } else { 4154 np->rx_csum = 0; 4155 /* vlan is dependent on rx checksum offload */ 4156 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) 4157 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; 4158 } 4159 if (netif_running(dev)) { 4160 spin_lock_irq(&np->lock); 4161 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4162 spin_unlock_irq(&np->lock); 4163 } 4164 } else { 4165 return -EINVAL; 4166 } 4167 4168 return retcode; 4169} 4170 4171static int nv_set_tx_csum(struct net_device *dev, u32 data) 4172{ 4173 struct fe_priv *np = netdev_priv(dev); 4174 4175 if (np->driver_data & DEV_HAS_CHECKSUM) 4176 return ethtool_op_set_tx_hw_csum(dev, data); 4177 else 4178 return -EOPNOTSUPP; 4179} 4180 4181static int nv_set_sg(struct net_device *dev, u32 data) 4182{ 4183 struct fe_priv *np = netdev_priv(dev); 4184 4185 if (np->driver_data & DEV_HAS_CHECKSUM) 4186 return ethtool_op_set_sg(dev, data); 4187 else 4188 return -EOPNOTSUPP; 4189} 4190 4191static int nv_get_stats_count(struct net_device *dev) 4192{ 4193 struct fe_priv *np = netdev_priv(dev); 4194 4195 if (np->driver_data & DEV_HAS_STATISTICS_V1) 4196 return NV_DEV_STATISTICS_V1_COUNT; 4197 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4198 return NV_DEV_STATISTICS_V2_COUNT; 4199 else 4200 return 0; 4201} 4202 4203static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) 4204{ 4205 struct fe_priv *np = netdev_priv(dev); 4206 4207 /* update stats */ 4208 nv_do_stats_poll((unsigned long)dev); 4209 4210 memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64)); 4211} 4212 4213static int nv_self_test_count(struct net_device *dev) 4214{ 4215 struct fe_priv *np = netdev_priv(dev); 4216 4217 if (np->driver_data & DEV_HAS_TEST_EXTENDED) 4218 return NV_TEST_COUNT_EXTENDED; 4219 else 4220 return NV_TEST_COUNT_BASE; 4221} 4222 4223static int nv_link_test(struct net_device *dev) 4224{ 4225 struct fe_priv *np = netdev_priv(dev); 4226 int mii_status; 4227 4228 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4229 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4230 4231 /* check phy link status */ 4232 if (!(mii_status & BMSR_LSTATUS)) 4233 return 0; 4234 else 4235 return 1; 4236} 4237 4238static int nv_register_test(struct net_device *dev) 4239{ 4240 u8 __iomem *base = get_hwbase(dev); 4241 int i = 0; 4242 u32 orig_read, new_read; 4243 4244 do { 4245 orig_read = readl(base + nv_registers_test[i].reg); 4246 4247 /* xor with mask to toggle bits */ 4248 orig_read ^= nv_registers_test[i].mask; 4249 4250 writel(orig_read, base + nv_registers_test[i].reg); 4251 4252 new_read = readl(base + nv_registers_test[i].reg); 4253 4254 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) 4255 return 0; 4256 4257 /* restore original value */ 4258 orig_read ^= nv_registers_test[i].mask; 4259 writel(orig_read, base + nv_registers_test[i].reg); 4260 4261 } while (nv_registers_test[++i].reg != 0); 4262 4263 return 1; 4264} 4265 4266static int nv_interrupt_test(struct net_device *dev) 4267{ 4268 struct fe_priv *np = netdev_priv(dev); 4269 u8 __iomem *base = get_hwbase(dev); 4270 int ret = 1; 4271 int testcnt; 4272 u32 save_msi_flags, save_poll_interval = 0; 4273 4274 if (netif_running(dev)) { 4275 /* free current irq */ 4276 nv_free_irq(dev); 4277 save_poll_interval = readl(base+NvRegPollingInterval); 4278 } 4279 4280 /* flag to test interrupt handler */ 4281 
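/* Interrupt self-test: re-request a single vector, arm only the timer
 * interrupt, sleep ~100ms and check that the ISR set np->intr_test.
 * A zero count is reported as failure via the return value of 2. */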
np->intr_test = 0; 4282 4283 /* setup test irq */ 4284 save_msi_flags = np->msi_flags; 4285 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; 4286 np->msi_flags |= 0x001; /* setup 1 vector */ 4287 if (nv_request_irq(dev, 1)) 4288 return 0; 4289 4290 /* setup timer interrupt */ 4291 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 4292 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4293 4294 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4295 4296 /* wait for at least one interrupt */ 4297 msleep(100); 4298 4299 spin_lock_irq(&np->lock); 4300 4301 /* flag should be set within ISR */ 4302 testcnt = np->intr_test; 4303 if (!testcnt) 4304 ret = 2; 4305 4306 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4307 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 4308 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4309 else 4310 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4311 4312 spin_unlock_irq(&np->lock); 4313 4314 nv_free_irq(dev); 4315 4316 np->msi_flags = save_msi_flags; 4317 4318 if (netif_running(dev)) { 4319 writel(save_poll_interval, base + NvRegPollingInterval); 4320 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4321 /* restore original irq */ 4322 if (nv_request_irq(dev, 0)) 4323 return 0; 4324 } 4325 4326 return ret; 4327} 4328 4329static int nv_loopback_test(struct net_device *dev) 4330{ 4331 struct fe_priv *np = netdev_priv(dev); 4332 u8 __iomem *base = get_hwbase(dev); 4333 struct sk_buff *tx_skb, *rx_skb; 4334 dma_addr_t test_dma_addr; 4335 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 4336 u32 flags; 4337 int len, i, pkt_len; 4338 u8 *pkt_data; 4339 u32 filter_flags = 0; 4340 u32 misc1_flags = 0; 4341 int ret = 1; 4342 4343 if (netif_running(dev)) { 4344 nv_disable_irq(dev); 4345 filter_flags = readl(base + NvRegPacketFilterFlags); 4346 misc1_flags = readl(base + NvRegMisc1); 4347 } else { 4348 nv_txrx_reset(dev); 4349 } 4350 4351 /* reinit driver view of the rx queue */ 4352 set_bufsize(dev); 4353 nv_init_ring(dev); 4354 4355 /* setup hardware for loopback */ 4356 writel(NVREG_MISC1_FORCE, base + NvRegMisc1); 4357 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); 4358 4359 /* reinit nic view of the rx queue */ 4360 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4361 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4362 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4363 base + NvRegRingSizes); 4364 pci_push(base); 4365 4366 /* restart rx engine */ 4367 nv_start_rx(dev); 4368 nv_start_tx(dev); 4369 4370 /* setup packet for tx */ 4371 pkt_len = ETH_DATA_LEN; 4372 tx_skb = dev_alloc_skb(pkt_len); 4373 if (!tx_skb) { 4374 printk(KERN_ERR "dev_alloc_skb() failed during loopback test" 4375 " of %s\n", dev->name); 4376 ret = 0; 4377 goto out; 4378 } 4379 pkt_data = skb_put(tx_skb, pkt_len); 4380 for (i = 0; i < pkt_len; i++) 4381 pkt_data[i] = (u8)(i & 0xff); 4382 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, 4383 tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE); 4384 4385 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4386 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 4387 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 4388 } else { 4389 np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32; 4390 np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF; 4391 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | 
np->tx_flags | tx_flags_extra); 4392 } 4393 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4394 pci_push(get_hwbase(dev)); 4395 4396 msleep(500); 4397 4398 /* check for rx of the packet */ 4399 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4400 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 4401 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 4402 4403 } else { 4404 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); 4405 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 4406 } 4407 4408 if (flags & NV_RX_AVAIL) { 4409 ret = 0; 4410 } else if (np->desc_ver == DESC_VER_1) { 4411 if (flags & NV_RX_ERROR) 4412 ret = 0; 4413 } else { 4414 if (flags & NV_RX2_ERROR) { 4415 ret = 0; 4416 } 4417 } 4418 4419 if (ret) { 4420 if (len != pkt_len) { 4421 ret = 0; 4422 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 4423 dev->name, len, pkt_len); 4424 } else { 4425 rx_skb = np->rx_skb[0].skb; 4426 for (i = 0; i < pkt_len; i++) { 4427 if (rx_skb->data[i] != (u8)(i & 0xff)) { 4428 ret = 0; 4429 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", 4430 dev->name, i); 4431 break; 4432 } 4433 } 4434 } 4435 } else { 4436 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); 4437 } 4438 4439 pci_unmap_page(np->pci_dev, test_dma_addr, 4440 tx_skb->end-tx_skb->data, 4441 PCI_DMA_TODEVICE); 4442 dev_kfree_skb_any(tx_skb); 4443 out: 4444 /* stop engines */ 4445 nv_stop_rx(dev); 4446 nv_stop_tx(dev); 4447 nv_txrx_reset(dev); 4448 /* drain rx queue */ 4449 nv_drain_rx(dev); 4450 nv_drain_tx(dev); 4451 4452 if (netif_running(dev)) { 4453 writel(misc1_flags, base + NvRegMisc1); 4454 writel(filter_flags, base + NvRegPacketFilterFlags); 4455 nv_enable_irq(dev); 4456 } 4457 4458 return ret; 4459} 4460 4461static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) 4462{ 4463 struct fe_priv *np = netdev_priv(dev); 4464 u8 __iomem *base = get_hwbase(dev); 4465 int result; 4466 memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64)); 4467 4468 if (!nv_link_test(dev)) { 4469 test->flags |= ETH_TEST_FL_FAILED; 4470 buffer[0] = 1; 4471 } 4472 4473 if (test->flags & ETH_TEST_FL_OFFLINE) { 4474 if (netif_running(dev)) { 4475 netif_stop_queue(dev); 4476 netif_poll_disable(dev); 4477 netif_tx_lock_bh(dev); 4478 spin_lock_irq(&np->lock); 4479 nv_disable_hw_interrupts(dev, np->irqmask); 4480 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 4481 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4482 } else { 4483 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4484 } 4485 /* stop engines */ 4486 nv_stop_rx(dev); 4487 nv_stop_tx(dev); 4488 nv_txrx_reset(dev); 4489 /* drain rx queue */ 4490 nv_drain_rx(dev); 4491 nv_drain_tx(dev); 4492 spin_unlock_irq(&np->lock); 4493 netif_tx_unlock_bh(dev); 4494 } 4495 4496 if (!nv_register_test(dev)) { 4497 test->flags |= ETH_TEST_FL_FAILED; 4498 buffer[1] = 1; 4499 } 4500 4501 result = nv_interrupt_test(dev); 4502 if (result != 1) { 4503 test->flags |= ETH_TEST_FL_FAILED; 4504 buffer[2] = 1; 4505 } 4506 if (result == 0) { 4507 /* bail out */ 4508 return; 4509 } 4510 4511 if (!nv_loopback_test(dev)) { 4512 test->flags |= ETH_TEST_FL_FAILED; 4513 buffer[3] = 1; 4514 } 4515 4516 if (netif_running(dev)) { 4517 /* reinit driver view of the rx queue */ 4518 set_bufsize(dev); 4519 if (nv_init_ring(dev)) { 4520 if (!np->in_shutdown) 4521 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4522 } 4523 /* reinit nic view of the rx queue */ 4524 
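/* restore the rx/tx state the offline tests tore down before the queue
 * and interrupts are re-enabled below */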
writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4525 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4526 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4527 base + NvRegRingSizes); 4528 pci_push(base); 4529 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4530 pci_push(base); 4531 /* restart rx engine */ 4532 nv_start_rx(dev); 4533 nv_start_tx(dev); 4534 netif_start_queue(dev); 4535 netif_poll_enable(dev); 4536 nv_enable_hw_interrupts(dev, np->irqmask); 4537 } 4538 } 4539} 4540 4541static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) 4542{ 4543 switch (stringset) { 4544 case ETH_SS_STATS: 4545 memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str)); 4546 break; 4547 case ETH_SS_TEST: 4548 memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str)); 4549 break; 4550 } 4551} 4552 4553static const struct ethtool_ops ops = { 4554 .get_drvinfo = nv_get_drvinfo, 4555 .get_link = ethtool_op_get_link, 4556 .get_wol = nv_get_wol, 4557 .set_wol = nv_set_wol, 4558 .get_settings = nv_get_settings, 4559 .set_settings = nv_set_settings, 4560 .get_regs_len = nv_get_regs_len, 4561 .get_regs = nv_get_regs, 4562 .nway_reset = nv_nway_reset, 4563 .get_perm_addr = ethtool_op_get_perm_addr, 4564 .get_tso = ethtool_op_get_tso, 4565 .set_tso = nv_set_tso, 4566 .get_ringparam = nv_get_ringparam, 4567 .set_ringparam = nv_set_ringparam, 4568 .get_pauseparam = nv_get_pauseparam, 4569 .set_pauseparam = nv_set_pauseparam, 4570 .get_rx_csum = nv_get_rx_csum, 4571 .set_rx_csum = nv_set_rx_csum, 4572 .get_tx_csum = ethtool_op_get_tx_csum, 4573 .set_tx_csum = nv_set_tx_csum, 4574 .get_sg = ethtool_op_get_sg, 4575 .set_sg = nv_set_sg, 4576 .get_strings = nv_get_strings, 4577 .get_stats_count = nv_get_stats_count, 4578 .get_ethtool_stats = nv_get_ethtool_stats, 4579 .self_test_count = nv_self_test_count, 4580 .self_test = nv_self_test, 4581}; 4582 4583static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 4584{ 4585 struct fe_priv *np = get_nvpriv(dev); 4586 4587 spin_lock_irq(&np->lock); 4588 4589 /* save vlan group */ 4590 np->vlangrp = grp; 4591 4592 if (grp) { 4593 /* enable vlan on MAC */ 4594 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; 4595 } else { 4596 /* disable vlan on MAC */ 4597 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 4598 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 4599 } 4600 4601 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4602 4603 spin_unlock_irq(&np->lock); 4604}; 4605 4606static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 4607{ 4608 /* nothing to do */ 4609}; 4610 4611/* The mgmt unit and driver use a semaphore to access the phy during init */ 4612static int nv_mgmt_acquire_sema(struct net_device *dev) 4613{ 4614 u8 __iomem *base = get_hwbase(dev); 4615 int i; 4616 u32 tx_ctrl, mgmt_sema; 4617 4618 for (i = 0; i < 10; i++) { 4619 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; 4620 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) 4621 break; 4622 msleep(500); 4623 } 4624 4625 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) 4626 return 0; 4627 4628 for (i = 0; i < 2; i++) { 4629 tx_ctrl = readl(base + NvRegTransmitterControl); 4630 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; 4631 writel(tx_ctrl, base + NvRegTransmitterControl); 4632 4633 /* verify that semaphore was acquired */ 4634 tx_ctrl = 
readl(base + NvRegTransmitterControl); 4635 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && 4636 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) 4637 return 1; 4638 else 4639 udelay(50); 4640 } 4641 4642 return 0; 4643} 4644 4645static int nv_open(struct net_device *dev) 4646{ 4647 struct fe_priv *np = netdev_priv(dev); 4648 u8 __iomem *base = get_hwbase(dev); 4649 int ret = 1; 4650 int oom, i; 4651 4652 dprintk(KERN_DEBUG "nv_open: begin\n"); 4653 4654 /* erase previous misconfiguration */ 4655 if (np->driver_data & DEV_HAS_POWER_CNTRL) 4656 nv_mac_reset(dev); 4657 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 4658 writel(0, base + NvRegMulticastAddrB); 4659 writel(0, base + NvRegMulticastMaskA); 4660 writel(0, base + NvRegMulticastMaskB); 4661 writel(0, base + NvRegPacketFilterFlags); 4662 4663 writel(0, base + NvRegTransmitterControl); 4664 writel(0, base + NvRegReceiverControl); 4665 4666 writel(0, base + NvRegAdapterControl); 4667 4668 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) 4669 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 4670 4671 /* initialize descriptor rings */ 4672 set_bufsize(dev); 4673 oom = nv_init_ring(dev); 4674 4675 writel(0, base + NvRegLinkSpeed); 4676 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 4677 nv_txrx_reset(dev); 4678 writel(0, base + NvRegUnknownSetupReg6); 4679 4680 np->in_shutdown = 0; 4681 4682 /* give hw rings */ 4683 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4684 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4685 base + NvRegRingSizes); 4686 4687 writel(np->linkspeed, base + NvRegLinkSpeed); 4688 if (np->desc_ver == DESC_VER_1) 4689 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); 4690 else 4691 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); 4692 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4693 writel(np->vlanctl_bits, base + NvRegVlanControl); 4694 pci_push(base); 4695 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 4696 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 4697 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 4698 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 4699 4700 writel(0, base + NvRegMIIMask); 4701 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4702 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 4703 4704 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 4705 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 4706 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 4707 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4708 4709 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 4710 get_random_bytes(&i, sizeof(i)); 4711 writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); 4712 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); 4713 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); 4714 if (poll_interval == -1) { 4715 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 4716 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 4717 else 4718 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 4719 } 4720 else 4721 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 4722 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4723 writel((np->phyaddr << 
NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 4724 base + NvRegAdapterControl); 4725 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); 4726 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); 4727 if (np->wolenabled) 4728 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 4729 4730 i = readl(base + NvRegPowerState); 4731 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 4732 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 4733 4734 pci_push(base); 4735 udelay(10); 4736 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); 4737 4738 nv_disable_hw_interrupts(dev, np->irqmask); 4739 pci_push(base); 4740 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 4741 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4742 pci_push(base); 4743 4744 if (nv_request_irq(dev, 0)) { 4745 goto out_drain; 4746 } 4747 4748 /* ask for interrupts */ 4749 nv_enable_hw_interrupts(dev, np->irqmask); 4750 4751 spin_lock_irq(&np->lock); 4752 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 4753 writel(0, base + NvRegMulticastAddrB); 4754 writel(0, base + NvRegMulticastMaskA); 4755 writel(0, base + NvRegMulticastMaskB); 4756 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 4757 /* One manual link speed update: Interrupts are enabled, future link 4758 * speed changes cause interrupts and are handled by nv_link_irq(). 4759 */ 4760 { 4761 u32 miistat; 4762 miistat = readl(base + NvRegMIIStatus); 4763 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 4764 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); 4765 } 4766 /* set linkspeed to invalid value, thus force nv_update_linkspeed 4767 * to init hw */ 4768 np->linkspeed = 0; 4769 ret = nv_update_linkspeed(dev); 4770 nv_start_rx(dev); 4771 nv_start_tx(dev); 4772 netif_start_queue(dev); 4773 netif_poll_enable(dev); 4774 4775 if (ret) { 4776 netif_carrier_on(dev); 4777 } else { 4778 printk("%s: no link during initialization.\n", dev->name); 4779 netif_carrier_off(dev); 4780 } 4781 if (oom) 4782 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4783 4784 /* start statistics timer */ 4785 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) 4786 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 4787 4788 spin_unlock_irq(&np->lock); 4789 4790 return 0; 4791out_drain: 4792 drain_ring(dev); 4793 return ret; 4794} 4795 4796static int nv_close(struct net_device *dev) 4797{ 4798 struct fe_priv *np = netdev_priv(dev); 4799 u8 __iomem *base; 4800 4801 spin_lock_irq(&np->lock); 4802 np->in_shutdown = 1; 4803 spin_unlock_irq(&np->lock); 4804 netif_poll_disable(dev); 4805 synchronize_irq(dev->irq); 4806 4807 del_timer_sync(&np->oom_kick); 4808 del_timer_sync(&np->nic_poll); 4809 del_timer_sync(&np->stats_poll); 4810 4811 netif_stop_queue(dev); 4812 spin_lock_irq(&np->lock); 4813 nv_stop_tx(dev); 4814 nv_stop_rx(dev); 4815 nv_txrx_reset(dev); 4816 4817 /* disable interrupts on the nic or we will lock up */ 4818 base = get_hwbase(dev); 4819 nv_disable_hw_interrupts(dev, np->irqmask); 4820 pci_push(base); 4821 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); 4822 4823 spin_unlock_irq(&np->lock); 4824 4825 nv_free_irq(dev); 4826 4827 drain_ring(dev); 4828 4829 if (np->wolenabled) 4830 nv_start_rx(dev); 4831 4832 /* FIXME: power down nic */ 4833 4834 return 0; 4835} 4836 4837static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 4838{ 4839 struct net_device *dev; 4840 struct fe_priv *np; 4841 
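/* probe-time scratch: addr/base locate the register window BAR, txreg
 * carries the MAC-address-order workaround bit, and powerstate is used to
 * take DEV_HAS_POWER_CNTRL parts out of low power mode */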
unsigned long addr; 4842 u8 __iomem *base; 4843 int err, i; 4844 u32 powerstate, txreg; 4845 u32 phystate_orig = 0, phystate; 4846 int phyinitialized = 0; 4847 4848 dev = alloc_etherdev(sizeof(struct fe_priv)); 4849 err = -ENOMEM; 4850 if (!dev) 4851 goto out; 4852 4853 np = netdev_priv(dev); 4854 np->pci_dev = pci_dev; 4855 spin_lock_init(&np->lock); 4856 SET_MODULE_OWNER(dev); 4857 SET_NETDEV_DEV(dev, &pci_dev->dev); 4858 4859 init_timer(&np->oom_kick); 4860 np->oom_kick.data = (unsigned long) dev; 4861 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ 4862 init_timer(&np->nic_poll); 4863 np->nic_poll.data = (unsigned long) dev; 4864 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ 4865 init_timer(&np->stats_poll); 4866 np->stats_poll.data = (unsigned long) dev; 4867 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ 4868 4869 err = pci_enable_device(pci_dev); 4870 if (err) { 4871 printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n", 4872 err, pci_name(pci_dev)); 4873 goto out_free; 4874 } 4875 4876 pci_set_master(pci_dev); 4877 4878 err = pci_request_regions(pci_dev, DRV_NAME); 4879 if (err < 0) 4880 goto out_disable; 4881 4882 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) 4883 np->register_size = NV_PCI_REGSZ_VER3; 4884 else if (id->driver_data & DEV_HAS_STATISTICS_V1) 4885 np->register_size = NV_PCI_REGSZ_VER2; 4886 else 4887 np->register_size = NV_PCI_REGSZ_VER1; 4888 4889 err = -EINVAL; 4890 addr = 0; 4891 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 4892 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", 4893 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), 4894 pci_resource_len(pci_dev, i), 4895 pci_resource_flags(pci_dev, i)); 4896 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 4897 pci_resource_len(pci_dev, i) >= np->register_size) { 4898 addr = pci_resource_start(pci_dev, i); 4899 break; 4900 } 4901 } 4902 if (i == DEVICE_COUNT_RESOURCE) { 4903 printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n", 4904 pci_name(pci_dev)); 4905 goto out_relreg; 4906 } 4907 4908 /* copy of driver data */ 4909 np->driver_data = id->driver_data; 4910 4911 /* handle different descriptor versions */ 4912 if (id->driver_data & DEV_HAS_HIGH_DMA) { 4913 /* packet format 3: supports 40-bit addressing */ 4914 np->desc_ver = DESC_VER_3; 4915 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 4916 if (dma_64bit) { 4917 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { 4918 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", 4919 pci_name(pci_dev)); 4920 } else { 4921 dev->features |= NETIF_F_HIGHDMA; 4922 printk(KERN_INFO "forcedeth: using HIGHDMA\n"); 4923 } 4924 if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) { 4925 printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n", 4926 pci_name(pci_dev)); 4927 } 4928 } 4929 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 4930 /* packet format 2: supports jumbo frames */ 4931 np->desc_ver = DESC_VER_2; 4932 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; 4933 } else { 4934 /* original packet format */ 4935 np->desc_ver = DESC_VER_1; 4936 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; 4937 } 4938 4939 np->pkt_limit = NV_PKTLIMIT_1; 4940 if (id->driver_data & DEV_HAS_LARGEDESC) 4941 np->pkt_limit = NV_PKTLIMIT_2; 4942 4943 if (id->driver_data & DEV_HAS_CHECKSUM) { 4944 np->rx_csum = 1; 4945 np->txrxctl_bits |= 
NVREG_TXRXCTL_RXCHECK; 4946 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 4947 dev->features |= NETIF_F_TSO; 4948 } 4949 4950 np->vlanctl_bits = 0; 4951 if (id->driver_data & DEV_HAS_VLAN) { 4952 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; 4953 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; 4954 dev->vlan_rx_register = nv_vlan_rx_register; 4955 dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid; 4956 } 4957 4958 np->msi_flags = 0; 4959 if ((id->driver_data & DEV_HAS_MSI) && msi) { 4960 np->msi_flags |= NV_MSI_CAPABLE; 4961 } 4962 if ((id->driver_data & DEV_HAS_MSI_X) && msix) { 4963 np->msi_flags |= NV_MSI_X_CAPABLE; 4964 } 4965 4966 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; 4967 if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) { 4968 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; 4969 } 4970 4971 4972 err = -ENOMEM; 4973 np->base = ioremap(addr, np->register_size); 4974 if (!np->base) 4975 goto out_relreg; 4976 dev->base_addr = (unsigned long)np->base; 4977 4978 dev->irq = pci_dev->irq; 4979 4980 np->rx_ring_size = RX_RING_DEFAULT; 4981 np->tx_ring_size = TX_RING_DEFAULT; 4982 4983 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4984 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 4985 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 4986 &np->ring_addr); 4987 if (!np->rx_ring.orig) 4988 goto out_unmap; 4989 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4990 } else { 4991 np->rx_ring.ex = pci_alloc_consistent(pci_dev, 4992 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 4993 &np->ring_addr); 4994 if (!np->rx_ring.ex) 4995 goto out_unmap; 4996 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4997 } 4998 np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL); 4999 np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL); 5000 if (!np->rx_skb || !np->tx_skb) 5001 goto out_freering; 5002 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 5003 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); 5004 5005 dev->open = nv_open; 5006 dev->stop = nv_close; 5007 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 5008 dev->hard_start_xmit = nv_start_xmit; 5009 else 5010 dev->hard_start_xmit = nv_start_xmit_optimized; 5011 dev->get_stats = nv_get_stats; 5012 dev->change_mtu = nv_change_mtu; 5013 dev->set_mac_address = nv_set_mac_address; 5014 dev->set_multicast_list = nv_set_multicast; 5015#ifdef CONFIG_NET_POLL_CONTROLLER 5016 dev->poll_controller = nv_poll_controller; 5017#endif 5018 dev->weight = RX_WORK_PER_LOOP; 5019#ifdef CONFIG_FORCEDETH_NAPI 5020 dev->poll = nv_napi_poll; 5021#endif 5022 SET_ETHTOOL_OPS(dev, &ops); 5023 dev->tx_timeout = nv_tx_timeout; 5024 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 5025 5026 pci_set_drvdata(pci_dev, dev); 5027 5028 /* read the mac address */ 5029 base = get_hwbase(dev); 5030 np->orig_mac[0] = readl(base + NvRegMacAddrA); 5031 np->orig_mac[1] = readl(base + NvRegMacAddrB); 5032 5033 /* check the workaround bit for correct mac address order */ 5034 txreg = readl(base + NvRegTransmitPoll); 5035 if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { 5036 /* mac address is already in correct order */ 5037 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; 5038 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; 5039 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; 5040 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; 5041 dev->dev_addr[4] = 
(np->orig_mac[1] >> 0) & 0xff; 5042 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; 5043 } else { 5044 /* need to reverse mac address to correct order */ 5045 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; 5046 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; 5047 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; 5048 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; 5049 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 5050 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 5051 /* set permanent address to be correct aswell */ 5052 np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 5053 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 5054 np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 5055 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5056 } 5057 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 5058 5059 if (!is_valid_ether_addr(dev->perm_addr)) { 5060 /* 5061 * Bad mac address. At least one bios sets the mac address 5062 * to 01:23:45:67:89:ab 5063 */ 5064 printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n", 5065 pci_name(pci_dev), 5066 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 5067 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 5068 printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n"); 5069 dev->dev_addr[0] = 0x00; 5070 dev->dev_addr[1] = 0x00; 5071 dev->dev_addr[2] = 0x6c; 5072 get_random_bytes(&dev->dev_addr[3], 3); 5073 } 5074 5075 dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev), 5076 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 5077 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 5078 5079 /* set mac address */ 5080 nv_copy_mac_to_hw(dev); 5081 5082 /* disable WOL */ 5083 writel(0, base + NvRegWakeUpFlags); 5084 np->wolenabled = 0; 5085 5086 if (id->driver_data & DEV_HAS_POWER_CNTRL) { 5087 u8 revision_id; 5088 pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id); 5089 5090 /* take phy and nic out of low power mode */ 5091 powerstate = readl(base + NvRegPowerState2); 5092 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; 5093 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || 5094 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) && 5095 revision_id >= 0xA3) 5096 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; 5097 writel(powerstate, base + NvRegPowerState2); 5098 } 5099 5100 if (np->desc_ver == DESC_VER_1) { 5101 np->tx_flags = NV_TX_VALID; 5102 } else { 5103 np->tx_flags = NV_TX2_VALID; 5104 } 5105 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { 5106 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 5107 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 5108 np->msi_flags |= 0x0003; 5109 } else { 5110 np->irqmask = NVREG_IRQMASK_CPU; 5111 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 5112 np->msi_flags |= 0x0001; 5113 } 5114 5115 if (id->driver_data & DEV_NEED_TIMERIRQ) 5116 np->irqmask |= NVREG_IRQ_TIMER; 5117 if (id->driver_data & DEV_NEED_LINKTIMER) { 5118 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev)); 5119 np->need_linktimer = 1; 5120 np->link_timeout = jiffies + LINK_TIMEOUT; 5121 } else { 5122 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev)); 5123 np->need_linktimer = 0; 5124 } 5125 5126 /* clear phy state and temporarily halt phy interrupts */ 5127 writel(0, base + NvRegMIIMask); 5128 phystate = readl(base + NvRegAdapterControl); 5129 if (phystate & NVREG_ADAPTCTL_RUNNING) { 5130 
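/* remember that the phy state machine was running so the out_error path
 * can turn NVREG_ADAPTCTL_RUNNING back on */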
phystate_orig = 1; 5131 phystate &= ~NVREG_ADAPTCTL_RUNNING; 5132 writel(phystate, base + NvRegAdapterControl); 5133 } 5134 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 5135 5136 if (id->driver_data & DEV_HAS_MGMT_UNIT) { 5137 /* management unit running on the mac? */ 5138 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) { 5139 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; 5140 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use); 5141 for (i = 0; i < 5000; i++) { 5142 msleep(1); 5143 if (nv_mgmt_acquire_sema(dev)) { 5144 /* management unit setup the phy already? */ 5145 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == 5146 NVREG_XMITCTL_SYNC_PHY_INIT) { 5147 /* phy is inited by mgmt unit */ 5148 phyinitialized = 1; 5149 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); 5150 } else { 5151 /* we need to init the phy */ 5152 } 5153 break; 5154 } 5155 } 5156 } 5157 } 5158 5159 /* find a suitable phy */ 5160 for (i = 1; i <= 32; i++) { 5161 int id1, id2; 5162 int phyaddr = i & 0x1F; 5163 5164 spin_lock_irq(&np->lock); 5165 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ); 5166 spin_unlock_irq(&np->lock); 5167 if (id1 < 0 || id1 == 0xffff) 5168 continue; 5169 spin_lock_irq(&np->lock); 5170 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ); 5171 spin_unlock_irq(&np->lock); 5172 if (id2 < 0 || id2 == 0xffff) 5173 continue; 5174 5175 np->phy_model = id2 & PHYID2_MODEL_MASK; 5176 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; 5177 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; 5178 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", 5179 pci_name(pci_dev), id1, id2, phyaddr); 5180 np->phyaddr = phyaddr; 5181 np->phy_oui = id1 | id2; 5182 break; 5183 } 5184 if (i == 33) { 5185 printk(KERN_INFO "%s: open: Could not find a valid PHY.\n", 5186 pci_name(pci_dev)); 5187 goto out_error; 5188 } 5189 5190 if (!phyinitialized) { 5191 /* reset it */ 5192 phy_init(dev); 5193 } else { 5194 /* see if it is a gigabit phy */ 5195 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 5196 if (mii_status & PHY_GIGABIT) { 5197 np->gigabit = PHY_GIGABIT; 5198 } 5199 } 5200 5201 /* set default link speed settings */ 5202 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 5203 np->duplex = 0; 5204 np->autoneg = 1; 5205 5206 err = register_netdev(dev); 5207 if (err) { 5208 printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err); 5209 goto out_error; 5210 } 5211 printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n", 5212 dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device, 5213 pci_name(pci_dev)); 5214 5215 return 0; 5216 5217out_error: 5218 if (phystate_orig) 5219 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); 5220 pci_set_drvdata(pci_dev, NULL); 5221out_freering: 5222 free_rings(dev); 5223out_unmap: 5224 iounmap(get_hwbase(dev)); 5225out_relreg: 5226 pci_release_regions(pci_dev); 5227out_disable: 5228 pci_disable_device(pci_dev); 5229out_free: 5230 free_netdev(dev); 5231out: 5232 return err; 5233} 5234 5235static void __devexit nv_remove(struct pci_dev *pci_dev) 5236{ 5237 struct net_device *dev = pci_get_drvdata(pci_dev); 5238 struct fe_priv *np = netdev_priv(dev); 5239 u8 __iomem *base = get_hwbase(dev); 5240 5241 unregister_netdev(dev); 5242 5243 /* special op: write back the misordered MAC address - otherwise 5244 * the next nv_probe would see a 
wrong address. 5245 */ 5246 writel(np->orig_mac[0], base + NvRegMacAddrA); 5247 writel(np->orig_mac[1], base + NvRegMacAddrB); 5248 5249 /* free all structures */ 5250 free_rings(dev); 5251 iounmap(get_hwbase(dev)); 5252 pci_release_regions(pci_dev); 5253 pci_disable_device(pci_dev); 5254 free_netdev(dev); 5255 pci_set_drvdata(pci_dev, NULL); 5256} 5257 5258#ifdef CONFIG_PM 5259static int nv_suspend(struct pci_dev *pdev, pm_message_t state) 5260{ 5261 struct net_device *dev = pci_get_drvdata(pdev); 5262 struct fe_priv *np = netdev_priv(dev); 5263 5264 if (!netif_running(dev)) 5265 goto out; 5266 5267 netif_device_detach(dev); 5268 5269 // Gross. 5270 nv_close(dev); 5271 5272 pci_save_state(pdev); 5273 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled); 5274 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 5275out: 5276 return 0; 5277} 5278 5279static int nv_resume(struct pci_dev *pdev) 5280{ 5281 struct net_device *dev = pci_get_drvdata(pdev); 5282 int rc = 0; 5283 5284 if (!netif_running(dev)) 5285 goto out; 5286 5287 netif_device_attach(dev); 5288 5289 pci_set_power_state(pdev, PCI_D0); 5290 pci_restore_state(pdev); 5291 pci_enable_wake(pdev, PCI_D0, 0); 5292 5293 rc = nv_open(dev); 5294out: 5295 return rc; 5296} 5297#else 5298#define nv_suspend NULL 5299#define nv_resume NULL 5300#endif /* CONFIG_PM */ 5301 5302static struct pci_device_id pci_tbl[] = { 5303 { /* nForce Ethernet Controller */ 5304 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1), 5305 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 5306 }, 5307 { /* nForce2 Ethernet Controller */ 5308 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2), 5309 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 5310 }, 5311 { /* nForce3 Ethernet Controller */ 5312 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3), 5313 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 5314 }, 5315 { /* nForce3 Ethernet Controller */ 5316 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4), 5317 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 5318 }, 5319 { /* nForce3 Ethernet Controller */ 5320 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5), 5321 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 5322 }, 5323 { /* nForce3 Ethernet Controller */ 5324 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6), 5325 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 5326 }, 5327 { /* nForce3 Ethernet Controller */ 5328 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7), 5329 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 5330 }, 5331 { /* CK804 Ethernet Controller */ 5332 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), 5333 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1, 5334 }, 5335 { /* CK804 Ethernet Controller */ 5336 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), 5337 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1, 5338 }, 5339 { /* MCP04 Ethernet Controller */ 5340 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), 5341 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1, 5342 }, 5343 { /* MCP04 
Ethernet Controller */ 5344 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), 5345 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1, 5346 }, 5347 { /* MCP51 Ethernet Controller */ 5348 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), 5349 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1, 5350 }, 5351 { /* MCP51 Ethernet Controller */ 5352 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), 5353 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1, 5354 }, 5355 { /* MCP55 Ethernet Controller */ 5356 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 5357 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5358 }, 5359 { /* MCP55 Ethernet Controller */ 5360 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 5361 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5362 }, 5363 { /* MCP61 Ethernet Controller */ 5364 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), 5365 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5366 }, 5367 { /* MCP61 Ethernet Controller */ 5368 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), 5369 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5370 }, 5371 { /* MCP61 Ethernet Controller */ 5372 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), 5373 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5374 }, 5375 { /* MCP61 Ethernet Controller */ 5376 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), 5377 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5378 }, 5379 { /* MCP65 Ethernet Controller */ 5380 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), 5381 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5382 }, 5383 { /* MCP65 Ethernet Controller */ 5384 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), 5385 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5386 }, 5387 { /* MCP65 Ethernet Controller */ 5388 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), 5389 .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5390 }, 5391 { /* MCP65 Ethernet Controller */ 5392 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), 5393 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5394 }, 5395 { /* MCP67 Ethernet Controller */ 5396 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), 5397 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5398 }, 5399 { /* MCP67 Ethernet Controller */ 5400 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), 5401 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5402 }, 5403 { /* MCP67 Ethernet Controller */ 5404 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), 5405 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5406 }, 5407 { /* MCP67 Ethernet Controller */ 5408 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), 5409 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5410 }, 5411 {0,}, 5412}; 5413 5414static struct pci_driver driver = { 5415 .name = "forcedeth", 5416 .id_table = pci_tbl, 5417 .probe = nv_probe, 5418 .remove = __devexit_p(nv_remove), 5419 .suspend = nv_suspend, 5420 .resume = nv_resume, 5421}; 5422 5423static int __init init_nic(void) 5424{ 5425 printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); 5426 return pci_register_driver(&driver); 5427} 5428 5429static void __exit exit_nic(void) 5430{ 5431 pci_unregister_driver(&driver); 5432} 5433 5434module_param(max_interrupt_work, int, 0); 5435MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt"); 5436module_param(optimization_mode, int, 0); 5437MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer."); 5438module_param(poll_interval, int, 0); 5439MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535."); 5440module_param(msi, int, 0); 5441MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0."); 5442module_param(msix, int, 0); 5443MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0."); 5444module_param(dma_64bit, int, 0); 5445MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); 5446 5447MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); 5448MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); 5449MODULE_LICENSE("GPL"); 5450 5451MODULE_DEVICE_TABLE(pci, pci_tbl); 5452 5453module_init(init_nic); 5454module_exit(exit_nic);
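/*
 * Example module load (values are illustrative only):
 *   modprobe forcedeth optimization_mode=1 poll_interval=970 msi=1 msix=1 dma_64bit=1
 * optimization_mode=1 selects the timer-driven (CPU) interrupt mode and
 * poll_interval then sets how often that timer interrupt fires, as described
 * by the MODULE_PARM_DESC strings above.
 */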