/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,5,6 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Changelog:
 *	0.01: 05 Oct 2003: First release that compiles without warnings.
 *	0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *			   Check all PCI BARs for the register window.
 *			   udelay added to mii_rw.
 *	0.03: 06 Oct 2003: Initialize dev->irq.
 *	0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 *	0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 *	0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 *			   irq mask updated
 *	0.07: 14 Oct 2003: Further irq mask updates.
 *	0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *			   added into irq handler, NULL check for drain_ring.
 *	0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *			   requested interrupt sources.
 *	0.10: 20 Oct 2003: First cleanup for release.
 *	0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *			   MAC Address init fix, set_multicast cleanup.
 *	0.12: 23 Oct 2003: Cleanups for release.
 *	0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *			   Set link speed correctly. start rx before starting
 *			   tx (nv_start_rx sets the link speed).
 *	0.14: 25 Oct 2003: Nic dependent irq mask.
 *	0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 *			   open.
 *	0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *			   increased to 1628 bytes.
 *	0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 *			   the tx length.
 *	0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 *	0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *			   addresses, really stop rx if already running
 *			   in nv_start_rx, clean up a bit.
 *	0.20: 07 Dec 2003: alloc fixes
 *	0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 *	0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 *			   on close.
 *	0.23: 26 Jan 2004: various small cleanups
 *	0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 *	0.25: 09 Mar 2004: wol support
 *	0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 *	0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *			   added CK804/MCP04 device IDs, code fixes
 *			   for registers, link status and other minor fixes.
 *	0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 *	0.29: 31 Aug 2004: Add backup timer for link change notification.
 *	0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *			   into nv_close, otherwise reenabling for wol can
 *			   cause DMA to kfree'd memory.
 *	0.31: 14 Nov 2004: ethtool support for getting/setting link
 *			   capabilities.
 *	0.32: 16 Apr 2005: RX_ERROR4 handling added.
 *	0.33: 16 May 2005: Support for MCP51 added.
 *	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 *	0.35: 26 Jun 2005: Support for MCP55 added.
 *	0.36: 28 Jun 2005: Add jumbo frame support.
 *	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 *	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 *			   per-packet flags.
 *	0.39: 18 Jul 2005: Add 64bit descriptor support.
 *	0.40: 19 Jul 2005: Add support for mac address change.
 *	0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 *			   of nv_remove
 *	0.42: 06 Aug 2005: Fix lack of link speed initialization
 *			   in the second (and later) nv_open call
 *	0.43: 10 Aug 2005: Add support for tx checksum.
 *	0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 *	0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 *	0.46: 20 Oct 2005: Add irq optimization modes.
 *	0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 *	0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 *	0.49: 10 Dec 2005: Fix tso for large buffers.
 *	0.50: 20 Jan 2006: Add 8021pq tagging support.
 *	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 *	0.52: 20 Jan 2006: Add MSI/MSIX support.
 *	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 *	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 *	0.55: 22 Mar 2006: Add flow control (pause frame).
 *	0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
 *	0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
 *	0.58: 30 Oct 2006: Added support for sideband management unit.
 *	0.59: 30 Oct 2006: Added support for recoverable error.
 *	0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#ifdef CONFIG_FORCEDETH_NAPI
#define DRIVERNAPI "-NAPI"
#else
#define DRIVERNAPI
#endif
#define FORCEDETH_VERSION		"0.61"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif

#define TX_WORK_PER_LOOP  64
#define RX_WORK_PER_LOOP  64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and striping */
#define DEV_HAS_MSI		0x0040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX	0x0200	/* device supports tx pause frames */
#define DEV_HAS_STATISTICS_V1	0x0400	/* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x0800	/* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED	0x1000	/* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x2000	/* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR	0x4000	/* device supports correct mac address order */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK		0x81ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8000
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
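/*
 * Illustrative arithmetic, assuming the conversion documented at the
 * poll_interval module parameter below (value = time_in_us * 100 / 2^10):
 * 970 corresponds to roughly 970 * 1024 / 100 ~= 9933 us between timer
 * irqs (about 100 per second, matching the "Known bugs" note above),
 * while 13 corresponds to roughly 133 us.
 */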
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x3c,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT	0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100	0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000	0x14050f
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x1ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE	0x0c00030
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUMOK1	(0x10000000)
#define NV_RX2_CHECKSUMOK2	(0x14000000)
#define NV_RX2_CHECKSUMOK3	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY	10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3

/* PHY defines */
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHY_OUI_VITESSE	0x01c1
#define PHY_OUI_REALTEK	0x0732
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_MARVELL_E3016		0x220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100	0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS  8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE        0x0010
#define NV_MSI_X_CAPABLE      0x0020
#define NV_MSI_ENABLED        0x0040
#define NV_MSI_X_ENABLED      0x0080

#define NV_MSI_X_VECTOR_ALL   0x0
#define NV_MSI_X_VECTOR_RX    0x0
#define NV_MSI_X_VECTOR_TX    0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;
};

#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
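/*
 * Sanity check (illustrative): struct nv_ethtool_stats above has 30 u64
 * fields, so NV_DEV_STATISTICS_V2_COUNT evaluates to 30 and
 * NV_DEV_STATISTICS_V1_COUNT to 30 - 6 = 24, the six fields under
 * "version 2 stats" being present only on DEV_HAS_STATISTICS_V2 hardware.
 */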
"rx_unicast" }, 650 { "rx_multicast" }, 651 { "rx_broadcast" }, 652 { "rx_packets" }, 653 { "rx_errors_total" }, 654 { "tx_errors_total" }, 655 656 /* version 2 stats */ 657 { "tx_deferral" }, 658 { "tx_packets" }, 659 { "rx_bytes" }, 660 { "tx_pause" }, 661 { "rx_pause" }, 662 { "rx_drop_frame" } 663}; 664 665struct nv_ethtool_stats { 666 u64 tx_bytes; 667 u64 tx_zero_rexmt; 668 u64 tx_one_rexmt; 669 u64 tx_many_rexmt; 670 u64 tx_late_collision; 671 u64 tx_fifo_errors; 672 u64 tx_carrier_errors; 673 u64 tx_excess_deferral; 674 u64 tx_retry_error; 675 u64 rx_frame_error; 676 u64 rx_extra_byte; 677 u64 rx_late_collision; 678 u64 rx_runt; 679 u64 rx_frame_too_long; 680 u64 rx_over_errors; 681 u64 rx_crc_errors; 682 u64 rx_frame_align_error; 683 u64 rx_length_error; 684 u64 rx_unicast; 685 u64 rx_multicast; 686 u64 rx_broadcast; 687 u64 rx_packets; 688 u64 rx_errors_total; 689 u64 tx_errors_total; 690 691 /* version 2 stats */ 692 u64 tx_deferral; 693 u64 tx_packets; 694 u64 rx_bytes; 695 u64 tx_pause; 696 u64 rx_pause; 697 u64 rx_drop_frame; 698}; 699 700#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) 701#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) 702 703/* diagnostics */ 704#define NV_TEST_COUNT_BASE 3 705#define NV_TEST_COUNT_EXTENDED 4 706 707static const struct nv_ethtool_str nv_etests_str[] = { 708 { "link (online/offline)" }, 709 { "register (offline) " }, 710 { "interrupt (offline) " }, 711 { "loopback (offline) " } 712}; 713 714struct register_test { 715 __u32 reg; 716 __u32 mask; 717}; 718 719static const struct register_test nv_registers_test[] = { 720 { NvRegUnknownSetupReg6, 0x01 }, 721 { NvRegMisc1, 0x03c }, 722 { NvRegOffloadConfig, 0x03ff }, 723 { NvRegMulticastAddrA, 0xffffffff }, 724 { NvRegTxWatermark, 0x0ff }, 725 { NvRegWakeUpFlags, 0x07777 }, 726 { 0,0 } 727}; 728 729struct nv_skb_map { 730 struct sk_buff *skb; 731 dma_addr_t dma; 732 unsigned int dma_len; 733}; 734 735/* 736 * SMP locking: 737 * All hardware access under dev->priv->lock, except the performance 738 * critical parts: 739 * - rx is (pseudo-) lockless: it relies on the single-threading provided 740 * by the arch code for interrupts. 741 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission 742 * needs dev->priv->lock :-( 743 * - set_multicast_list: preparation lockless, relies on netif_tx_lock. 744 */ 745 746/* in dev: base, irq */ 747struct fe_priv { 748 spinlock_t lock; 749 750 struct net_device *dev; 751 struct napi_struct napi; 752 753 /* General data: 754 * Locking: spin_lock(&np->lock); */ 755 struct nv_ethtool_stats estats; 756 int in_shutdown; 757 u32 linkspeed; 758 int duplex; 759 int autoneg; 760 int fixed_mode; 761 int phyaddr; 762 int wolenabled; 763 unsigned int phy_oui; 764 unsigned int phy_model; 765 u16 gigabit; 766 int intr_test; 767 int recover_error; 768 769 /* General data: RO fields */ 770 dma_addr_t ring_addr; 771 struct pci_dev *pci_dev; 772 u32 orig_mac[2]; 773 u32 irqmask; 774 u32 desc_ver; 775 u32 txrxctl_bits; 776 u32 vlanctl_bits; 777 u32 driver_data; 778 u32 register_size; 779 int rx_csum; 780 u32 mac_in_use; 781 782 void __iomem *base; 783 784 /* rx specific fields. 
	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * This value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_DISABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
				int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
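/*
 * Note (editorial reading, not from the original comment): the double
 * shift >>31>>1 in dma_high(), rather than a single >>32, avoids a
 * full-width shift, which is undefined behaviour in C when dma_addr_t
 * is only 32 bits wide; written this way it is safe for both widths.
 */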
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
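/*
 * Note: nv_enable_irq/nv_disable_irq above gate the CPU-side interrupt
 * lines (per-vector when MSI-X splits rx/tx/other across
 * NV_MSI_X_VECTOR_*); the two helpers below instead program the
 * device's own interrupt mask registers.
 */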
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}

static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
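/*
 * Reminder on the mii_rw() return convention used heavily in phy_init()
 * below: a write returns 0 on success and -1 on timeout, while a read
 * returns the register value (or -1 on failure). Hence the recurring
 * "if (mii_rw(...))" pattern treats any nonzero result of a write as
 * an error.
 */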
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	/* reset the phy
	 * (certain phys need bmcr to be setup with reset)
	 */
	if (phy_reset(dev, mii_control)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		/* reset could have cleared these out, set them back */
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
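/*
 * Note (assumption inferred from the "+=" pattern below, not stated in
 * the original source): the hardware statistics registers appear to
 * clear on read, so nv_get_hw_stats() accumulates each readl() into
 * 64-bit software counters; the stats_poll timer (STATS_INTERVAL, 10 s)
 * presumably harvests them often enough that the narrower hardware
 * counters cannot wrap unnoticed.
 */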
static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc* less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
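/*
 * Note: the "less_rx" computation above backs up one descriptor from
 * get_rx (wrapping to last_rx at the ring start), so the refill loop
 * always leaves one ring slot unused. This keeps a completely full
 * ring distinguishable from an empty one: put_rx == get_rx then only
 * ever means "empty". The optimized variant below applies the same
 * trick to the 64-bit (ex) descriptor format.
 */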
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex* less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

/* If rx buffers are exhausted, this is called after ~50 ms (OOM_REFILL) to retry the allocation */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	netif_rx_schedule(dev, &np->napi);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif

static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;
	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;
	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
	}
}
np->tx_ring.ex[i].bufhigh = 0; 1692 np->tx_ring.ex[i].buflow = 0; 1693 } 1694 np->tx_skb[i].skb = NULL; 1695 np->tx_skb[i].dma = 0; 1696 } 1697} 1698 1699static int nv_init_ring(struct net_device *dev) 1700{ 1701 struct fe_priv *np = netdev_priv(dev); 1702 1703 nv_init_tx(dev); 1704 nv_init_rx(dev); 1705 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1706 return nv_alloc_rx(dev); 1707 else 1708 return nv_alloc_rx_optimized(dev); 1709} 1710 1711static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb) 1712{ 1713 struct fe_priv *np = netdev_priv(dev); 1714 1715 if (tx_skb->dma) { 1716 pci_unmap_page(np->pci_dev, tx_skb->dma, 1717 tx_skb->dma_len, 1718 PCI_DMA_TODEVICE); 1719 tx_skb->dma = 0; 1720 } 1721 if (tx_skb->skb) { 1722 dev_kfree_skb_any(tx_skb->skb); 1723 tx_skb->skb = NULL; 1724 return 1; 1725 } else { 1726 return 0; 1727 } 1728} 1729 1730static void nv_drain_tx(struct net_device *dev) 1731{ 1732 struct fe_priv *np = netdev_priv(dev); 1733 unsigned int i; 1734 1735 for (i = 0; i < np->tx_ring_size; i++) { 1736 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1737 np->tx_ring.orig[i].flaglen = 0; 1738 np->tx_ring.orig[i].buf = 0; 1739 } else { 1740 np->tx_ring.ex[i].flaglen = 0; 1741 np->tx_ring.ex[i].txvlan = 0; 1742 np->tx_ring.ex[i].bufhigh = 0; 1743 np->tx_ring.ex[i].buflow = 0; 1744 } 1745 if (nv_release_txskb(dev, &np->tx_skb[i])) 1746 dev->stats.tx_dropped++; 1747 } 1748} 1749 1750static void nv_drain_rx(struct net_device *dev) 1751{ 1752 struct fe_priv *np = netdev_priv(dev); 1753 int i; 1754 1755 for (i = 0; i < np->rx_ring_size; i++) { 1756 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1757 np->rx_ring.orig[i].flaglen = 0; 1758 np->rx_ring.orig[i].buf = 0; 1759 } else { 1760 np->rx_ring.ex[i].flaglen = 0; 1761 np->rx_ring.ex[i].txvlan = 0; 1762 np->rx_ring.ex[i].bufhigh = 0; 1763 np->rx_ring.ex[i].buflow = 0; 1764 } 1765 wmb(); 1766 if (np->rx_skb[i].skb) { 1767 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, 1768 (skb_end_pointer(np->rx_skb[i].skb) - 1769 np->rx_skb[i].skb->data), 1770 PCI_DMA_FROMDEVICE); 1771 dev_kfree_skb(np->rx_skb[i].skb); 1772 np->rx_skb[i].skb = NULL; 1773 } 1774 } 1775} 1776 1777static void drain_ring(struct net_device *dev) 1778{ 1779 nv_drain_tx(dev); 1780 nv_drain_rx(dev); 1781} 1782 1783static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) 1784{ 1785 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); 1786} 1787 1788/* 1789 * nv_start_xmit: dev->hard_start_xmit function 1790 * Called with netif_tx_lock held. 1791 */ 1792static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 1793{ 1794 struct fe_priv *np = netdev_priv(dev); 1795 u32 tx_flags = 0; 1796 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 1797 unsigned int fragments = skb_shinfo(skb)->nr_frags; 1798 unsigned int i; 1799 u32 offset = 0; 1800 u32 bcnt; 1801 u32 size = skb->len-skb->data_len; 1802 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1803 u32 empty_slots; 1804 struct ring_desc* put_tx; 1805 struct ring_desc* start_tx; 1806 struct ring_desc* prev_tx; 1807 struct nv_skb_map* prev_tx_ctx; 1808 1809 /* add fragments to entries count */ 1810 for (i = 0; i < fragments; i++) { 1811 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 1812 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 
1 : 0); 1813 } 1814 1815 empty_slots = nv_get_empty_tx_slots(np); 1816 if (unlikely(empty_slots <= entries)) { 1817 spin_lock_irq(&np->lock); 1818 netif_stop_queue(dev); 1819 np->tx_stop = 1; 1820 spin_unlock_irq(&np->lock); 1821 return NETDEV_TX_BUSY; 1822 } 1823 1824 start_tx = put_tx = np->put_tx.orig; 1825 1826 /* setup the header buffer */ 1827 do { 1828 prev_tx = put_tx; 1829 prev_tx_ctx = np->put_tx_ctx; 1830 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1831 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 1832 PCI_DMA_TODEVICE); 1833 np->put_tx_ctx->dma_len = bcnt; 1834 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 1835 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1836 1837 tx_flags = np->tx_flags; 1838 offset += bcnt; 1839 size -= bcnt; 1840 if (unlikely(put_tx++ == np->last_tx.orig)) 1841 put_tx = np->first_tx.orig; 1842 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 1843 np->put_tx_ctx = np->first_tx_ctx; 1844 } while (size); 1845 1846 /* setup the fragments */ 1847 for (i = 0; i < fragments; i++) { 1848 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1849 u32 size = frag->size; 1850 offset = 0; 1851 1852 do { 1853 prev_tx = put_tx; 1854 prev_tx_ctx = np->put_tx_ctx; 1855 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1856 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 1857 PCI_DMA_TODEVICE); 1858 np->put_tx_ctx->dma_len = bcnt; 1859 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 1860 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1861 1862 offset += bcnt; 1863 size -= bcnt; 1864 if (unlikely(put_tx++ == np->last_tx.orig)) 1865 put_tx = np->first_tx.orig; 1866 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 1867 np->put_tx_ctx = np->first_tx_ctx; 1868 } while (size); 1869 } 1870 1871 /* set last fragment flag */ 1872 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); 1873 1874 /* save skb in this slot's context area */ 1875 prev_tx_ctx->skb = skb; 1876 1877 if (skb_is_gso(skb)) 1878 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 1879 else 1880 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 1881 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 1882 1883 spin_lock_irq(&np->lock); 1884 1885 /* set tx flags */ 1886 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 1887 np->put_tx.orig = put_tx; 1888 1889 spin_unlock_irq(&np->lock); 1890 1891 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n", 1892 dev->name, entries, tx_flags_extra); 1893 { 1894 int j; 1895 for (j=0; j<64; j++) { 1896 if ((j%16) == 0) 1897 dprintk("\n%03x:", j); 1898 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 1899 } 1900 dprintk("\n"); 1901 } 1902 1903 dev->trans_start = jiffies; 1904 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 1905 return NETDEV_TX_OK; 1906} 1907 1908static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev) 1909{ 1910 struct fe_priv *np = netdev_priv(dev); 1911 u32 tx_flags = 0; 1912 u32 tx_flags_extra; 1913 unsigned int fragments = skb_shinfo(skb)->nr_frags; 1914 unsigned int i; 1915 u32 offset = 0; 1916 u32 bcnt; 1917 u32 size = skb->len-skb->data_len; 1918 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 
1 : 0); 1919 u32 empty_slots; 1920 struct ring_desc_ex* put_tx; 1921 struct ring_desc_ex* start_tx; 1922 struct ring_desc_ex* prev_tx; 1923 struct nv_skb_map* prev_tx_ctx; 1924 1925 /* add fragments to entries count */ 1926 for (i = 0; i < fragments; i++) { 1927 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 1928 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1929 } 1930 1931 empty_slots = nv_get_empty_tx_slots(np); 1932 if (unlikely(empty_slots <= entries)) { 1933 spin_lock_irq(&np->lock); 1934 netif_stop_queue(dev); 1935 np->tx_stop = 1; 1936 spin_unlock_irq(&np->lock); 1937 return NETDEV_TX_BUSY; 1938 } 1939 1940 start_tx = put_tx = np->put_tx.ex; 1941 1942 /* setup the header buffer */ 1943 do { 1944 prev_tx = put_tx; 1945 prev_tx_ctx = np->put_tx_ctx; 1946 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1947 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 1948 PCI_DMA_TODEVICE); 1949 np->put_tx_ctx->dma_len = bcnt; 1950 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 1951 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 1952 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1953 1954 tx_flags = NV_TX2_VALID; 1955 offset += bcnt; 1956 size -= bcnt; 1957 if (unlikely(put_tx++ == np->last_tx.ex)) 1958 put_tx = np->first_tx.ex; 1959 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 1960 np->put_tx_ctx = np->first_tx_ctx; 1961 } while (size); 1962 1963 /* setup the fragments */ 1964 for (i = 0; i < fragments; i++) { 1965 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1966 u32 size = frag->size; 1967 offset = 0; 1968 1969 do { 1970 prev_tx = put_tx; 1971 prev_tx_ctx = np->put_tx_ctx; 1972 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1973 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 1974 PCI_DMA_TODEVICE); 1975 np->put_tx_ctx->dma_len = bcnt; 1976 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 1977 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 1978 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1979 1980 offset += bcnt; 1981 size -= bcnt; 1982 if (unlikely(put_tx++ == np->last_tx.ex)) 1983 put_tx = np->first_tx.ex; 1984 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 1985 np->put_tx_ctx = np->first_tx_ctx; 1986 } while (size); 1987 } 1988 1989 /* set last fragment flag */ 1990 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); 1991 1992 /* save skb in this slot's context area */ 1993 prev_tx_ctx->skb = skb; 1994 1995 if (skb_is_gso(skb)) 1996 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 1997 else 1998 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 1999 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2000 2001 /* vlan tag */ 2002 if (likely(!np->vlangrp)) { 2003 start_tx->txvlan = 0; 2004 } else { 2005 if (vlan_tx_tag_present(skb)) 2006 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb)); 2007 else 2008 start_tx->txvlan = 0; 2009 } 2010 2011 spin_lock_irq(&np->lock); 2012 2013 /* set tx flags */ 2014 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2015 np->put_tx.ex = put_tx; 2016 2017 spin_unlock_irq(&np->lock); 2018 2019 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. 
tx_flags_extra: %x\n", 2020 dev->name, entries, tx_flags_extra); 2021 { 2022 int j; 2023 for (j=0; j<64; j++) { 2024 if ((j%16) == 0) 2025 dprintk("\n%03x:", j); 2026 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2027 } 2028 dprintk("\n"); 2029 } 2030 2031 dev->trans_start = jiffies; 2032 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2033 return NETDEV_TX_OK; 2034} 2035 2036/* 2037 * nv_tx_done: check for completed packets, release the skbs. 2038 * 2039 * Caller must own np->lock. 2040 */ 2041static void nv_tx_done(struct net_device *dev) 2042{ 2043 struct fe_priv *np = netdev_priv(dev); 2044 u32 flags; 2045 struct ring_desc* orig_get_tx = np->get_tx.orig; 2046 2047 while ((np->get_tx.orig != np->put_tx.orig) && 2048 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) { 2049 2050 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n", 2051 dev->name, flags); 2052 2053 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 2054 np->get_tx_ctx->dma_len, 2055 PCI_DMA_TODEVICE); 2056 np->get_tx_ctx->dma = 0; 2057 2058 if (np->desc_ver == DESC_VER_1) { 2059 if (flags & NV_TX_LASTPACKET) { 2060 if (flags & NV_TX_ERROR) { 2061 if (flags & NV_TX_UNDERFLOW) 2062 dev->stats.tx_fifo_errors++; 2063 if (flags & NV_TX_CARRIERLOST) 2064 dev->stats.tx_carrier_errors++; 2065 dev->stats.tx_errors++; 2066 } else { 2067 dev->stats.tx_packets++; 2068 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2069 } 2070 dev_kfree_skb_any(np->get_tx_ctx->skb); 2071 np->get_tx_ctx->skb = NULL; 2072 } 2073 } else { 2074 if (flags & NV_TX2_LASTPACKET) { 2075 if (flags & NV_TX2_ERROR) { 2076 if (flags & NV_TX2_UNDERFLOW) 2077 dev->stats.tx_fifo_errors++; 2078 if (flags & NV_TX2_CARRIERLOST) 2079 dev->stats.tx_carrier_errors++; 2080 dev->stats.tx_errors++; 2081 } else { 2082 dev->stats.tx_packets++; 2083 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2084 } 2085 dev_kfree_skb_any(np->get_tx_ctx->skb); 2086 np->get_tx_ctx->skb = NULL; 2087 } 2088 } 2089 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) 2090 np->get_tx.orig = np->first_tx.orig; 2091 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2092 np->get_tx_ctx = np->first_tx_ctx; 2093 } 2094 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { 2095 np->tx_stop = 0; 2096 netif_wake_queue(dev); 2097 } 2098} 2099 2100static void nv_tx_done_optimized(struct net_device *dev, int limit) 2101{ 2102 struct fe_priv *np = netdev_priv(dev); 2103 u32 flags; 2104 struct ring_desc_ex* orig_get_tx = np->get_tx.ex; 2105 2106 while ((np->get_tx.ex != np->put_tx.ex) && 2107 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) && 2108 (limit-- > 0)) { 2109 2110 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", 2111 dev->name, flags); 2112 2113 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 2114 np->get_tx_ctx->dma_len, 2115 PCI_DMA_TODEVICE); 2116 np->get_tx_ctx->dma = 0; 2117 2118 if (flags & NV_TX2_LASTPACKET) { 2119 if (!(flags & NV_TX2_ERROR)) 2120 dev->stats.tx_packets++; 2121 dev_kfree_skb_any(np->get_tx_ctx->skb); 2122 np->get_tx_ctx->skb = NULL; 2123 } 2124 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) 2125 np->get_tx.ex = np->first_tx.ex; 2126 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2127 np->get_tx_ctx = np->first_tx_ctx; 2128 } 2129 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { 2130 np->tx_stop = 0; 2131 netif_wake_queue(dev); 2132 } 2133} 2134 2135/* 2136 * nv_tx_timeout: dev->tx_timeout function 2137 * Called with netif_tx_lock held. 
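 * On timeout the driver stops the tx engine, reaps descriptors that
 * completed in the meantime, and only if dead entries remain does it
 * drain and reinitialize the whole tx ring before restarting the
 * engine (see the numbered steps in the body).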
2138 */ 2139static void nv_tx_timeout(struct net_device *dev) 2140{ 2141 struct fe_priv *np = netdev_priv(dev); 2142 u8 __iomem *base = get_hwbase(dev); 2143 u32 status; 2144 2145 if (np->msi_flags & NV_MSI_X_ENABLED) 2146 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2147 else 2148 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2149 2150 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); 2151 2152 { 2153 int i; 2154 2155 printk(KERN_INFO "%s: Ring at %lx\n", 2156 dev->name, (unsigned long)np->ring_addr); 2157 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 2158 for (i=0;i<=np->register_size;i+= 32) { 2159 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 2160 i, 2161 readl(base + i + 0), readl(base + i + 4), 2162 readl(base + i + 8), readl(base + i + 12), 2163 readl(base + i + 16), readl(base + i + 20), 2164 readl(base + i + 24), readl(base + i + 28)); 2165 } 2166 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 2167 for (i=0;i<np->tx_ring_size;i+= 4) { 2168 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 2169 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 2170 i, 2171 le32_to_cpu(np->tx_ring.orig[i].buf), 2172 le32_to_cpu(np->tx_ring.orig[i].flaglen), 2173 le32_to_cpu(np->tx_ring.orig[i+1].buf), 2174 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), 2175 le32_to_cpu(np->tx_ring.orig[i+2].buf), 2176 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), 2177 le32_to_cpu(np->tx_ring.orig[i+3].buf), 2178 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); 2179 } else { 2180 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", 2181 i, 2182 le32_to_cpu(np->tx_ring.ex[i].bufhigh), 2183 le32_to_cpu(np->tx_ring.ex[i].buflow), 2184 le32_to_cpu(np->tx_ring.ex[i].flaglen), 2185 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), 2186 le32_to_cpu(np->tx_ring.ex[i+1].buflow), 2187 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), 2188 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), 2189 le32_to_cpu(np->tx_ring.ex[i+2].buflow), 2190 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), 2191 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), 2192 le32_to_cpu(np->tx_ring.ex[i+3].buflow), 2193 le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); 2194 } 2195 } 2196 } 2197 2198 spin_lock_irq(&np->lock); 2199 2200 /* 1) stop tx engine */ 2201 nv_stop_tx(dev); 2202 2203 /* 2) check that the packets were not sent already: */ 2204 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 2205 nv_tx_done(dev); 2206 else 2207 nv_tx_done_optimized(dev, np->tx_ring_size); 2208 2209 /* 3) if there are dead entries: clear everything */ 2210 if (np->get_tx_ctx != np->put_tx_ctx) { 2211 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 2212 nv_drain_tx(dev); 2213 nv_init_tx(dev); 2214 setup_hw_rings(dev, NV_SETUP_TX_RING); 2215 } 2216 2217 netif_wake_queue(dev); 2218 2219 /* 4) restart tx engine */ 2220 nv_start_tx(dev); 2221 spin_unlock_irq(&np->lock); 2222} 2223 2224/* 2225 * Called when the nic notices a mismatch between the actual data len on the 2226 * wire and the len indicated in the 802 header 2227 */ 2228static int nv_getlen(struct net_device *dev, void *packet, int datalen) 2229{ 2230 int hdrlen; /* length of the 802 header */ 2231 int protolen; /* length as stored in the proto field */ 2232 2233 /* 1) calculate len according to header */ 2234 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { 2235 protolen = ntohs( ((struct vlan_ethhdr 
*)packet)->h_vlan_encapsulated_proto ); 2236 hdrlen = VLAN_HLEN; 2237 } else { 2238 protolen = ntohs( ((struct ethhdr *)packet)->h_proto); 2239 hdrlen = ETH_HLEN; 2240 } 2241 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n", 2242 dev->name, datalen, protolen, hdrlen); 2243 if (protolen > ETH_DATA_LEN) 2244 return datalen; /* Value in proto field not a length, no checks possible */ 2245 2246 protolen += hdrlen; 2247 /* consistency checks: */ 2248 if (datalen > ETH_ZLEN) { 2249 if (datalen >= protolen) { 2250 /* more data on wire than in 802 header, trim off the 2251 * additional data. 2252 */ 2253 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2254 dev->name, protolen); 2255 return protolen; 2256 } else { 2257 /* less data on wire than mentioned in header. 2258 * Discard the packet. 2259 */ 2260 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n", 2261 dev->name); 2262 return -1; 2263 } 2264 } else { 2265 /* short packet. Accept only if 802 values are also short */ 2266 if (protolen > ETH_ZLEN) { 2267 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n", 2268 dev->name); 2269 return -1; 2270 } 2271 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2272 dev->name, datalen); 2273 return datalen; 2274 } 2275} 2276 2277static int nv_rx_process(struct net_device *dev, int limit) 2278{ 2279 struct fe_priv *np = netdev_priv(dev); 2280 u32 flags; 2281 int rx_work = 0; 2282 struct sk_buff *skb; 2283 int len; 2284 2285 while((np->get_rx.orig != np->put_rx.orig) && 2286 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && 2287 (rx_work < limit)) { 2288 2289 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n", 2290 dev->name, flags); 2291 2292 /* 2293 * the packet is for us - immediately tear down the pci mapping. 2294 * TODO: check if a prefetch of the first cacheline improves 2295 * the performance.
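 * (Unmapping before the CPU looks at the data is needed for
 * correctness, not just tidiness: on platforms without
 * cache-coherent DMA it is the unmap that makes the device's
 * writes visible to the CPU.)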
2296 */ 2297 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2298 np->get_rx_ctx->dma_len, 2299 PCI_DMA_FROMDEVICE); 2300 skb = np->get_rx_ctx->skb; 2301 np->get_rx_ctx->skb = NULL; 2302 2303 { 2304 int j; 2305 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2306 for (j=0; j<64; j++) { 2307 if ((j%16) == 0) 2308 dprintk("\n%03x:", j); 2309 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2310 } 2311 dprintk("\n"); 2312 } 2313 /* look at what we actually got: */ 2314 if (np->desc_ver == DESC_VER_1) { 2315 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2316 len = flags & LEN_MASK_V1; 2317 if (unlikely(flags & NV_RX_ERROR)) { 2318 if (flags & NV_RX_ERROR4) { 2319 len = nv_getlen(dev, skb->data, len); 2320 if (len < 0) { 2321 dev->stats.rx_errors++; 2322 dev_kfree_skb(skb); 2323 goto next_pkt; 2324 } 2325 } 2326 /* framing errors are soft errors */ 2327 else if (flags & NV_RX_FRAMINGERR) { 2328 if (flags & NV_RX_SUBSTRACT1) { 2329 len--; 2330 } 2331 } 2332 /* the rest are hard errors */ 2333 else { 2334 if (flags & NV_RX_MISSEDFRAME) 2335 dev->stats.rx_missed_errors++; 2336 if (flags & NV_RX_CRCERR) 2337 dev->stats.rx_crc_errors++; 2338 if (flags & NV_RX_OVERFLOW) 2339 dev->stats.rx_over_errors++; 2340 dev->stats.rx_errors++; 2341 dev_kfree_skb(skb); 2342 goto next_pkt; 2343 } 2344 } 2345 } else { 2346 dev_kfree_skb(skb); 2347 goto next_pkt; 2348 } 2349 } else { 2350 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2351 len = flags & LEN_MASK_V2; 2352 if (unlikely(flags & NV_RX2_ERROR)) { 2353 if (flags & NV_RX2_ERROR4) { 2354 len = nv_getlen(dev, skb->data, len); 2355 if (len < 0) { 2356 dev->stats.rx_errors++; 2357 dev_kfree_skb(skb); 2358 goto next_pkt; 2359 } 2360 } 2361 /* framing errors are soft errors */ 2362 else if (flags & NV_RX2_FRAMINGERR) { 2363 if (flags & NV_RX2_SUBSTRACT1) { 2364 len--; 2365 } 2366 } 2367 /* the rest are hard errors */ 2368 else { 2369 if (flags & NV_RX2_CRCERR) 2370 dev->stats.rx_crc_errors++; 2371 if (flags & NV_RX2_OVERFLOW) 2372 dev->stats.rx_over_errors++; 2373 dev->stats.rx_errors++; 2374 dev_kfree_skb(skb); 2375 goto next_pkt; 2376 } 2377 } 2378 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { 2379 skb->ip_summed = CHECKSUM_UNNECESSARY; 2380 } else { 2381 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || 2382 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { 2383 skb->ip_summed = CHECKSUM_UNNECESSARY; 2384 } 2385 } 2386 } else { 2387 dev_kfree_skb(skb); 2388 goto next_pkt; 2389 } 2390 } 2391 /* got a valid packet - forward it to the network core */ 2392 skb_put(skb, len); 2393 skb->protocol = eth_type_trans(skb, dev); 2394 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n", 2395 dev->name, len, skb->protocol); 2396#ifdef CONFIG_FORCEDETH_NAPI 2397 netif_receive_skb(skb); 2398#else 2399 netif_rx(skb); 2400#endif 2401 dev->last_rx = jiffies; 2402 dev->stats.rx_packets++; 2403 dev->stats.rx_bytes += len; 2404next_pkt: 2405 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2406 np->get_rx.orig = np->first_rx.orig; 2407 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2408 np->get_rx_ctx = np->first_rx_ctx; 2409 2410 rx_work++; 2411 } 2412 2413 return rx_work; 2414} 2415 2416static int nv_rx_process_optimized(struct net_device *dev, int limit) 2417{ 2418 struct fe_priv *np = netdev_priv(dev); 2419 u32 flags; 2420 u32 vlanflags = 0; 2421 int rx_work = 0; 2422 struct sk_buff *skb; 2423 int len; 2424 2425 while((np->get_rx.ex != np->put_rx.ex) && 2426 !((flags = 
le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && 2427 (rx_work < limit)) { 2428 2429 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n", 2430 dev->name, flags); 2431 2432 /* 2433 * the packet is for us - immediately tear down the pci mapping. 2434 * TODO: check if a prefetch of the first cacheline improves 2435 * the performance. 2436 */ 2437 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2438 np->get_rx_ctx->dma_len, 2439 PCI_DMA_FROMDEVICE); 2440 skb = np->get_rx_ctx->skb; 2441 np->get_rx_ctx->skb = NULL; 2442 2443 { 2444 int j; 2445 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2446 for (j=0; j<64; j++) { 2447 if ((j%16) == 0) 2448 dprintk("\n%03x:", j); 2449 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2450 } 2451 dprintk("\n"); 2452 } 2453 /* look at what we actually got: */ 2454 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2455 len = flags & LEN_MASK_V2; 2456 if (unlikely(flags & NV_RX2_ERROR)) { 2457 if (flags & NV_RX2_ERROR4) { 2458 len = nv_getlen(dev, skb->data, len); 2459 if (len < 0) { 2460 dev_kfree_skb(skb); 2461 goto next_pkt; 2462 } 2463 } 2464 /* framing errors are soft errors */ 2465 else if (flags & NV_RX2_FRAMINGERR) { 2466 if (flags & NV_RX2_SUBSTRACT1) { 2467 len--; 2468 } 2469 } 2470 /* the rest are hard errors */ 2471 else { 2472 dev_kfree_skb(skb); 2473 goto next_pkt; 2474 } 2475 } 2476 2477 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { 2478 skb->ip_summed = CHECKSUM_UNNECESSARY; 2479 } else { 2480 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || 2481 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { 2482 skb->ip_summed = CHECKSUM_UNNECESSARY; 2483 } 2484 } 2485 2486 /* got a valid packet - forward it to the network core */ 2487 skb_put(skb, len); 2488 skb->protocol = eth_type_trans(skb, dev); 2489 prefetch(skb->data); 2490 2491 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n", 2492 dev->name, len, skb->protocol); 2493 2494 if (likely(!np->vlangrp)) { 2495#ifdef CONFIG_FORCEDETH_NAPI 2496 netif_receive_skb(skb); 2497#else 2498 netif_rx(skb); 2499#endif 2500 } else { 2501 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); 2502 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { 2503#ifdef CONFIG_FORCEDETH_NAPI 2504 vlan_hwaccel_receive_skb(skb, np->vlangrp, 2505 vlanflags & NV_RX3_VLAN_TAG_MASK); 2506#else 2507 vlan_hwaccel_rx(skb, np->vlangrp, 2508 vlanflags & NV_RX3_VLAN_TAG_MASK); 2509#endif 2510 } else { 2511#ifdef CONFIG_FORCEDETH_NAPI 2512 netif_receive_skb(skb); 2513#else 2514 netif_rx(skb); 2515#endif 2516 } 2517 } 2518 2519 dev->last_rx = jiffies; 2520 dev->stats.rx_packets++; 2521 dev->stats.rx_bytes += len; 2522 } else { 2523 dev_kfree_skb(skb); 2524 } 2525next_pkt: 2526 if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) 2527 np->get_rx.ex = np->first_rx.ex; 2528 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2529 np->get_rx_ctx = np->first_rx_ctx; 2530 2531 rx_work++; 2532 } 2533 2534 return rx_work; 2535} 2536 2537static void set_bufsize(struct net_device *dev) 2538{ 2539 struct fe_priv *np = netdev_priv(dev); 2540 2541 if (dev->mtu <= ETH_DATA_LEN) 2542 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; 2543 else 2544 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; 2545} 2546 2547/* 2548 * nv_change_mtu: dev->change_mtu function 2549 * Called with dev_base_lock held for read. 
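 * Rejects values outside 64..np->pkt_limit. If the interface is
 * running and the rx buffer size changes, both engines are stopped
 * and the rings are rebuilt with the new buffer size.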
2550 */ 2551static int nv_change_mtu(struct net_device *dev, int new_mtu) 2552{ 2553 struct fe_priv *np = netdev_priv(dev); 2554 int old_mtu; 2555 2556 if (new_mtu < 64 || new_mtu > np->pkt_limit) 2557 return -EINVAL; 2558 2559 old_mtu = dev->mtu; 2560 dev->mtu = new_mtu; 2561 2562 /* return early if the buffer sizes will not change */ 2563 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) 2564 return 0; 2565 if (old_mtu == new_mtu) 2566 return 0; 2567 2568 /* synchronized against open : rtnl_lock() held by caller */ 2569 if (netif_running(dev)) { 2570 u8 __iomem *base = get_hwbase(dev); 2571 /* 2572 * It seems that the nic preloads valid ring entries into an 2573 * internal buffer. The procedure for flushing everything is 2574 * guessed, there is probably a simpler approach. 2575 * Changing the MTU is a rare event, it shouldn't matter. 2576 */ 2577 nv_disable_irq(dev); 2578 netif_tx_lock_bh(dev); 2579 spin_lock(&np->lock); 2580 /* stop engines */ 2581 nv_stop_rx(dev); 2582 nv_stop_tx(dev); 2583 nv_txrx_reset(dev); 2584 /* drain rx queue */ 2585 nv_drain_rx(dev); 2586 nv_drain_tx(dev); 2587 /* reinit driver view of the rx queue */ 2588 set_bufsize(dev); 2589 if (nv_init_ring(dev)) { 2590 if (!np->in_shutdown) 2591 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2592 } 2593 /* reinit nic view of the rx queue */ 2594 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2595 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2596 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 2597 base + NvRegRingSizes); 2598 pci_push(base); 2599 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2600 pci_push(base); 2601 2602 /* restart rx engine */ 2603 nv_start_rx(dev); 2604 nv_start_tx(dev); 2605 spin_unlock(&np->lock); 2606 netif_tx_unlock_bh(dev); 2607 nv_enable_irq(dev); 2608 } 2609 return 0; 2610} 2611 2612static void nv_copy_mac_to_hw(struct net_device *dev) 2613{ 2614 u8 __iomem *base = get_hwbase(dev); 2615 u32 mac[2]; 2616 2617 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 2618 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 2619 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 2620 2621 writel(mac[0], base + NvRegMacAddrA); 2622 writel(mac[1], base + NvRegMacAddrB); 2623} 2624 2625/* 2626 * nv_set_mac_address: dev->set_mac_address function 2627 * Called with rtnl_lock() held. 2628 */ 2629static int nv_set_mac_address(struct net_device *dev, void *addr) 2630{ 2631 struct fe_priv *np = netdev_priv(dev); 2632 struct sockaddr *macaddr = (struct sockaddr*)addr; 2633 2634 if (!is_valid_ether_addr(macaddr->sa_data)) 2635 return -EADDRNOTAVAIL; 2636 2637 /* synchronized against open : rtnl_lock() held by caller */ 2638 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); 2639 2640 if (netif_running(dev)) { 2641 netif_tx_lock_bh(dev); 2642 spin_lock_irq(&np->lock); 2643 2644 /* stop rx engine */ 2645 nv_stop_rx(dev); 2646 2647 /* set mac address */ 2648 nv_copy_mac_to_hw(dev); 2649 2650 /* restart rx engine */ 2651 nv_start_rx(dev); 2652 spin_unlock_irq(&np->lock); 2653 netif_tx_unlock_bh(dev); 2654 } else { 2655 nv_copy_mac_to_hw(dev); 2656 } 2657 return 0; 2658} 2659 2660/* 2661 * nv_set_multicast: dev->set_multicast function 2662 * Called with netif_tx_lock held. 
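 * The hardware matches a packet when its address agrees with the
 * programmed address on every bit set in the mask. For a multicast
 * list the code therefore ANDs all addresses together (bits that are
 * 1 in every entry) and all complements together (bits that are 0 in
 * every entry); only those always-identical bits end up in the mask.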
2663 */ 2664static void nv_set_multicast(struct net_device *dev) 2665{ 2666 struct fe_priv *np = netdev_priv(dev); 2667 u8 __iomem *base = get_hwbase(dev); 2668 u32 addr[2]; 2669 u32 mask[2]; 2670 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; 2671 2672 memset(addr, 0, sizeof(addr)); 2673 memset(mask, 0, sizeof(mask)); 2674 2675 if (dev->flags & IFF_PROMISC) { 2676 pff |= NVREG_PFF_PROMISC; 2677 } else { 2678 pff |= NVREG_PFF_MYADDR; 2679 2680 if (dev->flags & IFF_ALLMULTI || dev->mc_list) { 2681 u32 alwaysOff[2]; 2682 u32 alwaysOn[2]; 2683 2684 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; 2685 if (dev->flags & IFF_ALLMULTI) { 2686 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 2687 } else { 2688 struct dev_mc_list *walk; 2689 2690 walk = dev->mc_list; 2691 while (walk != NULL) { 2692 u32 a, b; 2693 a = le32_to_cpu(*(__le32 *) walk->dmi_addr); 2694 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4])); 2695 alwaysOn[0] &= a; 2696 alwaysOff[0] &= ~a; 2697 alwaysOn[1] &= b; 2698 alwaysOff[1] &= ~b; 2699 walk = walk->next; 2700 } 2701 } 2702 addr[0] = alwaysOn[0]; 2703 addr[1] = alwaysOn[1]; 2704 mask[0] = alwaysOn[0] | alwaysOff[0]; 2705 mask[1] = alwaysOn[1] | alwaysOff[1]; 2706 } 2707 } 2708 addr[0] |= NVREG_MCASTADDRA_FORCE; 2709 pff |= NVREG_PFF_ALWAYS; 2710 spin_lock_irq(&np->lock); 2711 nv_stop_rx(dev); 2712 writel(addr[0], base + NvRegMulticastAddrA); 2713 writel(addr[1], base + NvRegMulticastAddrB); 2714 writel(mask[0], base + NvRegMulticastMaskA); 2715 writel(mask[1], base + NvRegMulticastMaskB); 2716 writel(pff, base + NvRegPacketFilterFlags); 2717 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", 2718 dev->name); 2719 nv_start_rx(dev); 2720 spin_unlock_irq(&np->lock); 2721} 2722 2723static void nv_update_pause(struct net_device *dev, u32 pause_flags) 2724{ 2725 struct fe_priv *np = netdev_priv(dev); 2726 u8 __iomem *base = get_hwbase(dev); 2727 2728 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); 2729 2730 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { 2731 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; 2732 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { 2733 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); 2734 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2735 } else { 2736 writel(pff, base + NvRegPacketFilterFlags); 2737 } 2738 } 2739 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 2740 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 2741 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 2742 writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame); 2743 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 2744 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2745 } else { 2746 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 2747 writel(regmisc, base + NvRegMisc1); 2748 } 2749 } 2750} 2751 2752/** 2753 * nv_update_linkspeed: Setup the MAC according to the link partner 2754 * @dev: Network device to be configured 2755 * 2756 * The function queries the PHY and checks if there is a link partner. 2757 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is 2758 * set to 10 MBit HD. 2759 * 2760 * The function returns 0 if there is no link partner and 1 if there is 2761 * a good link partner. 
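 * Note: BMSR_LSTATUS is latched, which is why the status register is
 * read twice below to obtain the current value. Pause frame usage is
 * resolved from the local and partner advertisements, following the
 * scheme of IEEE 802.3 annex 28B.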
2762 */ 2763static int nv_update_linkspeed(struct net_device *dev) 2764{ 2765 struct fe_priv *np = netdev_priv(dev); 2766 u8 __iomem *base = get_hwbase(dev); 2767 int adv = 0; 2768 int lpa = 0; 2769 int adv_lpa, adv_pause, lpa_pause; 2770 int newls = np->linkspeed; 2771 int newdup = np->duplex; 2772 int mii_status; 2773 int retval = 0; 2774 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 2775 2776 /* BMSR_LSTATUS is latched, read it twice: 2777 * we want the current value. 2778 */ 2779 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 2780 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 2781 2782 if (!(mii_status & BMSR_LSTATUS)) { 2783 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", 2784 dev->name); 2785 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2786 newdup = 0; 2787 retval = 0; 2788 goto set_speed; 2789 } 2790 2791 if (np->autoneg == 0) { 2792 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", 2793 dev->name, np->fixed_mode); 2794 if (np->fixed_mode & LPA_100FULL) { 2795 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2796 newdup = 1; 2797 } else if (np->fixed_mode & LPA_100HALF) { 2798 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2799 newdup = 0; 2800 } else if (np->fixed_mode & LPA_10FULL) { 2801 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2802 newdup = 1; 2803 } else { 2804 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2805 newdup = 0; 2806 } 2807 retval = 1; 2808 goto set_speed; 2809 } 2810 /* check auto negotiation is complete */ 2811 if (!(mii_status & BMSR_ANEGCOMPLETE)) { 2812 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ 2813 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2814 newdup = 0; 2815 retval = 0; 2816 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); 2817 goto set_speed; 2818 } 2819 2820 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 2821 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 2822 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", 2823 dev->name, adv, lpa); 2824 2825 retval = 1; 2826 if (np->gigabit == PHY_GIGABIT) { 2827 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 2828 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); 2829 2830 if ((control_1000 & ADVERTISE_1000FULL) && 2831 (status_1000 & LPA_1000FULL)) { 2832 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", 2833 dev->name); 2834 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 2835 newdup = 1; 2836 goto set_speed; 2837 } 2838 } 2839 2840 /* FIXME: handle parallel detection properly */ 2841 adv_lpa = lpa & adv; 2842 if (adv_lpa & LPA_100FULL) { 2843 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2844 newdup = 1; 2845 } else if (adv_lpa & LPA_100HALF) { 2846 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2847 newdup = 0; 2848 } else if (adv_lpa & LPA_10FULL) { 2849 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2850 newdup = 1; 2851 } else if (adv_lpa & LPA_10HALF) { 2852 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2853 newdup = 0; 2854 } else { 2855 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); 2856 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2857 newdup = 0; 2858 } 2859 2860set_speed: 2861 if (np->duplex == newdup && np->linkspeed == newls) 2862 return retval; 2863 2864 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", 2865 dev->name, np->linkspeed, 
np->duplex, newls, newdup); 2866 2867 np->duplex = newdup; 2868 np->linkspeed = newls; 2869 2870 if (np->gigabit == PHY_GIGABIT) { 2871 phyreg = readl(base + NvRegRandomSeed); 2872 phyreg &= ~(0x3FF00); 2873 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) 2874 phyreg |= NVREG_RNDSEED_FORCE3; 2875 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) 2876 phyreg |= NVREG_RNDSEED_FORCE2; 2877 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 2878 phyreg |= NVREG_RNDSEED_FORCE; 2879 writel(phyreg, base + NvRegRandomSeed); 2880 } 2881 2882 phyreg = readl(base + NvRegPhyInterface); 2883 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 2884 if (np->duplex == 0) 2885 phyreg |= PHY_HALF; 2886 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 2887 phyreg |= PHY_100; 2888 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2889 phyreg |= PHY_1000; 2890 writel(phyreg, base + NvRegPhyInterface); 2891 2892 if (phyreg & PHY_RGMII) { 2893 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2894 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 2895 else 2896 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 2897 } else { 2898 txreg = NVREG_TX_DEFERRAL_DEFAULT; 2899 } 2900 writel(txreg, base + NvRegTxDeferral); 2901 2902 if (np->desc_ver == DESC_VER_1) { 2903 txreg = NVREG_TX_WM_DESC1_DEFAULT; 2904 } else { 2905 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2906 txreg = NVREG_TX_WM_DESC2_3_1000; 2907 else 2908 txreg = NVREG_TX_WM_DESC2_3_DEFAULT; 2909 } 2910 writel(txreg, base + NvRegTxWatermark); 2911 2912 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 2913 base + NvRegMisc1); 2914 pci_push(base); 2915 writel(np->linkspeed, base + NvRegLinkSpeed); 2916 pci_push(base); 2917 2918 pause_flags = 0; 2919 /* setup pause frame */ 2920 if (np->duplex != 0) { 2921 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 2922 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); 2923 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 2924 2925 switch (adv_pause) { 2926 case ADVERTISE_PAUSE_CAP: 2927 if (lpa_pause & LPA_PAUSE_CAP) { 2928 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2929 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 2930 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2931 } 2932 break; 2933 case ADVERTISE_PAUSE_ASYM: 2934 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 2935 { 2936 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2937 } 2938 break; 2939 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: 2940 if (lpa_pause & LPA_PAUSE_CAP) 2941 { 2942 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2943 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 2944 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2945 } 2946 if (lpa_pause == LPA_PAUSE_ASYM) 2947 { 2948 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2949 } 2950 break; 2951 } 2952 } else { 2953 pause_flags = np->pause_flags; 2954 } 2955 } 2956 nv_update_pause(dev, pause_flags); 2957 2958 return retval; 2959} 2960 2961static void nv_linkchange(struct net_device *dev) 2962{ 2963 if (nv_update_linkspeed(dev)) { 2964 if (!netif_carrier_ok(dev)) { 2965 netif_carrier_on(dev); 2966 printk(KERN_INFO "%s: link up.\n", dev->name); 2967 nv_start_rx(dev); 2968 } 2969 } else { 2970 if (netif_carrier_ok(dev)) { 2971 netif_carrier_off(dev); 2972 printk(KERN_INFO "%s: link down.\n", dev->name); 2973 nv_stop_rx(dev); 2974 } 2975 } 2976} 2977 2978static void nv_link_irq(struct net_device *dev) 2979{ 2980 u8 __iomem *base = get_hwbase(dev); 2981 u32 miistat; 2982 2983 miistat = readl(base + NvRegMIIStatus); 2984 
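	/* ack the latched MII status bits (presumably write-one-to-clear,
	 * matching the irq status registers) before acting on the snapshot */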
writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 2985 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); 2986 2987 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 2988 nv_linkchange(dev); 2989 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); 2990} 2991 2992static irqreturn_t nv_nic_irq(int foo, void *data) 2993{ 2994 struct net_device *dev = (struct net_device *) data; 2995 struct fe_priv *np = netdev_priv(dev); 2996 u8 __iomem *base = get_hwbase(dev); 2997 u32 events; 2998 int i; 2999 3000 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 3001 3002 for (i=0; ; i++) { 3003 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3004 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3005 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3006 } else { 3007 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3008 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 3009 } 3010 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3011 if (!(events & np->irqmask)) 3012 break; 3013 3014 spin_lock(&np->lock); 3015 nv_tx_done(dev); 3016 spin_unlock(&np->lock); 3017 3018#ifdef CONFIG_FORCEDETH_NAPI 3019 if (events & NVREG_IRQ_RX_ALL) { 3020 netif_rx_schedule(dev, &np->napi); 3021 3022 /* Disable further receive irqs */ 3023 spin_lock(&np->lock); 3024 np->irqmask &= ~NVREG_IRQ_RX_ALL; 3025 3026 if (np->msi_flags & NV_MSI_X_ENABLED) 3027 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3028 else 3029 writel(np->irqmask, base + NvRegIrqMask); 3030 spin_unlock(&np->lock); 3031 } 3032#else 3033 if (nv_rx_process(dev, RX_WORK_PER_LOOP)) { 3034 if (unlikely(nv_alloc_rx(dev))) { 3035 spin_lock(&np->lock); 3036 if (!np->in_shutdown) 3037 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3038 spin_unlock(&np->lock); 3039 } 3040 } 3041#endif 3042 if (unlikely(events & NVREG_IRQ_LINK)) { 3043 spin_lock(&np->lock); 3044 nv_link_irq(dev); 3045 spin_unlock(&np->lock); 3046 } 3047 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3048 spin_lock(&np->lock); 3049 nv_linkchange(dev); 3050 spin_unlock(&np->lock); 3051 np->link_timeout = jiffies + LINK_TIMEOUT; 3052 } 3053 if (unlikely(events & (NVREG_IRQ_TX_ERR))) { 3054 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 3055 dev->name, events); 3056 } 3057 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) { 3058 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x.
Please report\n", 3059 dev->name, events); 3060 } 3061 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { 3062 spin_lock(&np->lock); 3063 /* disable interrupts on the nic */ 3064 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3065 writel(0, base + NvRegIrqMask); 3066 else 3067 writel(np->irqmask, base + NvRegIrqMask); 3068 pci_push(base); 3069 3070 if (!np->in_shutdown) { 3071 np->nic_poll_irq = np->irqmask; 3072 np->recover_error = 1; 3073 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3074 } 3075 spin_unlock(&np->lock); 3076 break; 3077 } 3078 if (unlikely(i > max_interrupt_work)) { 3079 spin_lock(&np->lock); 3080 /* disable interrupts on the nic */ 3081 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3082 writel(0, base + NvRegIrqMask); 3083 else 3084 writel(np->irqmask, base + NvRegIrqMask); 3085 pci_push(base); 3086 3087 if (!np->in_shutdown) { 3088 np->nic_poll_irq = np->irqmask; 3089 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3090 } 3091 spin_unlock(&np->lock); 3092 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); 3093 break; 3094 } 3095 3096 } 3097 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 3098 3099 return IRQ_RETVAL(i); 3100} 3101 3102/** 3103 * All _optimized functions are used to help increase performance 3104 * (reduce CPU and increase throughput). They use descriptor version 3, 3105 * compiler directives, and reduced memory accesses. 3106 */ 3107static irqreturn_t nv_nic_irq_optimized(int foo, void *data) 3108{ 3109 struct net_device *dev = (struct net_device *) data; 3110 struct fe_priv *np = netdev_priv(dev); 3111 u8 __iomem *base = get_hwbase(dev); 3112 u32 events; 3113 int i; 3114 3115 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name); 3116 3117 for (i=0; ; i++) { 3118 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3119 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3120 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3121 } else { 3122 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3123 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 3124 } 3125 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3126 if (!(events & np->irqmask)) 3127 break; 3128 3129 spin_lock(&np->lock); 3130 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3131 spin_unlock(&np->lock); 3132 3133#ifdef CONFIG_FORCEDETH_NAPI 3134 if (events & NVREG_IRQ_RX_ALL) { 3135 netif_rx_schedule(dev, &np->napi); 3136 3137 /* Disable further receive irqs */ 3138 spin_lock(&np->lock); 3139 np->irqmask &= ~NVREG_IRQ_RX_ALL; 3140 3141 if (np->msi_flags & NV_MSI_X_ENABLED) 3142 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3143 else 3144 writel(np->irqmask, base + NvRegIrqMask); 3145 spin_unlock(&np->lock); 3146 } 3147#else 3148 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { 3149 if (unlikely(nv_alloc_rx_optimized(dev))) { 3150 spin_lock(&np->lock); 3151 if (!np->in_shutdown) 3152 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3153 spin_unlock(&np->lock); 3154 } 3155 } 3156#endif 3157 if (unlikely(events & NVREG_IRQ_LINK)) { 3158 spin_lock(&np->lock); 3159 nv_link_irq(dev); 3160 spin_unlock(&np->lock); 3161 } 3162 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3163 spin_lock(&np->lock); 3164 nv_linkchange(dev); 3165 spin_unlock(&np->lock); 3166 np->link_timeout = jiffies + LINK_TIMEOUT; 3167 } 3168 if (unlikely(events & (NVREG_IRQ_TX_ERR))) { 3169 dprintk(KERN_DEBUG "%s: received irq with events 0x%x.
Probably TX fail.\n", 3170 dev->name, events); 3171 } 3172 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) { 3173 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 3174 dev->name, events); 3175 } 3176 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { 3177 spin_lock(&np->lock); 3178 /* disable interrupts on the nic */ 3179 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3180 writel(0, base + NvRegIrqMask); 3181 else 3182 writel(np->irqmask, base + NvRegIrqMask); 3183 pci_push(base); 3184 3185 if (!np->in_shutdown) { 3186 np->nic_poll_irq = np->irqmask; 3187 np->recover_error = 1; 3188 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3189 } 3190 spin_unlock(&np->lock); 3191 break; 3192 } 3193 3194 if (unlikely(i > max_interrupt_work)) { 3195 spin_lock(&np->lock); 3196 /* disable interrupts on the nic */ 3197 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3198 writel(0, base + NvRegIrqMask); 3199 else 3200 writel(np->irqmask, base + NvRegIrqMask); 3201 pci_push(base); 3202 3203 if (!np->in_shutdown) { 3204 np->nic_poll_irq = np->irqmask; 3205 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3206 } 3207 spin_unlock(&np->lock); 3208 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_optimized.\n", dev->name, i); 3209 break; 3210 } 3211 3212 } 3213 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name); 3214 3215 return IRQ_RETVAL(i); 3216} 3217 3218static irqreturn_t nv_nic_irq_tx(int foo, void *data) 3219{ 3220 struct net_device *dev = (struct net_device *) data; 3221 struct fe_priv *np = netdev_priv(dev); 3222 u8 __iomem *base = get_hwbase(dev); 3223 u32 events; 3224 int i; 3225 unsigned long flags; 3226 3227 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 3228 3229 for (i=0; ; i++) { 3230 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3231 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3232 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 3233 if (!(events & np->irqmask)) 3234 break; 3235 3236 spin_lock_irqsave(&np->lock, flags); 3237 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3238 spin_unlock_irqrestore(&np->lock, flags); 3239 3240 if (unlikely(events & (NVREG_IRQ_TX_ERR))) { 3241 dprintk(KERN_DEBUG "%s: received irq with events 0x%x.
Probably TX fail.\n", 3242 dev->name, events); 3243 } 3244 if (unlikely(i > max_interrupt_work)) { 3245 spin_lock_irqsave(&np->lock, flags); 3246 /* disable interrupts on the nic */ 3247 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3248 pci_push(base); 3249 3250 if (!np->in_shutdown) { 3251 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 3252 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3253 } 3254 spin_unlock_irqrestore(&np->lock, flags); 3255 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 3256 break; 3257 } 3258 3259 } 3260 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); 3261 3262 return IRQ_RETVAL(i); 3263} 3264 3265#ifdef CONFIG_FORCEDETH_NAPI 3266static int nv_napi_poll(struct napi_struct *napi, int budget) 3267{ 3268 struct fe_priv *np = container_of(napi, struct fe_priv, napi); 3269 struct net_device *dev = np->dev; 3270 u8 __iomem *base = get_hwbase(dev); 3271 unsigned long flags; 3272 int pkts, retcode; 3273 3274 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3275 pkts = nv_rx_process(dev, budget); 3276 retcode = nv_alloc_rx(dev); 3277 } else { 3278 pkts = nv_rx_process_optimized(dev, budget); 3279 retcode = nv_alloc_rx_optimized(dev); 3280 } 3281 3282 if (retcode) { 3283 spin_lock_irqsave(&np->lock, flags); 3284 if (!np->in_shutdown) 3285 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3286 spin_unlock_irqrestore(&np->lock, flags); 3287 } 3288 3289 if (pkts < budget) { 3290 /* re-enable receive interrupts */ 3291 spin_lock_irqsave(&np->lock, flags); 3292 3293 __netif_rx_complete(dev, napi); 3294 3295 np->irqmask |= NVREG_IRQ_RX_ALL; 3296 if (np->msi_flags & NV_MSI_X_ENABLED) 3297 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3298 else 3299 writel(np->irqmask, base + NvRegIrqMask); 3300 3301 spin_unlock_irqrestore(&np->lock, flags); 3302 } 3303 return pkts; 3304} 3305#endif 3306 3307#ifdef CONFIG_FORCEDETH_NAPI 3308static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3309{ 3310 struct net_device *dev = (struct net_device *) data; 3311 struct fe_priv *np = netdev_priv(dev); 3312 u8 __iomem *base = get_hwbase(dev); 3313 u32 events; 3314 3315 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3316 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3317 3318 if (events) { 3319 netif_rx_schedule(dev, &np->napi); 3320 /* disable receive interrupts on the nic */ 3321 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3322 pci_push(base); 3323 } 3324 return IRQ_HANDLED; 3325} 3326#else 3327static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3328{ 3329 struct net_device *dev = (struct net_device *) data; 3330 struct fe_priv *np = netdev_priv(dev); 3331 u8 __iomem *base = get_hwbase(dev); 3332 u32 events; 3333 int i; 3334 unsigned long flags; 3335 3336 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 3337 3338 for (i=0; ; i++) { 3339 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3340 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3341 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 3342 if (!(events & np->irqmask)) 3343 break; 3344 3345 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { 3346 if (unlikely(nv_alloc_rx_optimized(dev))) { 3347 spin_lock_irqsave(&np->lock, flags); 3348 if (!np->in_shutdown) 3349 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3350 spin_unlock_irqrestore(&np->lock, flags); 3351 } 3352 } 3353 3354 if (unlikely(i > max_interrupt_work)) { 3355 spin_lock_irqsave(&np->lock, flags); 3356 /* disable interrupts on the nic */ 3357 
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3358 pci_push(base); 3359 3360 if (!np->in_shutdown) { 3361 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 3362 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3363 } 3364 spin_unlock_irqrestore(&np->lock, flags); 3365 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 3366 break; 3367 } 3368 } 3369 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); 3370 3371 return IRQ_RETVAL(i); 3372} 3373#endif 3374 3375static irqreturn_t nv_nic_irq_other(int foo, void *data) 3376{ 3377 struct net_device *dev = (struct net_device *) data; 3378 struct fe_priv *np = netdev_priv(dev); 3379 u8 __iomem *base = get_hwbase(dev); 3380 u32 events; 3381 int i; 3382 unsigned long flags; 3383 3384 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 3385 3386 for (i=0; ; i++) { 3387 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3388 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3389 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3390 if (!(events & np->irqmask)) 3391 break; 3392 3393 /* check tx in case we reached max loop limit in tx isr */ 3394 spin_lock_irqsave(&np->lock, flags); 3395 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3396 spin_unlock_irqrestore(&np->lock, flags); 3397 3398 if (events & NVREG_IRQ_LINK) { 3399 spin_lock_irqsave(&np->lock, flags); 3400 nv_link_irq(dev); 3401 spin_unlock_irqrestore(&np->lock, flags); 3402 } 3403 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 3404 spin_lock_irqsave(&np->lock, flags); 3405 nv_linkchange(dev); 3406 spin_unlock_irqrestore(&np->lock, flags); 3407 np->link_timeout = jiffies + LINK_TIMEOUT; 3408 } 3409 if (events & NVREG_IRQ_RECOVER_ERROR) { 3410 spin_lock_irq(&np->lock); 3411 /* disable interrupts on the nic */ 3412 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3413 pci_push(base); 3414 3415 if (!np->in_shutdown) { 3416 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3417 np->recover_error = 1; 3418 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3419 } 3420 spin_unlock_irq(&np->lock); 3421 break; 3422 } 3423 if (events & (NVREG_IRQ_UNKNOWN)) { 3424 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. 
Please report\n", 3425 dev->name, events); 3426 } 3427 if (unlikely(i > max_interrupt_work)) { 3428 spin_lock_irqsave(&np->lock, flags); 3429 /* disable interrupts on the nic */ 3430 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3431 pci_push(base); 3432 3433 if (!np->in_shutdown) { 3434 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3435 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3436 } 3437 spin_unlock_irqrestore(&np->lock, flags); 3438 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 3439 break; 3440 } 3441 3442 } 3443 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); 3444 3445 return IRQ_RETVAL(i); 3446} 3447 3448static irqreturn_t nv_nic_irq_test(int foo, void *data) 3449{ 3450 struct net_device *dev = (struct net_device *) data; 3451 struct fe_priv *np = netdev_priv(dev); 3452 u8 __iomem *base = get_hwbase(dev); 3453 u32 events; 3454 3455 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name); 3456 3457 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3458 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3459 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3460 } else { 3461 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3462 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3463 } 3464 pci_push(base); 3465 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3466 if (!(events & NVREG_IRQ_TIMER)) 3467 return IRQ_RETVAL(0); 3468 3469 spin_lock(&np->lock); 3470 np->intr_test = 1; 3471 spin_unlock(&np->lock); 3472 3473 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); 3474 3475 return IRQ_RETVAL(1); 3476} 3477 3478static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) 3479{ 3480 u8 __iomem *base = get_hwbase(dev); 3481 int i; 3482 u32 msixmap = 0; 3483 3484 /* Each interrupt bit can be mapped to a MSIX vector (4 bits). 3485 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents 3486 * the remaining 8 interrupts. 
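 * For example, mapping vector 2 onto interrupt bits 0 and 3 would set
 * msixmap to 0x00002002 for MSIXMap0: each selected interrupt bit
 * contributes the vector number in its own 4-bit nibble.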
3487 */ 3488 for (i = 0; i < 8; i++) { 3489 if ((irqmask >> i) & 0x1) { 3490 msixmap |= vector << (i << 2); 3491 } 3492 } 3493 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3494 3495 msixmap = 0; 3496 for (i = 0; i < 8; i++) { 3497 if ((irqmask >> (i + 8)) & 0x1) { 3498 msixmap |= vector << (i << 2); 3499 } 3500 } 3501 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3502} 3503 3504static int nv_request_irq(struct net_device *dev, int intr_test) 3505{ 3506 struct fe_priv *np = get_nvpriv(dev); 3507 u8 __iomem *base = get_hwbase(dev); 3508 int ret = 1; 3509 int i; 3510 irqreturn_t (*handler)(int foo, void *data); 3511 3512 if (intr_test) { 3513 handler = nv_nic_irq_test; 3514 } else { 3515 if (np->desc_ver == DESC_VER_3) 3516 handler = nv_nic_irq_optimized; 3517 else 3518 handler = nv_nic_irq; 3519 } 3520 3521 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3522 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3523 np->msi_x_entry[i].entry = i; 3524 } 3525 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 3526 np->msi_flags |= NV_MSI_X_ENABLED; 3527 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3528 /* Request irq for rx handling */ 3529 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) { 3530 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 3531 pci_disable_msix(np->pci_dev); 3532 np->msi_flags &= ~NV_MSI_X_ENABLED; 3533 goto out_err; 3534 } 3535 /* Request irq for tx handling */ 3536 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) { 3537 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 3538 pci_disable_msix(np->pci_dev); 3539 np->msi_flags &= ~NV_MSI_X_ENABLED; 3540 goto out_free_rx; 3541 } 3542 /* Request irq for link and timer handling */ 3543 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) { 3544 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 3545 pci_disable_msix(np->pci_dev); 3546 np->msi_flags &= ~NV_MSI_X_ENABLED; 3547 goto out_free_tx; 3548 } 3549 /* map interrupts to their respective vector */ 3550 writel(0, base + NvRegMSIXMap0); 3551 writel(0, base + NvRegMSIXMap1); 3552 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 3553 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 3554 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 3555 } else { 3556 /* Request irq for all interrupts */ 3557 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 3558 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3559 pci_disable_msix(np->pci_dev); 3560 np->msi_flags &= ~NV_MSI_X_ENABLED; 3561 goto out_err; 3562 } 3563 3564 /* map interrupts to vector 0 */ 3565 writel(0, base + NvRegMSIXMap0); 3566 writel(0, base + NvRegMSIXMap1); 3567 } 3568 } 3569 } 3570 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3571 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3572 np->msi_flags |= NV_MSI_ENABLED; 3573 dev->irq = np->pci_dev->irq; 3574 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3575 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3576 pci_disable_msi(np->pci_dev); 3577 np->msi_flags &= ~NV_MSI_ENABLED; 3578 dev->irq = np->pci_dev->irq; 3579 goto out_err; 3580 } 
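			/* MSI is active at this point: pci_enable_msi() updates
			 * np->pci_dev->irq to the MSI vector, which is why dev->irq
			 * was refreshed above and again on the failure path. */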
3581 3582 /* map interrupts to vector 0 */ 3583 writel(0, base + NvRegMSIMap0); 3584 writel(0, base + NvRegMSIMap1); 3585 /* enable msi vector 0 */ 3586 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3587 } 3588 } 3589 if (ret != 0) { 3590 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) 3591 goto out_err; 3592 3593 } 3594 3595 return 0; 3596out_free_tx: 3597 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); 3598out_free_rx: 3599 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); 3600out_err: 3601 return 1; 3602} 3603 3604static void nv_free_irq(struct net_device *dev) 3605{ 3606 struct fe_priv *np = get_nvpriv(dev); 3607 int i; 3608 3609 if (np->msi_flags & NV_MSI_X_ENABLED) { 3610 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3611 free_irq(np->msi_x_entry[i].vector, dev); 3612 } 3613 pci_disable_msix(np->pci_dev); 3614 np->msi_flags &= ~NV_MSI_X_ENABLED; 3615 } else { 3616 free_irq(np->pci_dev->irq, dev); 3617 if (np->msi_flags & NV_MSI_ENABLED) { 3618 pci_disable_msi(np->pci_dev); 3619 np->msi_flags &= ~NV_MSI_ENABLED; 3620 } 3621 } 3622} 3623 3624static void nv_do_nic_poll(unsigned long data) 3625{ 3626 struct net_device *dev = (struct net_device *) data; 3627 struct fe_priv *np = netdev_priv(dev); 3628 u8 __iomem *base = get_hwbase(dev); 3629 u32 mask = 0; 3630 3631 /* 3632 * First disable irq(s) and then 3633 * reenable interrupts on the nic, we have to do this before calling 3634 * nv_nic_irq because that may decide to do otherwise 3635 */ 3636 3637 if (!using_multi_irqs(dev)) { 3638 if (np->msi_flags & NV_MSI_X_ENABLED) 3639 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 3640 else 3641 disable_irq_lockdep(np->pci_dev->irq); 3642 mask = np->irqmask; 3643 } else { 3644 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 3645 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 3646 mask |= NVREG_IRQ_RX_ALL; 3647 } 3648 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 3649 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 3650 mask |= NVREG_IRQ_TX_ALL; 3651 } 3652 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 3653 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 3654 mask |= NVREG_IRQ_OTHER; 3655 } 3656 } 3657 np->nic_poll_irq = 0; 3658 3659 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ 3660 3661 if (np->recover_error) { 3662 np->recover_error = 0; 3663 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n"); 3664 if (netif_running(dev)) { 3665 netif_tx_lock_bh(dev); 3666 spin_lock(&np->lock); 3667 /* stop engines */ 3668 nv_stop_rx(dev); 3669 nv_stop_tx(dev); 3670 nv_txrx_reset(dev); 3671 /* drain rx queue */ 3672 nv_drain_rx(dev); 3673 nv_drain_tx(dev); 3674 /* reinit driver view of the rx queue */ 3675 set_bufsize(dev); 3676 if (nv_init_ring(dev)) { 3677 if (!np->in_shutdown) 3678 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3679 } 3680 /* reinit nic view of the rx queue */ 3681 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3682 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3683 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3684 base + NvRegRingSizes); 3685 pci_push(base); 3686 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3687 pci_push(base); 3688 3689 /* restart rx engine */ 3690 nv_start_rx(dev); 3691 nv_start_tx(dev); 3692 spin_unlock(&np->lock); 3693 netif_tx_unlock_bh(dev); 3694 } 3695 } 3696 3697 3698 
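/* re-enable the interrupt sources that were masked off above and invoke
 * the handlers by hand, so that events which arrived while the irq line
 * was disabled are not lost */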
writel(mask, base + NvRegIrqMask); 3699 pci_push(base); 3700 3701 if (!using_multi_irqs(dev)) { 3702 if (np->desc_ver == DESC_VER_3) 3703 nv_nic_irq_optimized(0, dev); 3704 else 3705 nv_nic_irq(0, dev); 3706 if (np->msi_flags & NV_MSI_X_ENABLED) 3707 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 3708 else 3709 enable_irq_lockdep(np->pci_dev->irq); 3710 } else { 3711 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 3712 nv_nic_irq_rx(0, dev); 3713 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 3714 } 3715 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 3716 nv_nic_irq_tx(0, dev); 3717 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 3718 } 3719 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 3720 nv_nic_irq_other(0, dev); 3721 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 3722 } 3723 } 3724} 3725 3726#ifdef CONFIG_NET_POLL_CONTROLLER 3727static void nv_poll_controller(struct net_device *dev) 3728{ 3729 nv_do_nic_poll((unsigned long) dev); 3730} 3731#endif 3732 3733static void nv_do_stats_poll(unsigned long data) 3734{ 3735 struct net_device *dev = (struct net_device *) data; 3736 struct fe_priv *np = netdev_priv(dev); 3737 3738 nv_get_hw_stats(dev); 3739 3740 if (!np->in_shutdown) 3741 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 3742} 3743 3744static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 3745{ 3746 struct fe_priv *np = netdev_priv(dev); 3747 strcpy(info->driver, DRV_NAME); 3748 strcpy(info->version, FORCEDETH_VERSION); 3749 strcpy(info->bus_info, pci_name(np->pci_dev)); 3750} 3751 3752static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 3753{ 3754 struct fe_priv *np = netdev_priv(dev); 3755 wolinfo->supported = WAKE_MAGIC; 3756 3757 spin_lock_irq(&np->lock); 3758 if (np->wolenabled) 3759 wolinfo->wolopts = WAKE_MAGIC; 3760 spin_unlock_irq(&np->lock); 3761} 3762 3763static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 3764{ 3765 struct fe_priv *np = netdev_priv(dev); 3766 u8 __iomem *base = get_hwbase(dev); 3767 u32 flags = 0; 3768 3769 if (wolinfo->wolopts == 0) { 3770 np->wolenabled = 0; 3771 } else if (wolinfo->wolopts & WAKE_MAGIC) { 3772 np->wolenabled = 1; 3773 flags = NVREG_WAKEUPFLAGS_ENABLE; 3774 } 3775 if (netif_running(dev)) { 3776 spin_lock_irq(&np->lock); 3777 writel(flags, base + NvRegWakeUpFlags); 3778 spin_unlock_irq(&np->lock); 3779 } 3780 return 0; 3781} 3782 3783static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 3784{ 3785 struct fe_priv *np = netdev_priv(dev); 3786 int adv; 3787 3788 spin_lock_irq(&np->lock); 3789 ecmd->port = PORT_MII; 3790 if (!netif_running(dev)) { 3791 /* We do not track link speed / duplex setting if the 3792 * interface is disabled. 
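 * The phy keeps tracking the link on its own, though.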
Force a link check */ 3793 if (nv_update_linkspeed(dev)) { 3794 if (!netif_carrier_ok(dev)) 3795 netif_carrier_on(dev); 3796 } else { 3797 if (netif_carrier_ok(dev)) 3798 netif_carrier_off(dev); 3799 } 3800 } 3801 3802 if (netif_carrier_ok(dev)) { 3803 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 3804 case NVREG_LINKSPEED_10: 3805 ecmd->speed = SPEED_10; 3806 break; 3807 case NVREG_LINKSPEED_100: 3808 ecmd->speed = SPEED_100; 3809 break; 3810 case NVREG_LINKSPEED_1000: 3811 ecmd->speed = SPEED_1000; 3812 break; 3813 } 3814 ecmd->duplex = DUPLEX_HALF; 3815 if (np->duplex) 3816 ecmd->duplex = DUPLEX_FULL; 3817 } else { 3818 ecmd->speed = -1; 3819 ecmd->duplex = -1; 3820 } 3821 3822 ecmd->autoneg = np->autoneg; 3823 3824 ecmd->advertising = ADVERTISED_MII; 3825 if (np->autoneg) { 3826 ecmd->advertising |= ADVERTISED_Autoneg; 3827 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3828 if (adv & ADVERTISE_10HALF) 3829 ecmd->advertising |= ADVERTISED_10baseT_Half; 3830 if (adv & ADVERTISE_10FULL) 3831 ecmd->advertising |= ADVERTISED_10baseT_Full; 3832 if (adv & ADVERTISE_100HALF) 3833 ecmd->advertising |= ADVERTISED_100baseT_Half; 3834 if (adv & ADVERTISE_100FULL) 3835 ecmd->advertising |= ADVERTISED_100baseT_Full; 3836 if (np->gigabit == PHY_GIGABIT) { 3837 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3838 if (adv & ADVERTISE_1000FULL) 3839 ecmd->advertising |= ADVERTISED_1000baseT_Full; 3840 } 3841 } 3842 ecmd->supported = (SUPPORTED_Autoneg | 3843 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 3844 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 3845 SUPPORTED_MII); 3846 if (np->gigabit == PHY_GIGABIT) 3847 ecmd->supported |= SUPPORTED_1000baseT_Full; 3848 3849 ecmd->phy_address = np->phyaddr; 3850 ecmd->transceiver = XCVR_EXTERNAL; 3851 3852 /* ignore maxtxpkt, maxrxpkt for now */ 3853 spin_unlock_irq(&np->lock); 3854 return 0; 3855} 3856 3857static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 3858{ 3859 struct fe_priv *np = netdev_priv(dev); 3860 3861 if (ecmd->port != PORT_MII) 3862 return -EINVAL; 3863 if (ecmd->transceiver != XCVR_EXTERNAL) 3864 return -EINVAL; 3865 if (ecmd->phy_address != np->phyaddr) { 3866 /* TODO: support switching between multiple phys. Should be 3867 * trivial, but not enabled due to lack of test hardware. */ 3868 return -EINVAL; 3869 } 3870 if (ecmd->autoneg == AUTONEG_ENABLE) { 3871 u32 mask; 3872 3873 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 3874 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 3875 if (np->gigabit == PHY_GIGABIT) 3876 mask |= ADVERTISED_1000baseT_Full; 3877 3878 if ((ecmd->advertising & mask) == 0) 3879 return -EINVAL; 3880 3881 } else if (ecmd->autoneg == AUTONEG_DISABLE) { 3882 /* Note: autonegotiation disable, speed 1000 intentionally 3883 * forbidden - no one should need that.
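 * (Forcing 1000 Mbit/s would be unreliable anyway: 1000BASE-T resolves
 * its master/slave roles through autonegotiation.)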
*/ 3884 3885 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) 3886 return -EINVAL; 3887 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 3888 return -EINVAL; 3889 } else { 3890 return -EINVAL; 3891 } 3892 3893 netif_carrier_off(dev); 3894 if (netif_running(dev)) { 3895 nv_disable_irq(dev); 3896 netif_tx_lock_bh(dev); 3897 spin_lock(&np->lock); 3898 /* stop engines */ 3899 nv_stop_rx(dev); 3900 nv_stop_tx(dev); 3901 spin_unlock(&np->lock); 3902 netif_tx_unlock_bh(dev); 3903 } 3904 3905 if (ecmd->autoneg == AUTONEG_ENABLE) { 3906 int adv, bmcr; 3907 3908 np->autoneg = 1; 3909 3910 /* advertise only what has been requested */ 3911 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3912 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3913 if (ecmd->advertising & ADVERTISED_10baseT_Half) 3914 adv |= ADVERTISE_10HALF; 3915 if (ecmd->advertising & ADVERTISED_10baseT_Full) 3916 adv |= ADVERTISE_10FULL; 3917 if (ecmd->advertising & ADVERTISED_100baseT_Half) 3918 adv |= ADVERTISE_100HALF; 3919 if (ecmd->advertising & ADVERTISED_100baseT_Full) 3920 adv |= ADVERTISE_100FULL; 3921 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 3922 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 3923 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3924 adv |= ADVERTISE_PAUSE_ASYM; 3925 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 3926 3927 if (np->gigabit == PHY_GIGABIT) { 3928 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3929 adv &= ~ADVERTISE_1000FULL; 3930 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 3931 adv |= ADVERTISE_1000FULL; 3932 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 3933 } 3934 3935 if (netif_running(dev)) 3936 printk(KERN_INFO "%s: link down.\n", dev->name); 3937 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3938 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 3939 bmcr |= BMCR_ANENABLE; 3940 /* reset the phy in order for settings to stick, 3941 * and cause autoneg to start */ 3942 if (phy_reset(dev, bmcr)) { 3943 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 3944 return -EINVAL; 3945 } 3946 } else { 3947 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 3948 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3949 } 3950 } else { 3951 int adv, bmcr; 3952 3953 np->autoneg = 0; 3954 3955 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3956 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3957 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) 3958 adv |= ADVERTISE_10HALF; 3959 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) 3960 adv |= ADVERTISE_10FULL; 3961 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) 3962 adv |= ADVERTISE_100HALF; 3963 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) 3964 adv |= ADVERTISE_100FULL; 3965 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 3966 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ 3967 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 3968 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3969 } 3970 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { 3971 adv |= ADVERTISE_PAUSE_ASYM; 3972 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3973 } 3974 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 3975 np->fixed_mode = adv; 3976 3977 if (np->gigabit == PHY_GIGABIT) { 3978 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3979 adv &=
~ADVERTISE_1000FULL; 3980 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 3981 } 3982 3983 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3984 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); 3985 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) 3986 bmcr |= BMCR_FULLDPLX; 3987 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) 3988 bmcr |= BMCR_SPEED100; 3989 if (np->phy_oui == PHY_OUI_MARVELL) { 3990 /* reset the phy in order for forced mode settings to stick */ 3991 if (phy_reset(dev, bmcr)) { 3992 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 3993 return -EINVAL; 3994 } 3995 } else { 3996 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3997 if (netif_running(dev)) { 3998 /* Wait a bit and then reconfigure the nic. */ 3999 udelay(10); 4000 nv_linkchange(dev); 4001 } 4002 } 4003 } 4004 4005 if (netif_running(dev)) { 4006 nv_start_rx(dev); 4007 nv_start_tx(dev); 4008 nv_enable_irq(dev); 4009 } 4010 4011 return 0; 4012} 4013 4014#define FORCEDETH_REGS_VER 1 4015 4016static int nv_get_regs_len(struct net_device *dev) 4017{ 4018 struct fe_priv *np = netdev_priv(dev); 4019 return np->register_size; 4020} 4021 4022static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 4023{ 4024 struct fe_priv *np = netdev_priv(dev); 4025 u8 __iomem *base = get_hwbase(dev); 4026 u32 *rbuf = buf; 4027 int i; 4028 4029 regs->version = FORCEDETH_REGS_VER; 4030 spin_lock_irq(&np->lock); 4031 for (i = 0; i < np->register_size/sizeof(u32); i++) 4032 rbuf[i] = readl(base + i*sizeof(u32)); 4033 spin_unlock_irq(&np->lock); 4034} 4035 4036static int nv_nway_reset(struct net_device *dev) 4037{ 4038 struct fe_priv *np = netdev_priv(dev); 4039 int ret; 4040 4041 if (np->autoneg) { 4042 int bmcr; 4043 4044 netif_carrier_off(dev); 4045 if (netif_running(dev)) { 4046 nv_disable_irq(dev); 4047 netif_tx_lock_bh(dev); 4048 spin_lock(&np->lock); 4049 /* stop engines */ 4050 nv_stop_rx(dev); 4051 nv_stop_tx(dev); 4052 spin_unlock(&np->lock); 4053 netif_tx_unlock_bh(dev); 4054 printk(KERN_INFO "%s: link down.\n", dev->name); 4055 } 4056 4057 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4058 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4059 bmcr |= BMCR_ANENABLE; 4060 /* reset the phy in order for settings to stick */ 4061 if (phy_reset(dev, bmcr)) { 4062 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4063 return -EINVAL; 4064 } 4065 } else { 4066 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4067 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4068 } 4069 4070 if (netif_running(dev)) { 4071 nv_start_rx(dev); 4072 nv_start_tx(dev); 4073 nv_enable_irq(dev); 4074 } 4075 ret = 0; 4076 } else { 4077 ret = -EINVAL; 4078 } 4079 4080 return ret; 4081} 4082 4083static int nv_set_tso(struct net_device *dev, u32 value) 4084{ 4085 struct fe_priv *np = netdev_priv(dev); 4086 4087 if ((np->driver_data & DEV_HAS_CHECKSUM)) 4088 return ethtool_op_set_tso(dev, value); 4089 else 4090 return -EOPNOTSUPP; 4091} 4092 4093static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4094{ 4095 struct fe_priv *np = netdev_priv(dev); 4096 4097 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4098 ring->rx_mini_max_pending = 0; 4099 ring->rx_jumbo_max_pending = 0; 4100 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ?
RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4101 4102 ring->rx_pending = np->rx_ring_size; 4103 ring->rx_mini_pending = 0; 4104 ring->rx_jumbo_pending = 0; 4105 ring->tx_pending = np->tx_ring_size; 4106} 4107 4108static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4109{ 4110 struct fe_priv *np = netdev_priv(dev); 4111 u8 __iomem *base = get_hwbase(dev); 4112 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; 4113 dma_addr_t ring_addr; 4114 4115 if (ring->rx_pending < RX_RING_MIN || 4116 ring->tx_pending < TX_RING_MIN || 4117 ring->rx_mini_pending != 0 || 4118 ring->rx_jumbo_pending != 0 || 4119 (np->desc_ver == DESC_VER_1 && 4120 (ring->rx_pending > RING_MAX_DESC_VER_1 || 4121 ring->tx_pending > RING_MAX_DESC_VER_1)) || 4122 (np->desc_ver != DESC_VER_1 && 4123 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 4124 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { 4125 return -EINVAL; 4126 } 4127 4128 /* allocate new rings */ 4129 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4130 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4131 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4132 &ring_addr); 4133 } else { 4134 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4135 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4136 &ring_addr); 4137 } 4138 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); 4139 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); 4140 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { 4141 /* fall back to old rings */ 4142 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4143 if (rxtx_ring) 4144 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4145 rxtx_ring, ring_addr); 4146 } else { 4147 if (rxtx_ring) 4148 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4149 rxtx_ring, ring_addr); 4150 } 4151 if (rx_skbuff) 4152 kfree(rx_skbuff); 4153 if (tx_skbuff) 4154 kfree(tx_skbuff); 4155 goto exit; 4156 } 4157 4158 if (netif_running(dev)) { 4159 nv_disable_irq(dev); 4160 netif_tx_lock_bh(dev); 4161 spin_lock(&np->lock); 4162 /* stop engines */ 4163 nv_stop_rx(dev); 4164 nv_stop_tx(dev); 4165 nv_txrx_reset(dev); 4166 /* drain queues */ 4167 nv_drain_rx(dev); 4168 nv_drain_tx(dev); 4169 /* delete queues */ 4170 free_rings(dev); 4171 } 4172 4173 /* set new values */ 4174 np->rx_ring_size = ring->rx_pending; 4175 np->tx_ring_size = ring->tx_pending; 4176 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4177 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4178 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4179 } else { 4180 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4181 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4182 } 4183 np->rx_skb = (struct nv_skb_map*)rx_skbuff; 4184 np->tx_skb = (struct nv_skb_map*)tx_skbuff; 4185 np->ring_addr = ring_addr; 4186 4187 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4188 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); 4189 4190 if (netif_running(dev)) { 4191 /* reinit driver view of the queues */ 4192 set_bufsize(dev); 4193 if (nv_init_ring(dev)) { 4194 if (!np->in_shutdown) 4195 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4196 } 4197 4198 /* reinit nic view of the queues */ 4199 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4200 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4201 writel( ((np->rx_ring_size-1) << 
NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4202 base + NvRegRingSizes); 4203 pci_push(base); 4204 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4205 pci_push(base); 4206 4207 /* restart engines */ 4208 nv_start_rx(dev); 4209 nv_start_tx(dev); 4210 spin_unlock(&np->lock); 4211 netif_tx_unlock_bh(dev); 4212 nv_enable_irq(dev); 4213 } 4214 return 0; 4215exit: 4216 return -ENOMEM; 4217} 4218 4219static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4220{ 4221 struct fe_priv *np = netdev_priv(dev); 4222 4223 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; 4224 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; 4225 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; 4226} 4227 4228static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4229{ 4230 struct fe_priv *np = netdev_priv(dev); 4231 int adv, bmcr; 4232 4233 if ((!np->autoneg && np->duplex == 0) || 4234 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4235 printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n", 4236 dev->name); 4237 return -EINVAL; 4238 } 4239 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4240 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 4241 return -EINVAL; 4242 } 4243 4244 netif_carrier_off(dev); 4245 if (netif_running(dev)) { 4246 nv_disable_irq(dev); 4247 netif_tx_lock_bh(dev); 4248 spin_lock(&np->lock); 4249 /* stop engines */ 4250 nv_stop_rx(dev); 4251 nv_stop_tx(dev); 4252 spin_unlock(&np->lock); 4253 netif_tx_unlock_bh(dev); 4254 } 4255 4256 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); 4257 if (pause->rx_pause) 4258 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; 4259 if (pause->tx_pause) 4260 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; 4261 4262 if (np->autoneg && pause->autoneg) { 4263 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; 4264 4265 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4266 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4267 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 4268 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4269 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4270 adv |= ADVERTISE_PAUSE_ASYM; 4271 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4272 4273 if (netif_running(dev)) 4274 printk(KERN_INFO "%s: link down.\n", dev->name); 4275 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4276 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4277 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4278 } else { 4279 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4280 if (pause->rx_pause) 4281 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4282 if (pause->tx_pause) 4283 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4284 4285 if (!netif_running(dev)) 4286 nv_update_linkspeed(dev); 4287 else 4288 nv_update_pause(dev, np->pause_flags); 4289 } 4290 4291 if (netif_running(dev)) { 4292 nv_start_rx(dev); 4293 nv_start_tx(dev); 4294 nv_enable_irq(dev); 4295 } 4296 return 0; 4297} 4298 4299static u32 nv_get_rx_csum(struct net_device *dev) 4300{ 4301 struct fe_priv *np = netdev_priv(dev); 4302 return (np->rx_csum) != 0; 4303} 4304 4305static int nv_set_rx_csum(struct net_device *dev, u32 data) 4306{ 4307 struct fe_priv *np = netdev_priv(dev); 4308 u8 __iomem *base = get_hwbase(dev); 4309 int retcode = 0; 4310 4311 if
(np->driver_data & DEV_HAS_CHECKSUM) { 4312 if (data) { 4313 np->rx_csum = 1; 4314 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4315 } else { 4316 np->rx_csum = 0; 4317 /* vlan is dependent on rx checksum offload */ 4318 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) 4319 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; 4320 } 4321 if (netif_running(dev)) { 4322 spin_lock_irq(&np->lock); 4323 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4324 spin_unlock_irq(&np->lock); 4325 } 4326 } else { 4327 return -EINVAL; 4328 } 4329 4330 return retcode; 4331} 4332 4333static int nv_set_tx_csum(struct net_device *dev, u32 data) 4334{ 4335 struct fe_priv *np = netdev_priv(dev); 4336 4337 if (np->driver_data & DEV_HAS_CHECKSUM) 4338 return ethtool_op_set_tx_hw_csum(dev, data); 4339 else 4340 return -EOPNOTSUPP; 4341} 4342 4343static int nv_set_sg(struct net_device *dev, u32 data) 4344{ 4345 struct fe_priv *np = netdev_priv(dev); 4346 4347 if (np->driver_data & DEV_HAS_CHECKSUM) 4348 return ethtool_op_set_sg(dev, data); 4349 else 4350 return -EOPNOTSUPP; 4351} 4352 4353static int nv_get_sset_count(struct net_device *dev, int sset) 4354{ 4355 struct fe_priv *np = netdev_priv(dev); 4356 4357 switch (sset) { 4358 case ETH_SS_TEST: 4359 if (np->driver_data & DEV_HAS_TEST_EXTENDED) 4360 return NV_TEST_COUNT_EXTENDED; 4361 else 4362 return NV_TEST_COUNT_BASE; 4363 case ETH_SS_STATS: 4364 if (np->driver_data & DEV_HAS_STATISTICS_V1) 4365 return NV_DEV_STATISTICS_V1_COUNT; 4366 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4367 return NV_DEV_STATISTICS_V2_COUNT; 4368 else 4369 return 0; 4370 default: 4371 return -EOPNOTSUPP; 4372 } 4373} 4374 4375static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) 4376{ 4377 struct fe_priv *np = netdev_priv(dev); 4378 4379 /* update stats */ 4380 nv_do_stats_poll((unsigned long)dev); 4381 4382 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); 4383} 4384 4385static int nv_link_test(struct net_device *dev) 4386{ 4387 struct fe_priv *np = netdev_priv(dev); 4388 int mii_status; 4389 4390 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4391 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4392 4393 /* check phy link status */ 4394 if (!(mii_status & BMSR_LSTATUS)) 4395 return 0; 4396 else 4397 return 1; 4398} 4399 4400static int nv_register_test(struct net_device *dev) 4401{ 4402 u8 __iomem *base = get_hwbase(dev); 4403 int i = 0; 4404 u32 orig_read, new_read; 4405 4406 do { 4407 orig_read = readl(base + nv_registers_test[i].reg); 4408 4409 /* xor with mask to toggle bits */ 4410 orig_read ^= nv_registers_test[i].mask; 4411 4412 writel(orig_read, base + nv_registers_test[i].reg); 4413 4414 new_read = readl(base + nv_registers_test[i].reg); 4415 4416 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) 4417 return 0; 4418 4419 /* restore original value */ 4420 orig_read ^= nv_registers_test[i].mask; 4421 writel(orig_read, base + nv_registers_test[i].reg); 4422 4423 } while (nv_registers_test[++i].reg != 0); 4424 4425 return 1; 4426} 4427 4428static int nv_interrupt_test(struct net_device *dev) 4429{ 4430 struct fe_priv *np = netdev_priv(dev); 4431 u8 __iomem *base = get_hwbase(dev); 4432 int ret = 1; 4433 int testcnt; 4434 u32 save_msi_flags, save_poll_interval = 0; 4435 4436 if (netif_running(dev)) { 4437 /* free current irq */ 4438 nv_free_irq(dev); 4439 save_poll_interval = readl(base+NvRegPollingInterval); 4440 } 4441 4442 /* flag to test interrupt handler 
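 * nv_nic_irq_test sets np->intr_test to 1 under np->lock when the timer
 * interrupt fires; if it is still 0 after the msleep() below, interrupt
 * delivery did not work.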
*/ 4443 np->intr_test = 0; 4444 4445 /* setup test irq */ 4446 save_msi_flags = np->msi_flags; 4447 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; 4448 np->msi_flags |= 0x001; /* setup 1 vector */ 4449 if (nv_request_irq(dev, 1)) 4450 return 0; 4451 4452 /* setup timer interrupt */ 4453 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 4454 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4455 4456 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4457 4458 /* wait for at least one interrupt */ 4459 msleep(100); 4460 4461 spin_lock_irq(&np->lock); 4462 4463 /* flag should be set within ISR */ 4464 testcnt = np->intr_test; 4465 if (!testcnt) 4466 ret = 2; 4467 4468 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4469 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 4470 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4471 else 4472 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4473 4474 spin_unlock_irq(&np->lock); 4475 4476 nv_free_irq(dev); 4477 4478 np->msi_flags = save_msi_flags; 4479 4480 if (netif_running(dev)) { 4481 writel(save_poll_interval, base + NvRegPollingInterval); 4482 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4483 /* restore original irq */ 4484 if (nv_request_irq(dev, 0)) 4485 return 0; 4486 } 4487 4488 return ret; 4489} 4490 4491static int nv_loopback_test(struct net_device *dev) 4492{ 4493 struct fe_priv *np = netdev_priv(dev); 4494 u8 __iomem *base = get_hwbase(dev); 4495 struct sk_buff *tx_skb, *rx_skb; 4496 dma_addr_t test_dma_addr; 4497 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 4498 u32 flags; 4499 int len, i, pkt_len; 4500 u8 *pkt_data; 4501 u32 filter_flags = 0; 4502 u32 misc1_flags = 0; 4503 int ret = 1; 4504 4505 if (netif_running(dev)) { 4506 nv_disable_irq(dev); 4507 filter_flags = readl(base + NvRegPacketFilterFlags); 4508 misc1_flags = readl(base + NvRegMisc1); 4509 } else { 4510 nv_txrx_reset(dev); 4511 } 4512 4513 /* reinit driver view of the rx queue */ 4514 set_bufsize(dev); 4515 nv_init_ring(dev); 4516 4517 /* setup hardware for loopback */ 4518 writel(NVREG_MISC1_FORCE, base + NvRegMisc1); 4519 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); 4520 4521 /* reinit nic view of the rx queue */ 4522 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4523 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4524 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4525 base + NvRegRingSizes); 4526 pci_push(base); 4527 4528 /* restart rx engine */ 4529 nv_start_rx(dev); 4530 nv_start_tx(dev); 4531 4532 /* setup packet for tx */ 4533 pkt_len = ETH_DATA_LEN; 4534 tx_skb = dev_alloc_skb(pkt_len); 4535 if (!tx_skb) { 4536 printk(KERN_ERR "dev_alloc_skb() failed during loopback test" 4537 " of %s\n", dev->name); 4538 ret = 0; 4539 goto out; 4540 } 4541 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, 4542 skb_tailroom(tx_skb), 4543 PCI_DMA_TODEVICE); 4544 pkt_data = skb_put(tx_skb, pkt_len); 4545 for (i = 0; i < pkt_len; i++) 4546 pkt_data[i] = (u8)(i & 0xff); 4547 4548 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4549 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 4550 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 4551 } else { 4552 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); 4553 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); 4554 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) |
np->tx_flags | tx_flags_extra); 4555 } 4556 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4557 pci_push(get_hwbase(dev)); 4558 4559 msleep(500); 4560 4561 /* check for rx of the packet */ 4562 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4563 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 4564 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 4565 4566 } else { 4567 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); 4568 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 4569 } 4570 4571 if (flags & NV_RX_AVAIL) { 4572 ret = 0; 4573 } else if (np->desc_ver == DESC_VER_1) { 4574 if (flags & NV_RX_ERROR) 4575 ret = 0; 4576 } else { 4577 if (flags & NV_RX2_ERROR) { 4578 ret = 0; 4579 } 4580 } 4581 4582 if (ret) { 4583 if (len != pkt_len) { 4584 ret = 0; 4585 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 4586 dev->name, len, pkt_len); 4587 } else { 4588 rx_skb = np->rx_skb[0].skb; 4589 for (i = 0; i < pkt_len; i++) { 4590 if (rx_skb->data[i] != (u8)(i & 0xff)) { 4591 ret = 0; 4592 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", 4593 dev->name, i); 4594 break; 4595 } 4596 } 4597 } 4598 } else { 4599 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); 4600 } 4601 4602 pci_unmap_single(np->pci_dev, test_dma_addr, 4603 (skb_end_pointer(tx_skb) - tx_skb->data), 4604 PCI_DMA_TODEVICE); 4605 dev_kfree_skb_any(tx_skb); 4606 out: 4607 /* stop engines */ 4608 nv_stop_rx(dev); 4609 nv_stop_tx(dev); 4610 nv_txrx_reset(dev); 4611 /* drain rx queue */ 4612 nv_drain_rx(dev); 4613 nv_drain_tx(dev); 4614 4615 if (netif_running(dev)) { 4616 writel(misc1_flags, base + NvRegMisc1); 4617 writel(filter_flags, base + NvRegPacketFilterFlags); 4618 nv_enable_irq(dev); 4619 } 4620 4621 return ret; 4622} 4623 4624static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) 4625{ 4626 struct fe_priv *np = netdev_priv(dev); 4627 u8 __iomem *base = get_hwbase(dev); 4628 int result; 4629 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); 4630 4631 if (!nv_link_test(dev)) { 4632 test->flags |= ETH_TEST_FL_FAILED; 4633 buffer[0] = 1; 4634 } 4635 4636 if (test->flags & ETH_TEST_FL_OFFLINE) { 4637 if (netif_running(dev)) { 4638 netif_stop_queue(dev); 4639#ifdef CONFIG_FORCEDETH_NAPI 4640 napi_disable(&np->napi); 4641#endif 4642 netif_tx_lock_bh(dev); 4643 spin_lock_irq(&np->lock); 4644 nv_disable_hw_interrupts(dev, np->irqmask); 4645 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 4646 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4647 } else { 4648 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4649 } 4650 /* stop engines */ 4651 nv_stop_rx(dev); 4652 nv_stop_tx(dev); 4653 nv_txrx_reset(dev); 4654 /* drain rx queue */ 4655 nv_drain_rx(dev); 4656 nv_drain_tx(dev); 4657 spin_unlock_irq(&np->lock); 4658 netif_tx_unlock_bh(dev); 4659 } 4660 4661 if (!nv_register_test(dev)) { 4662 test->flags |= ETH_TEST_FL_FAILED; 4663 buffer[1] = 1; 4664 } 4665 4666 result = nv_interrupt_test(dev); 4667 if (result != 1) { 4668 test->flags |= ETH_TEST_FL_FAILED; 4669 buffer[2] = 1; 4670 } 4671 if (result == 0) { 4672 /* bail out */ 4673 return; 4674 } 4675 4676 if (!nv_loopback_test(dev)) { 4677 test->flags |= ETH_TEST_FL_FAILED; 4678 buffer[3] = 1; 4679 } 4680 4681 if (netif_running(dev)) { 4682 /* reinit driver view of the rx queue */ 4683 set_bufsize(dev); 4684 if (nv_init_ring(dev)) { 4685 if (!np->in_shutdown) 4686 mod_timer(&np->oom_kick, jiffies +
OOM_REFILL); 4687 } 4688 /* reinit nic view of the rx queue */ 4689 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4690 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4691 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4692 base + NvRegRingSizes); 4693 pci_push(base); 4694 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4695 pci_push(base); 4696 /* restart rx engine */ 4697 nv_start_rx(dev); 4698 nv_start_tx(dev); 4699 netif_start_queue(dev); 4700#ifdef CONFIG_FORCEDETH_NAPI 4701 napi_enable(&np->napi); 4702#endif 4703 nv_enable_hw_interrupts(dev, np->irqmask); 4704 } 4705 } 4706} 4707 4708static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) 4709{ 4710 switch (stringset) { 4711 case ETH_SS_STATS: 4712 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); 4713 break; 4714 case ETH_SS_TEST: 4715 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); 4716 break; 4717 } 4718} 4719 4720static const struct ethtool_ops ops = { 4721 .get_drvinfo = nv_get_drvinfo, 4722 .get_link = ethtool_op_get_link, 4723 .get_wol = nv_get_wol, 4724 .set_wol = nv_set_wol, 4725 .get_settings = nv_get_settings, 4726 .set_settings = nv_set_settings, 4727 .get_regs_len = nv_get_regs_len, 4728 .get_regs = nv_get_regs, 4729 .nway_reset = nv_nway_reset, 4730 .set_tso = nv_set_tso, 4731 .get_ringparam = nv_get_ringparam, 4732 .set_ringparam = nv_set_ringparam, 4733 .get_pauseparam = nv_get_pauseparam, 4734 .set_pauseparam = nv_set_pauseparam, 4735 .get_rx_csum = nv_get_rx_csum, 4736 .set_rx_csum = nv_set_rx_csum, 4737 .set_tx_csum = nv_set_tx_csum, 4738 .set_sg = nv_set_sg, 4739 .get_strings = nv_get_strings, 4740 .get_ethtool_stats = nv_get_ethtool_stats, 4741 .get_sset_count = nv_get_sset_count, 4742 .self_test = nv_self_test, 4743}; 4744 4745static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 4746{ 4747 struct fe_priv *np = get_nvpriv(dev); 4748 4749 spin_lock_irq(&np->lock); 4750 4751 /* save vlan group */ 4752 np->vlangrp = grp; 4753 4754 if (grp) { 4755 /* enable vlan on MAC */ 4756 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; 4757 } else { 4758 /* disable vlan on MAC */ 4759 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 4760 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 4761 } 4762 4763 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4764 4765 spin_unlock_irq(&np->lock); 4766} 4767 4768/* The mgmt unit and driver use a semaphore to access the phy during init */ 4769static int nv_mgmt_acquire_sema(struct net_device *dev) 4770{ 4771 u8 __iomem *base = get_hwbase(dev); 4772 int i; 4773 u32 tx_ctrl, mgmt_sema; 4774 4775 for (i = 0; i < 10; i++) { 4776 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; 4777 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) 4778 break; 4779 msleep(500); 4780 } 4781 4782 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) 4783 return 0; 4784 4785 for (i = 0; i < 2; i++) { 4786 tx_ctrl = readl(base + NvRegTransmitterControl); 4787 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; 4788 writel(tx_ctrl, base + NvRegTransmitterControl); 4789 4790 /* verify that semaphore was acquired */ 4791 tx_ctrl = readl(base + NvRegTransmitterControl); 4792 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && 4793 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == 
NVREG_XMITCTL_MGMT_SEMA_FREE)) 4794 return 1; 4795 else 4796 udelay(50); 4797 } 4798 4799 return 0; 4800} 4801 4802static int nv_open(struct net_device *dev) 4803{ 4804 struct fe_priv *np = netdev_priv(dev); 4805 u8 __iomem *base = get_hwbase(dev); 4806 int ret = 1; 4807 int oom, i; 4808 4809 dprintk(KERN_DEBUG "nv_open: begin\n"); 4810 4811 /* erase previous misconfiguration */ 4812 if (np->driver_data & DEV_HAS_POWER_CNTRL) 4813 nv_mac_reset(dev); 4814 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 4815 writel(0, base + NvRegMulticastAddrB); 4816 writel(0, base + NvRegMulticastMaskA); 4817 writel(0, base + NvRegMulticastMaskB); 4818 writel(0, base + NvRegPacketFilterFlags); 4819 4820 writel(0, base + NvRegTransmitterControl); 4821 writel(0, base + NvRegReceiverControl); 4822 4823 writel(0, base + NvRegAdapterControl); 4824 4825 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) 4826 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 4827 4828 /* initialize descriptor rings */ 4829 set_bufsize(dev); 4830 oom = nv_init_ring(dev); 4831 4832 writel(0, base + NvRegLinkSpeed); 4833 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 4834 nv_txrx_reset(dev); 4835 writel(0, base + NvRegUnknownSetupReg6); 4836 4837 np->in_shutdown = 0; 4838 4839 /* give hw rings */ 4840 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4841 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4842 base + NvRegRingSizes); 4843 4844 writel(np->linkspeed, base + NvRegLinkSpeed); 4845 if (np->desc_ver == DESC_VER_1) 4846 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); 4847 else 4848 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); 4849 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4850 writel(np->vlanctl_bits, base + NvRegVlanControl); 4851 pci_push(base); 4852 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 4853 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 4854 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 4855 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 4856 4857 writel(0, base + NvRegMIIMask); 4858 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4859 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 4860 4861 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 4862 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 4863 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 4864 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4865 4866 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 4867 get_random_bytes(&i, sizeof(i)); 4868 writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); 4869 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); 4870 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); 4871 if (poll_interval == -1) { 4872 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 4873 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 4874 else 4875 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 4876 } 4877 else 4878 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 4879 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4880 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 4881 base + NvRegAdapterControl); 4882 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); 4883 
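/* unmask phy link-change events again; NvRegMIIMask was cleared earlier
 * in nv_open while the hardware was being reprogrammed */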
writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); 4884 if (np->wolenabled) 4885 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 4886 4887 i = readl(base + NvRegPowerState); 4888 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 4889 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 4890 4891 pci_push(base); 4892 udelay(10); 4893 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); 4894 4895 nv_disable_hw_interrupts(dev, np->irqmask); 4896 pci_push(base); 4897 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 4898 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4899 pci_push(base); 4900 4901 if (nv_request_irq(dev, 0)) { 4902 goto out_drain; 4903 } 4904 4905 /* ask for interrupts */ 4906 nv_enable_hw_interrupts(dev, np->irqmask); 4907 4908 spin_lock_irq(&np->lock); 4909 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 4910 writel(0, base + NvRegMulticastAddrB); 4911 writel(0, base + NvRegMulticastMaskA); 4912 writel(0, base + NvRegMulticastMaskB); 4913 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 4914 /* One manual link speed update: Interrupts are enabled, future link 4915 * speed changes cause interrupts and are handled by nv_link_irq(). 4916 */ 4917 { 4918 u32 miistat; 4919 miistat = readl(base + NvRegMIIStatus); 4920 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 4921 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); 4922 } 4923 /* set linkspeed to invalid value, thus force nv_update_linkspeed 4924 * to init hw */ 4925 np->linkspeed = 0; 4926 ret = nv_update_linkspeed(dev); 4927 nv_start_rx(dev); 4928 nv_start_tx(dev); 4929 netif_start_queue(dev); 4930#ifdef CONFIG_FORCEDETH_NAPI 4931 napi_enable(&np->napi); 4932#endif 4933 4934 if (ret) { 4935 netif_carrier_on(dev); 4936 } else { 4937 printk(KERN_INFO "%s: no link during initialization.\n", dev->name); 4938 netif_carrier_off(dev); 4939 } 4940 if (oom) 4941 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4942 4943 /* start statistics timer */ 4944 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) 4945 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 4946 4947 spin_unlock_irq(&np->lock); 4948 4949 return 0; 4950out_drain: 4951 drain_ring(dev); 4952 return ret; 4953} 4954 4955static int nv_close(struct net_device *dev) 4956{ 4957 struct fe_priv *np = netdev_priv(dev); 4958 u8 __iomem *base; 4959 4960 spin_lock_irq(&np->lock); 4961 np->in_shutdown = 1; 4962 spin_unlock_irq(&np->lock); 4963#ifdef CONFIG_FORCEDETH_NAPI 4964 napi_disable(&np->napi); 4965#endif 4966 synchronize_irq(np->pci_dev->irq); 4967 4968 del_timer_sync(&np->oom_kick); 4969 del_timer_sync(&np->nic_poll); 4970 del_timer_sync(&np->stats_poll); 4971 4972 netif_stop_queue(dev); 4973 spin_lock_irq(&np->lock); 4974 nv_stop_tx(dev); 4975 nv_stop_rx(dev); 4976 nv_txrx_reset(dev); 4977 4978 /* disable interrupts on the nic or we will lock up */ 4979 base = get_hwbase(dev); 4980 nv_disable_hw_interrupts(dev, np->irqmask); 4981 pci_push(base); 4982 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); 4983 4984 spin_unlock_irq(&np->lock); 4985 4986 nv_free_irq(dev); 4987 4988 drain_ring(dev); 4989 4990 if (np->wolenabled) { 4991 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 4992 nv_start_rx(dev); 4993 } 4994 4995 /* FIXME: power down nic */ 4996 4997 return 0; 4998} 4999 5000static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 5001{ 5002 struct net_device *dev; 5003 struct fe_priv *np; 
5004 unsigned long addr; 5005 u8 __iomem *base; 5006 int err, i; 5007 u32 powerstate, txreg; 5008 u32 phystate_orig = 0, phystate; 5009 int phyinitialized = 0; 5010 DECLARE_MAC_BUF(mac); 5011 static int printed_version; 5012 5013 if (!printed_version++) 5014 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet" 5015 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION); 5016 5017 dev = alloc_etherdev(sizeof(struct fe_priv)); 5018 err = -ENOMEM; 5019 if (!dev) 5020 goto out; 5021 5022 np = netdev_priv(dev); 5023 np->dev = dev; 5024 np->pci_dev = pci_dev; 5025 spin_lock_init(&np->lock); 5026 SET_NETDEV_DEV(dev, &pci_dev->dev); 5027 5028 init_timer(&np->oom_kick); 5029 np->oom_kick.data = (unsigned long) dev; 5030 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ 5031 init_timer(&np->nic_poll); 5032 np->nic_poll.data = (unsigned long) dev; 5033 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ 5034 init_timer(&np->stats_poll); 5035 np->stats_poll.data = (unsigned long) dev; 5036 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ 5037 5038 err = pci_enable_device(pci_dev); 5039 if (err) 5040 goto out_free; 5041 5042 pci_set_master(pci_dev); 5043 5044 err = pci_request_regions(pci_dev, DRV_NAME); 5045 if (err < 0) 5046 goto out_disable; 5047 5048 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) 5049 np->register_size = NV_PCI_REGSZ_VER3; 5050 else if (id->driver_data & DEV_HAS_STATISTICS_V1) 5051 np->register_size = NV_PCI_REGSZ_VER2; 5052 else 5053 np->register_size = NV_PCI_REGSZ_VER1; 5054 5055 err = -EINVAL; 5056 addr = 0; 5057 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5058 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", 5059 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), 5060 pci_resource_len(pci_dev, i), 5061 pci_resource_flags(pci_dev, i)); 5062 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 5063 pci_resource_len(pci_dev, i) >= np->register_size) { 5064 addr = pci_resource_start(pci_dev, i); 5065 break; 5066 } 5067 } 5068 if (i == DEVICE_COUNT_RESOURCE) { 5069 dev_printk(KERN_INFO, &pci_dev->dev, 5070 "Couldn't find register window\n"); 5071 goto out_relreg; 5072 } 5073 5074 /* copy of driver data */ 5075 np->driver_data = id->driver_data; 5076 5077 /* handle different descriptor versions */ 5078 if (id->driver_data & DEV_HAS_HIGH_DMA) { 5079 /* packet format 3: supports 40-bit addressing */ 5080 np->desc_ver = DESC_VER_3; 5081 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 5082 if (dma_64bit) { 5083 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) 5084 dev_printk(KERN_INFO, &pci_dev->dev, 5085 "64-bit DMA failed, using 32-bit addressing\n"); 5086 else 5087 dev->features |= NETIF_F_HIGHDMA; 5088 if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) { 5089 dev_printk(KERN_INFO, &pci_dev->dev, 5090 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); 5091 } 5092 } 5093 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 5094 /* packet format 2: supports jumbo frames */ 5095 np->desc_ver = DESC_VER_2; 5096 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; 5097 } else { 5098 /* original packet format */ 5099 np->desc_ver = DESC_VER_1; 5100 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; 5101 } 5102 5103 np->pkt_limit = NV_PKTLIMIT_1; 5104 if (id->driver_data & DEV_HAS_LARGEDESC) 5105 np->pkt_limit = NV_PKTLIMIT_2; 5106 5107 if (id->driver_data & DEV_HAS_CHECKSUM) { 5108 np->rx_csum = 1; 5109 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 5110 dev->features |= 
NETIF_F_HW_CSUM | NETIF_F_SG; 5111 dev->features |= NETIF_F_TSO; 5112 } 5113 5114 np->vlanctl_bits = 0; 5115 if (id->driver_data & DEV_HAS_VLAN) { 5116 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; 5117 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; 5118 dev->vlan_rx_register = nv_vlan_rx_register; 5119 } 5120 5121 np->msi_flags = 0; 5122 if ((id->driver_data & DEV_HAS_MSI) && msi) { 5123 np->msi_flags |= NV_MSI_CAPABLE; 5124 } 5125 if ((id->driver_data & DEV_HAS_MSI_X) && msix) { 5126 np->msi_flags |= NV_MSI_X_CAPABLE; 5127 } 5128 5129 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; 5130 if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) { 5131 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; 5132 } 5133 5134 5135 err = -ENOMEM; 5136 np->base = ioremap(addr, np->register_size); 5137 if (!np->base) 5138 goto out_relreg; 5139 dev->base_addr = (unsigned long)np->base; 5140 5141 dev->irq = pci_dev->irq; 5142 5143 np->rx_ring_size = RX_RING_DEFAULT; 5144 np->tx_ring_size = TX_RING_DEFAULT; 5145 5146 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 5147 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 5148 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 5149 &np->ring_addr); 5150 if (!np->rx_ring.orig) 5151 goto out_unmap; 5152 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 5153 } else { 5154 np->rx_ring.ex = pci_alloc_consistent(pci_dev, 5155 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 5156 &np->ring_addr); 5157 if (!np->rx_ring.ex) 5158 goto out_unmap; 5159 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 5160 } 5161 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); 5162 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); 5163 if (!np->rx_skb || !np->tx_skb) 5164 goto out_freering; 5165 5166 dev->open = nv_open; 5167 dev->stop = nv_close; 5168 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 5169 dev->hard_start_xmit = nv_start_xmit; 5170 else 5171 dev->hard_start_xmit = nv_start_xmit_optimized; 5172 dev->get_stats = nv_get_stats; 5173 dev->change_mtu = nv_change_mtu; 5174 dev->set_mac_address = nv_set_mac_address; 5175 dev->set_multicast_list = nv_set_multicast; 5176#ifdef CONFIG_NET_POLL_CONTROLLER 5177 dev->poll_controller = nv_poll_controller; 5178#endif 5179#ifdef CONFIG_FORCEDETH_NAPI 5180 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); 5181#endif 5182 SET_ETHTOOL_OPS(dev, &ops); 5183 dev->tx_timeout = nv_tx_timeout; 5184 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 5185 5186 pci_set_drvdata(pci_dev, dev); 5187 5188 /* read the mac address */ 5189 base = get_hwbase(dev); 5190 np->orig_mac[0] = readl(base + NvRegMacAddrA); 5191 np->orig_mac[1] = readl(base + NvRegMacAddrB); 5192 5193 /* check the workaround bit for correct mac address order */ 5194 txreg = readl(base + NvRegTransmitPoll); 5195 if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) || 5196 (id->driver_data & DEV_HAS_CORRECT_MACADDR)) { 5197 /* mac address is already in correct order */ 5198 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; 5199 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; 5200 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; 5201 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; 5202 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; 5203 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; 5204 } else { 5205 /* need to reverse mac address to correct order */ 5206 dev->dev_addr[0] = 
(np->orig_mac[1] >> 8) & 0xff; 5207 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; 5208 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; 5209 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; 5210 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 5211 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 5212 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5213 } 5214 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 5215 5216 if (!is_valid_ether_addr(dev->perm_addr)) { 5217 /* 5218 * Bad mac address. At least one bios sets the mac address 5219 * to 01:23:45:67:89:ab 5220 */ 5221 dev_printk(KERN_ERR, &pci_dev->dev, 5222 "Invalid Mac address detected: %s\n", 5223 print_mac(mac, dev->dev_addr)); 5224 dev_printk(KERN_ERR, &pci_dev->dev, 5225 "Please complain to your hardware vendor. Switching to a random MAC.\n"); 5226 dev->dev_addr[0] = 0x00; 5227 dev->dev_addr[1] = 0x00; 5228 dev->dev_addr[2] = 0x6c; 5229 get_random_bytes(&dev->dev_addr[3], 3); 5230 } 5231 5232 dprintk(KERN_DEBUG "%s: MAC Address %s\n", 5233 pci_name(pci_dev), print_mac(mac, dev->dev_addr)); 5234 5235 /* set mac address */ 5236 nv_copy_mac_to_hw(dev); 5237 5238 /* disable WOL */ 5239 writel(0, base + NvRegWakeUpFlags); 5240 np->wolenabled = 0; 5241 5242 if (id->driver_data & DEV_HAS_POWER_CNTRL) { 5243 5244 /* take phy and nic out of low power mode */ 5245 powerstate = readl(base + NvRegPowerState2); 5246 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; 5247 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || 5248 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) && 5249 pci_dev->revision >= 0xA3) 5250 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; 5251 writel(powerstate, base + NvRegPowerState2); 5252 } 5253 5254 if (np->desc_ver == DESC_VER_1) { 5255 np->tx_flags = NV_TX_VALID; 5256 } else { 5257 np->tx_flags = NV_TX2_VALID; 5258 } 5259 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { 5260 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 5261 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 5262 np->msi_flags |= 0x0003; 5263 } else { 5264 np->irqmask = NVREG_IRQMASK_CPU; 5265 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 5266 np->msi_flags |= 0x0001; 5267 } 5268 5269 if (id->driver_data & DEV_NEED_TIMERIRQ) 5270 np->irqmask |= NVREG_IRQ_TIMER; 5271 if (id->driver_data & DEV_NEED_LINKTIMER) { 5272 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev)); 5273 np->need_linktimer = 1; 5274 np->link_timeout = jiffies + LINK_TIMEOUT; 5275 } else { 5276 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev)); 5277 np->need_linktimer = 0; 5278 } 5279 5280 /* clear phy state and temporarily halt phy interrupts */ 5281 writel(0, base + NvRegMIIMask); 5282 phystate = readl(base + NvRegAdapterControl); 5283 if (phystate & NVREG_ADAPTCTL_RUNNING) { 5284 phystate_orig = 1; 5285 phystate &= ~NVREG_ADAPTCTL_RUNNING; 5286 writel(phystate, base + NvRegAdapterControl); 5287 } 5288 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 5289 5290 if (id->driver_data & DEV_HAS_MGMT_UNIT) { 5291 /* management unit running on the mac? */ 5292 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) { 5293 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; 5294 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use); 5295 if (nv_mgmt_acquire_sema(dev)) { 5296 /* management unit setup the phy already? 
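 * If the sync bits read back as NVREG_XMITCTL_SYNC_PHY_INIT, the mgmt
 * unit has already brought the phy up and the driver must not reset it
 * (phy_init is skipped below).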
*/ 5297 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == 5298 NVREG_XMITCTL_SYNC_PHY_INIT) { 5299 /* phy is inited by mgmt unit */ 5300 phyinitialized = 1; 5301 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); 5302 } else { 5303 /* we need to init the phy */ 5304 } 5305 } 5306 } 5307 } 5308 5309 /* find a suitable phy */ 5310 for (i = 1; i <= 32; i++) { 5311 int id1, id2; 5312 int phyaddr = i & 0x1F; 5313 5314 spin_lock_irq(&np->lock); 5315 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ); 5316 spin_unlock_irq(&np->lock); 5317 if (id1 < 0 || id1 == 0xffff) 5318 continue; 5319 spin_lock_irq(&np->lock); 5320 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ); 5321 spin_unlock_irq(&np->lock); 5322 if (id2 < 0 || id2 == 0xffff) 5323 continue; 5324 5325 np->phy_model = id2 & PHYID2_MODEL_MASK; 5326 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; 5327 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; 5328 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", 5329 pci_name(pci_dev), id1, id2, phyaddr); 5330 np->phyaddr = phyaddr; 5331 np->phy_oui = id1 | id2; 5332 break; 5333 } 5334 if (i == 33) { 5335 dev_printk(KERN_INFO, &pci_dev->dev, 5336 "open: Could not find a valid PHY.\n"); 5337 goto out_error; 5338 } 5339 5340 if (!phyinitialized) { 5341 /* reset it */ 5342 phy_init(dev); 5343 } else { 5344 /* see if it is a gigabit phy */ 5345 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 5346 if (mii_status & PHY_GIGABIT) { 5347 np->gigabit = PHY_GIGABIT; 5348 } 5349 } 5350 5351 /* set default link speed settings */ 5352 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 5353 np->duplex = 0; 5354 np->autoneg = 1; 5355 5356 err = register_netdev(dev); 5357 if (err) { 5358 dev_printk(KERN_INFO, &pci_dev->dev, 5359 "unable to register netdev: %d\n", err); 5360 goto out_error; 5361 } 5362 5363 dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, " 5364 "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", 5365 dev->name, 5366 np->phy_oui, 5367 np->phyaddr, 5368 dev->dev_addr[0], 5369 dev->dev_addr[1], 5370 dev->dev_addr[2], 5371 dev->dev_addr[3], 5372 dev->dev_addr[4], 5373 dev->dev_addr[5]); 5374 5375 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", 5376 dev->features & NETIF_F_HIGHDMA ? "highdma " : "", 5377 dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ? 5378 "csum " : "", 5379 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? 5380 "vlan " : "", 5381 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "", 5382 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "", 5383 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "", 5384 np->gigabit == PHY_GIGABIT ? "gbit " : "", 5385 np->need_linktimer ? "lnktim " : "", 5386 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", 5387 np->msi_flags & NV_MSI_X_CAPABLE ? 
"msi-x " : "", 5388 np->desc_ver); 5389 5390 return 0; 5391 5392out_error: 5393 if (phystate_orig) 5394 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); 5395 pci_set_drvdata(pci_dev, NULL); 5396out_freering: 5397 free_rings(dev); 5398out_unmap: 5399 iounmap(get_hwbase(dev)); 5400out_relreg: 5401 pci_release_regions(pci_dev); 5402out_disable: 5403 pci_disable_device(pci_dev); 5404out_free: 5405 free_netdev(dev); 5406out: 5407 return err; 5408} 5409 5410static void __devexit nv_remove(struct pci_dev *pci_dev) 5411{ 5412 struct net_device *dev = pci_get_drvdata(pci_dev); 5413 struct fe_priv *np = netdev_priv(dev); 5414 u8 __iomem *base = get_hwbase(dev); 5415 5416 unregister_netdev(dev); 5417 5418 /* special op: write back the misordered MAC address - otherwise 5419 * the next nv_probe would see a wrong address. 5420 */ 5421 writel(np->orig_mac[0], base + NvRegMacAddrA); 5422 writel(np->orig_mac[1], base + NvRegMacAddrB); 5423 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV, 5424 base + NvRegTransmitPoll); 5425 5426 /* free all structures */ 5427 free_rings(dev); 5428 iounmap(get_hwbase(dev)); 5429 pci_release_regions(pci_dev); 5430 pci_disable_device(pci_dev); 5431 free_netdev(dev); 5432 pci_set_drvdata(pci_dev, NULL); 5433} 5434 5435#ifdef CONFIG_PM 5436static int nv_suspend(struct pci_dev *pdev, pm_message_t state) 5437{ 5438 struct net_device *dev = pci_get_drvdata(pdev); 5439 struct fe_priv *np = netdev_priv(dev); 5440 5441 if (!netif_running(dev)) 5442 goto out; 5443 5444 netif_device_detach(dev); 5445 5446 // Gross. 5447 nv_close(dev); 5448 5449 pci_save_state(pdev); 5450 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled); 5451 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 5452out: 5453 return 0; 5454} 5455 5456static int nv_resume(struct pci_dev *pdev) 5457{ 5458 struct net_device *dev = pci_get_drvdata(pdev); 5459 int rc = 0; 5460 5461 if (!netif_running(dev)) 5462 goto out; 5463 5464 netif_device_attach(dev); 5465 5466 pci_set_power_state(pdev, PCI_D0); 5467 pci_restore_state(pdev); 5468 pci_enable_wake(pdev, PCI_D0, 0); 5469 5470 rc = nv_open(dev); 5471out: 5472 return rc; 5473} 5474#else 5475#define nv_suspend NULL 5476#define nv_resume NULL 5477#endif /* CONFIG_PM */ 5478 5479static struct pci_device_id pci_tbl[] = { 5480 { /* nForce Ethernet Controller */ 5481 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1), 5482 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 5483 }, 5484 { /* nForce2 Ethernet Controller */ 5485 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2), 5486 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 5487 }, 5488 { /* nForce3 Ethernet Controller */ 5489 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3), 5490 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 5491 }, 5492 { /* nForce3 Ethernet Controller */ 5493 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4), 5494 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 5495 }, 5496 { /* nForce3 Ethernet Controller */ 5497 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5), 5498 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 5499 }, 5500 { /* nForce3 Ethernet Controller */ 5501 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6), 5502 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 5503 }, 5504 { /* nForce3 Ethernet Controller */ 5505 
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{0,},
};

static struct pci_driver driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= __devexit_p(nv_remove),
	.suspend	= nv_suspend,
	.resume		= nv_resume,
};

static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

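/*
 * Module parameters. optimization_mode ties in with the irqmask
 * selection in nv_probe() above: CPU mode trades per-packet interrupts
 * for a periodic timer interrupt whose rate poll_interval controls
 * (by the formula in its description, a ~1 ms period works out to
 * (1000 * 100) / 2^10 ~= 97).
 */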
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "How frequently the timer interrupt is generated: interval = (time_in_microseconds * 100) / 2^10. Min is 0, max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);