at v2.6.25-rc2 5734 lines 178 kB view raw
1/* 2 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers. 3 * 4 * Note: This driver is a cleanroom reimplementation based on reverse 5 * engineered documentation written by Carl-Daniel Hailfinger 6 * and Andrew de Quincey. 7 * 8 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered 9 * trademarks of NVIDIA Corporation in the United States and other 10 * countries. 11 * 12 * Copyright (C) 2003,4,5 Manfred Spraul 13 * Copyright (C) 2004 Andrew de Quincey (wol support) 14 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane 15 * IRQ rate fixes, bigendian fixes, cleanups, verification) 16 * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation 17 * 18 * This program is free software; you can redistribute it and/or modify 19 * it under the terms of the GNU General Public License as published by 20 * the Free Software Foundation; either version 2 of the License, or 21 * (at your option) any later version. 22 * 23 * This program is distributed in the hope that it will be useful, 24 * but WITHOUT ANY WARRANTY; without even the implied warranty of 25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 26 * GNU General Public License for more details. 27 * 28 * You should have received a copy of the GNU General Public License 29 * along with this program; if not, write to the Free Software 30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 31 * 32 * Changelog: 33 * 0.01: 05 Oct 2003: First release that compiles without warnings. 34 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs. 35 * Check all PCI BARs for the register window. 36 * udelay added to mii_rw. 37 * 0.03: 06 Oct 2003: Initialize dev->irq. 38 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks. 39 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout. 40 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated, 41 * irq mask updated 42 * 0.07: 14 Oct 2003: Further irq mask updates. 43 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill 44 * added into irq handler, NULL check for drain_ring. 45 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the 46 * requested interrupt sources. 47 * 0.10: 20 Oct 2003: First cleanup for release. 48 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased. 49 * MAC Address init fix, set_multicast cleanup. 50 * 0.12: 23 Oct 2003: Cleanups for release. 51 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10. 52 * Set link speed correctly. start rx before starting 53 * tx (nv_start_rx sets the link speed). 54 * 0.14: 25 Oct 2003: Nic dependant irq mask. 55 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during 56 * open. 57 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size 58 * increased to 1628 bytes. 59 * 0.17: 16 Nov 2003: undo rx buffer size increase. Substract 1 from 60 * the tx length. 61 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats 62 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac 63 * addresses, really stop rx if already running 64 * in nv_start_rx, clean up a bit. 65 * 0.20: 07 Dec 2003: alloc fixes 66 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix. 67 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup 68 * on close. 
69 * 0.23: 26 Jan 2004: various small cleanups 70 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces 71 * 0.25: 09 Mar 2004: wol support 72 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes 73 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings, 74 * added CK804/MCP04 device IDs, code fixes 75 * for registers, link status and other minor fixes. 76 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe 77 * 0.29: 31 Aug 2004: Add backup timer for link change notification. 78 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset 79 * into nv_close, otherwise reenabling for wol can 80 * cause DMA to kfree'd memory. 81 * 0.31: 14 Nov 2004: ethtool support for getting/setting link 82 * capabilities. 83 * 0.32: 16 Apr 2005: RX_ERROR4 handling added. 84 * 0.33: 16 May 2005: Support for MCP51 added. 85 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics. 86 * 0.35: 26 Jun 2005: Support for MCP55 added. 87 * 0.36: 28 Jun 2005: Add jumbo frame support. 88 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list 89 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of 90 * per-packet flags. 91 * 0.39: 18 Jul 2005: Add 64bit descriptor support. 92 * 0.40: 19 Jul 2005: Add support for mac address change. 93 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead 94 * of nv_remove 95 * 0.42: 06 Aug 2005: Fix lack of link speed initialization 96 * in the second (and later) nv_open call 97 * 0.43: 10 Aug 2005: Add support for tx checksum. 98 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation. 99 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check 100 * 0.46: 20 Oct 2005: Add irq optimization modes. 101 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan. 102 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single 103 * 0.49: 10 Dec 2005: Fix tso for large buffers. 104 * 0.50: 20 Jan 2006: Add 8021pq tagging support. 105 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings. 106 * 0.52: 20 Jan 2006: Add MSI/MSIX support. 107 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. 108 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. 109 * 0.55: 22 Mar 2006: Add flow control (pause frame). 110 * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support. 111 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections. 112 * 0.58: 30 Oct 2006: Added support for sideband management unit. 113 * 0.59: 30 Oct 2006: Added support for recoverable error. 114 * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats. 115 * 116 * Known bugs: 117 * We suspect that on some hardware no TX done interrupts are generated. 118 * This means recovery from netif_stop_queue only happens if the hw timer 119 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT) 120 * and the timer is active in the IRQMask, or if a rx packet arrives by chance. 121 * If your hardware reliably generates tx done interrupts, then you can remove 122 * DEV_NEED_TIMERIRQ from the driver_data flags. 123 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 124 * superfluous timer interrupts from the nic. 
125 */ 126#ifdef CONFIG_FORCEDETH_NAPI 127#define DRIVERNAPI "-NAPI" 128#else 129#define DRIVERNAPI 130#endif 131#define FORCEDETH_VERSION "0.61" 132#define DRV_NAME "forcedeth" 133 134#include <linux/module.h> 135#include <linux/types.h> 136#include <linux/pci.h> 137#include <linux/interrupt.h> 138#include <linux/netdevice.h> 139#include <linux/etherdevice.h> 140#include <linux/delay.h> 141#include <linux/spinlock.h> 142#include <linux/ethtool.h> 143#include <linux/timer.h> 144#include <linux/skbuff.h> 145#include <linux/mii.h> 146#include <linux/random.h> 147#include <linux/init.h> 148#include <linux/if_vlan.h> 149#include <linux/dma-mapping.h> 150 151#include <asm/irq.h> 152#include <asm/io.h> 153#include <asm/uaccess.h> 154#include <asm/system.h> 155 156#if 0 157#define dprintk printk 158#else 159#define dprintk(x...) do { } while (0) 160#endif 161 162#define TX_WORK_PER_LOOP 64 163#define RX_WORK_PER_LOOP 64 164 165/* 166 * Hardware access: 167 */ 168 169#define DEV_NEED_TIMERIRQ 0x00001 /* set the timer irq flag in the irq mask */ 170#define DEV_NEED_LINKTIMER 0x00002 /* poll link settings. Relies on the timer irq */ 171#define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */ 172#define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */ 173#define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */ 174#define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and striping */ 175#define DEV_HAS_MSI 0x00040 /* device supports MSI */ 176#define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */ 177#define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */ 178#define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */ 179#define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */ 180#define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */ 181#define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */ 182#define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */ 183#define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */ 184#define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */ 185#define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */ 186#define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */ 187 188enum { 189 NvRegIrqStatus = 0x000, 190#define NVREG_IRQSTAT_MIIEVENT 0x040 191#define NVREG_IRQSTAT_MASK 0x81ff 192 NvRegIrqMask = 0x004, 193#define NVREG_IRQ_RX_ERROR 0x0001 194#define NVREG_IRQ_RX 0x0002 195#define NVREG_IRQ_RX_NOBUF 0x0004 196#define NVREG_IRQ_TX_ERR 0x0008 197#define NVREG_IRQ_TX_OK 0x0010 198#define NVREG_IRQ_TIMER 0x0020 199#define NVREG_IRQ_LINK 0x0040 200#define NVREG_IRQ_RX_FORCED 0x0080 201#define NVREG_IRQ_TX_FORCED 0x0100 202#define NVREG_IRQ_RECOVER_ERROR 0x8000 203#define NVREG_IRQMASK_THROUGHPUT 0x00df 204#define NVREG_IRQMASK_CPU 0x0060 205#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) 206#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED) 207#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR) 208 209#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \ 210 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \ 211 NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR)) 212 213 
NvRegUnknownSetupReg6 = 0x008, 214#define NVREG_UNKSETUP6_VAL 3 215 216/* 217 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic 218 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms 219 */ 220 NvRegPollingInterval = 0x00c, 221#define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */ 222#define NVREG_POLL_DEFAULT_CPU 13 223 NvRegMSIMap0 = 0x020, 224 NvRegMSIMap1 = 0x024, 225 NvRegMSIIrqMask = 0x030, 226#define NVREG_MSI_VECTOR_0_ENABLED 0x01 227 NvRegMisc1 = 0x080, 228#define NVREG_MISC1_PAUSE_TX 0x01 229#define NVREG_MISC1_HD 0x02 230#define NVREG_MISC1_FORCE 0x3b0f3c 231 232 NvRegMacReset = 0x34, 233#define NVREG_MAC_RESET_ASSERT 0x0F3 234 NvRegTransmitterControl = 0x084, 235#define NVREG_XMITCTL_START 0x01 236#define NVREG_XMITCTL_MGMT_ST 0x40000000 237#define NVREG_XMITCTL_SYNC_MASK 0x000f0000 238#define NVREG_XMITCTL_SYNC_NOT_READY 0x0 239#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000 240#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00 241#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0 242#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000 243#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000 244#define NVREG_XMITCTL_HOST_LOADED 0x00004000 245#define NVREG_XMITCTL_TX_PATH_EN 0x01000000 246 NvRegTransmitterStatus = 0x088, 247#define NVREG_XMITSTAT_BUSY 0x01 248 249 NvRegPacketFilterFlags = 0x8c, 250#define NVREG_PFF_PAUSE_RX 0x08 251#define NVREG_PFF_ALWAYS 0x7F0000 252#define NVREG_PFF_PROMISC 0x80 253#define NVREG_PFF_MYADDR 0x20 254#define NVREG_PFF_LOOPBACK 0x10 255 256 NvRegOffloadConfig = 0x90, 257#define NVREG_OFFLOAD_HOMEPHY 0x601 258#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE 259 NvRegReceiverControl = 0x094, 260#define NVREG_RCVCTL_START 0x01 261#define NVREG_RCVCTL_RX_PATH_EN 0x01000000 262 NvRegReceiverStatus = 0x98, 263#define NVREG_RCVSTAT_BUSY 0x01 264 265 NvRegRandomSeed = 0x9c, 266#define NVREG_RNDSEED_MASK 0x00ff 267#define NVREG_RNDSEED_FORCE 0x7f00 268#define NVREG_RNDSEED_FORCE2 0x2d00 269#define NVREG_RNDSEED_FORCE3 0x7400 270 271 NvRegTxDeferral = 0xA0, 272#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f 273#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f 274#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f 275#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f 276#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f 277#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000 278 NvRegRxDeferral = 0xA4, 279#define NVREG_RX_DEFERRAL_DEFAULT 0x16 280 NvRegMacAddrA = 0xA8, 281 NvRegMacAddrB = 0xAC, 282 NvRegMulticastAddrA = 0xB0, 283#define NVREG_MCASTADDRA_FORCE 0x01 284 NvRegMulticastAddrB = 0xB4, 285 NvRegMulticastMaskA = 0xB8, 286#define NVREG_MCASTMASKA_NONE 0xffffffff 287 NvRegMulticastMaskB = 0xBC, 288#define NVREG_MCASTMASKB_NONE 0xffff 289 290 NvRegPhyInterface = 0xC0, 291#define PHY_RGMII 0x10000000 292 293 NvRegTxRingPhysAddr = 0x100, 294 NvRegRxRingPhysAddr = 0x104, 295 NvRegRingSizes = 0x108, 296#define NVREG_RINGSZ_TXSHIFT 0 297#define NVREG_RINGSZ_RXSHIFT 16 298 NvRegTransmitPoll = 0x10c, 299#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000 300 NvRegLinkSpeed = 0x110, 301#define NVREG_LINKSPEED_FORCE 0x10000 302#define NVREG_LINKSPEED_10 1000 303#define NVREG_LINKSPEED_100 100 304#define NVREG_LINKSPEED_1000 50 305#define NVREG_LINKSPEED_MASK (0xFFF) 306 NvRegUnknownSetupReg5 = 0x130, 307#define NVREG_UNKSETUP5_BIT31 (1<<31) 308 NvRegTxWatermark = 0x13c, 309#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010 310#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000 311#define NVREG_TX_WM_DESC2_3_1000 0xfe08000 312 NvRegTxRxControl = 
0x144, 313#define NVREG_TXRXCTL_KICK 0x0001 314#define NVREG_TXRXCTL_BIT1 0x0002 315#define NVREG_TXRXCTL_BIT2 0x0004 316#define NVREG_TXRXCTL_IDLE 0x0008 317#define NVREG_TXRXCTL_RESET 0x0010 318#define NVREG_TXRXCTL_RXCHECK 0x0400 319#define NVREG_TXRXCTL_DESC_1 0 320#define NVREG_TXRXCTL_DESC_2 0x002100 321#define NVREG_TXRXCTL_DESC_3 0xc02200 322#define NVREG_TXRXCTL_VLANSTRIP 0x00040 323#define NVREG_TXRXCTL_VLANINS 0x00080 324 NvRegTxRingPhysAddrHigh = 0x148, 325 NvRegRxRingPhysAddrHigh = 0x14C, 326 NvRegTxPauseFrame = 0x170, 327#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080 328#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 329#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 330#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 331 NvRegMIIStatus = 0x180, 332#define NVREG_MIISTAT_ERROR 0x0001 333#define NVREG_MIISTAT_LINKCHANGE 0x0008 334#define NVREG_MIISTAT_MASK_RW 0x0007 335#define NVREG_MIISTAT_MASK_ALL 0x000f 336 NvRegMIIMask = 0x184, 337#define NVREG_MII_LINKCHANGE 0x0008 338 339 NvRegAdapterControl = 0x188, 340#define NVREG_ADAPTCTL_START 0x02 341#define NVREG_ADAPTCTL_LINKUP 0x04 342#define NVREG_ADAPTCTL_PHYVALID 0x40000 343#define NVREG_ADAPTCTL_RUNNING 0x100000 344#define NVREG_ADAPTCTL_PHYSHIFT 24 345 NvRegMIISpeed = 0x18c, 346#define NVREG_MIISPEED_BIT8 (1<<8) 347#define NVREG_MIIDELAY 5 348 NvRegMIIControl = 0x190, 349#define NVREG_MIICTL_INUSE 0x08000 350#define NVREG_MIICTL_WRITE 0x00400 351#define NVREG_MIICTL_ADDRSHIFT 5 352 NvRegMIIData = 0x194, 353 NvRegWakeUpFlags = 0x200, 354#define NVREG_WAKEUPFLAGS_VAL 0x7770 355#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 356#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16 357#define NVREG_WAKEUPFLAGS_D3SHIFT 12 358#define NVREG_WAKEUPFLAGS_D2SHIFT 8 359#define NVREG_WAKEUPFLAGS_D1SHIFT 4 360#define NVREG_WAKEUPFLAGS_D0SHIFT 0 361#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01 362#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02 363#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04 364#define NVREG_WAKEUPFLAGS_ENABLE 0x1111 365 366 NvRegPatternCRC = 0x204, 367 NvRegPatternMask = 0x208, 368 NvRegPowerCap = 0x268, 369#define NVREG_POWERCAP_D3SUPP (1<<30) 370#define NVREG_POWERCAP_D2SUPP (1<<26) 371#define NVREG_POWERCAP_D1SUPP (1<<25) 372 NvRegPowerState = 0x26c, 373#define NVREG_POWERSTATE_POWEREDUP 0x8000 374#define NVREG_POWERSTATE_VALID 0x0100 375#define NVREG_POWERSTATE_MASK 0x0003 376#define NVREG_POWERSTATE_D0 0x0000 377#define NVREG_POWERSTATE_D1 0x0001 378#define NVREG_POWERSTATE_D2 0x0002 379#define NVREG_POWERSTATE_D3 0x0003 380 NvRegTxCnt = 0x280, 381 NvRegTxZeroReXmt = 0x284, 382 NvRegTxOneReXmt = 0x288, 383 NvRegTxManyReXmt = 0x28c, 384 NvRegTxLateCol = 0x290, 385 NvRegTxUnderflow = 0x294, 386 NvRegTxLossCarrier = 0x298, 387 NvRegTxExcessDef = 0x29c, 388 NvRegTxRetryErr = 0x2a0, 389 NvRegRxFrameErr = 0x2a4, 390 NvRegRxExtraByte = 0x2a8, 391 NvRegRxLateCol = 0x2ac, 392 NvRegRxRunt = 0x2b0, 393 NvRegRxFrameTooLong = 0x2b4, 394 NvRegRxOverflow = 0x2b8, 395 NvRegRxFCSErr = 0x2bc, 396 NvRegRxFrameAlignErr = 0x2c0, 397 NvRegRxLenErr = 0x2c4, 398 NvRegRxUnicast = 0x2c8, 399 NvRegRxMulticast = 0x2cc, 400 NvRegRxBroadcast = 0x2d0, 401 NvRegTxDef = 0x2d4, 402 NvRegTxFrame = 0x2d8, 403 NvRegRxCnt = 0x2dc, 404 NvRegTxPause = 0x2e0, 405 NvRegRxPause = 0x2e4, 406 NvRegRxDropFrame = 0x2e8, 407 NvRegVlanControl = 0x300, 408#define NVREG_VLANCONTROL_ENABLE 0x2000 409 NvRegMSIXMap0 = 0x3e0, 410 NvRegMSIXMap1 = 0x3e4, 411 NvRegMSIXIrqStatus = 0x3f0, 412 413 NvRegPowerState2 = 0x600, 414#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11 415#define 
NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001 416}; 417 418/* Big endian: should work, but is untested */ 419struct ring_desc { 420 __le32 buf; 421 __le32 flaglen; 422}; 423 424struct ring_desc_ex { 425 __le32 bufhigh; 426 __le32 buflow; 427 __le32 txvlan; 428 __le32 flaglen; 429}; 430 431union ring_type { 432 struct ring_desc* orig; 433 struct ring_desc_ex* ex; 434}; 435 436#define FLAG_MASK_V1 0xffff0000 437#define FLAG_MASK_V2 0xffffc000 438#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1) 439#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2) 440 441#define NV_TX_LASTPACKET (1<<16) 442#define NV_TX_RETRYERROR (1<<19) 443#define NV_TX_FORCED_INTERRUPT (1<<24) 444#define NV_TX_DEFERRED (1<<26) 445#define NV_TX_CARRIERLOST (1<<27) 446#define NV_TX_LATECOLLISION (1<<28) 447#define NV_TX_UNDERFLOW (1<<29) 448#define NV_TX_ERROR (1<<30) 449#define NV_TX_VALID (1<<31) 450 451#define NV_TX2_LASTPACKET (1<<29) 452#define NV_TX2_RETRYERROR (1<<18) 453#define NV_TX2_FORCED_INTERRUPT (1<<30) 454#define NV_TX2_DEFERRED (1<<25) 455#define NV_TX2_CARRIERLOST (1<<26) 456#define NV_TX2_LATECOLLISION (1<<27) 457#define NV_TX2_UNDERFLOW (1<<28) 458/* error and valid are the same for both */ 459#define NV_TX2_ERROR (1<<30) 460#define NV_TX2_VALID (1<<31) 461#define NV_TX2_TSO (1<<28) 462#define NV_TX2_TSO_SHIFT 14 463#define NV_TX2_TSO_MAX_SHIFT 14 464#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT) 465#define NV_TX2_CHECKSUM_L3 (1<<27) 466#define NV_TX2_CHECKSUM_L4 (1<<26) 467 468#define NV_TX3_VLAN_TAG_PRESENT (1<<18) 469 470#define NV_RX_DESCRIPTORVALID (1<<16) 471#define NV_RX_MISSEDFRAME (1<<17) 472#define NV_RX_SUBSTRACT1 (1<<18) 473#define NV_RX_ERROR1 (1<<23) 474#define NV_RX_ERROR2 (1<<24) 475#define NV_RX_ERROR3 (1<<25) 476#define NV_RX_ERROR4 (1<<26) 477#define NV_RX_CRCERR (1<<27) 478#define NV_RX_OVERFLOW (1<<28) 479#define NV_RX_FRAMINGERR (1<<29) 480#define NV_RX_ERROR (1<<30) 481#define NV_RX_AVAIL (1<<31) 482 483#define NV_RX2_CHECKSUMMASK (0x1C000000) 484#define NV_RX2_CHECKSUM_IP (0x10000000) 485#define NV_RX2_CHECKSUM_IP_TCP (0x14000000) 486#define NV_RX2_CHECKSUM_IP_UDP (0x18000000) 487#define NV_RX2_DESCRIPTORVALID (1<<29) 488#define NV_RX2_SUBSTRACT1 (1<<25) 489#define NV_RX2_ERROR1 (1<<18) 490#define NV_RX2_ERROR2 (1<<19) 491#define NV_RX2_ERROR3 (1<<20) 492#define NV_RX2_ERROR4 (1<<21) 493#define NV_RX2_CRCERR (1<<22) 494#define NV_RX2_OVERFLOW (1<<23) 495#define NV_RX2_FRAMINGERR (1<<24) 496/* error and avail are the same for both */ 497#define NV_RX2_ERROR (1<<30) 498#define NV_RX2_AVAIL (1<<31) 499 500#define NV_RX3_VLAN_TAG_PRESENT (1<<16) 501#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) 502 503/* Miscelaneous hardware related defines: */ 504#define NV_PCI_REGSZ_VER1 0x270 505#define NV_PCI_REGSZ_VER2 0x2d4 506#define NV_PCI_REGSZ_VER3 0x604 507 508/* various timeout delays: all in usec */ 509#define NV_TXRX_RESET_DELAY 4 510#define NV_TXSTOP_DELAY1 10 511#define NV_TXSTOP_DELAY1MAX 500000 512#define NV_TXSTOP_DELAY2 100 513#define NV_RXSTOP_DELAY1 10 514#define NV_RXSTOP_DELAY1MAX 500000 515#define NV_RXSTOP_DELAY2 100 516#define NV_SETUP5_DELAY 5 517#define NV_SETUP5_DELAYMAX 50000 518#define NV_POWERUP_DELAY 5 519#define NV_POWERUP_DELAYMAX 5000 520#define NV_MIIBUSY_DELAY 50 521#define NV_MIIPHY_DELAY 10 522#define NV_MIIPHY_DELAYMAX 10000 523#define NV_MAC_RESET_DELAY 64 524 525#define NV_WAKEUPPATTERNS 5 526#define NV_WAKEUPMASKENTRIES 4 527 528/* General driver defaults */ 529#define NV_WATCHDOG_TIMEO (5*HZ) 530 531#define RX_RING_DEFAULT 128 532#define TX_RING_DEFAULT 256 
533#define RX_RING_MIN 128 534#define TX_RING_MIN 64 535#define RING_MAX_DESC_VER_1 1024 536#define RING_MAX_DESC_VER_2_3 16384 537 538/* rx/tx mac addr + type + vlan + align + slack*/ 539#define NV_RX_HEADERS (64) 540/* even more slack. */ 541#define NV_RX_ALLOC_PAD (64) 542 543/* maximum mtu size */ 544#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */ 545#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */ 546 547#define OOM_REFILL (1+HZ/20) 548#define POLL_WAIT (1+HZ/100) 549#define LINK_TIMEOUT (3*HZ) 550#define STATS_INTERVAL (10*HZ) 551 552/* 553 * desc_ver values: 554 * The nic supports three different descriptor types: 555 * - DESC_VER_1: Original 556 * - DESC_VER_2: support for jumbo frames. 557 * - DESC_VER_3: 64-bit format. 558 */ 559#define DESC_VER_1 1 560#define DESC_VER_2 2 561#define DESC_VER_3 3 562 563/* PHY defines */ 564#define PHY_OUI_MARVELL 0x5043 565#define PHY_OUI_CICADA 0x03f1 566#define PHY_OUI_VITESSE 0x01c1 567#define PHY_OUI_REALTEK 0x0732 568#define PHYID1_OUI_MASK 0x03ff 569#define PHYID1_OUI_SHFT 6 570#define PHYID2_OUI_MASK 0xfc00 571#define PHYID2_OUI_SHFT 10 572#define PHYID2_MODEL_MASK 0x03f0 573#define PHY_MODEL_MARVELL_E3016 0x220 574#define PHY_MARVELL_E3016_INITMASK 0x0300 575#define PHY_CICADA_INIT1 0x0f000 576#define PHY_CICADA_INIT2 0x0e00 577#define PHY_CICADA_INIT3 0x01000 578#define PHY_CICADA_INIT4 0x0200 579#define PHY_CICADA_INIT5 0x0004 580#define PHY_CICADA_INIT6 0x02000 581#define PHY_VITESSE_INIT_REG1 0x1f 582#define PHY_VITESSE_INIT_REG2 0x10 583#define PHY_VITESSE_INIT_REG3 0x11 584#define PHY_VITESSE_INIT_REG4 0x12 585#define PHY_VITESSE_INIT_MSK1 0xc 586#define PHY_VITESSE_INIT_MSK2 0x0180 587#define PHY_VITESSE_INIT1 0x52b5 588#define PHY_VITESSE_INIT2 0xaf8a 589#define PHY_VITESSE_INIT3 0x8 590#define PHY_VITESSE_INIT4 0x8f8a 591#define PHY_VITESSE_INIT5 0xaf86 592#define PHY_VITESSE_INIT6 0x8f86 593#define PHY_VITESSE_INIT7 0xaf82 594#define PHY_VITESSE_INIT8 0x0100 595#define PHY_VITESSE_INIT9 0x8f82 596#define PHY_VITESSE_INIT10 0x0 597#define PHY_REALTEK_INIT_REG1 0x1f 598#define PHY_REALTEK_INIT_REG2 0x19 599#define PHY_REALTEK_INIT_REG3 0x13 600#define PHY_REALTEK_INIT1 0x0000 601#define PHY_REALTEK_INIT2 0x8e00 602#define PHY_REALTEK_INIT3 0x0001 603#define PHY_REALTEK_INIT4 0xad17 604 605#define PHY_GIGABIT 0x0100 606 607#define PHY_TIMEOUT 0x1 608#define PHY_ERROR 0x2 609 610#define PHY_100 0x1 611#define PHY_1000 0x2 612#define PHY_HALF 0x100 613 614#define NV_PAUSEFRAME_RX_CAPABLE 0x0001 615#define NV_PAUSEFRAME_TX_CAPABLE 0x0002 616#define NV_PAUSEFRAME_RX_ENABLE 0x0004 617#define NV_PAUSEFRAME_TX_ENABLE 0x0008 618#define NV_PAUSEFRAME_RX_REQ 0x0010 619#define NV_PAUSEFRAME_TX_REQ 0x0020 620#define NV_PAUSEFRAME_AUTONEG 0x0040 621 622/* MSI/MSI-X defines */ 623#define NV_MSI_X_MAX_VECTORS 8 624#define NV_MSI_X_VECTORS_MASK 0x000f 625#define NV_MSI_CAPABLE 0x0010 626#define NV_MSI_X_CAPABLE 0x0020 627#define NV_MSI_ENABLED 0x0040 628#define NV_MSI_X_ENABLED 0x0080 629 630#define NV_MSI_X_VECTOR_ALL 0x0 631#define NV_MSI_X_VECTOR_RX 0x0 632#define NV_MSI_X_VECTOR_TX 0x1 633#define NV_MSI_X_VECTOR_OTHER 0x2 634 635#define NV_RESTART_TX 0x1 636#define NV_RESTART_RX 0x2 637 638/* statistics */ 639struct nv_ethtool_str { 640 char name[ETH_GSTRING_LEN]; 641}; 642 643static const struct nv_ethtool_str nv_estats_str[] = { 644 { "tx_bytes" }, 645 { "tx_zero_rexmt" }, 646 { "tx_one_rexmt" }, 647 { "tx_many_rexmt" }, 648 { "tx_late_collision" }, 649 { "tx_fifo_errors" }, 650 { "tx_carrier_errors" }, 
651 { "tx_excess_deferral" }, 652 { "tx_retry_error" }, 653 { "rx_frame_error" }, 654 { "rx_extra_byte" }, 655 { "rx_late_collision" }, 656 { "rx_runt" }, 657 { "rx_frame_too_long" }, 658 { "rx_over_errors" }, 659 { "rx_crc_errors" }, 660 { "rx_frame_align_error" }, 661 { "rx_length_error" }, 662 { "rx_unicast" }, 663 { "rx_multicast" }, 664 { "rx_broadcast" }, 665 { "rx_packets" }, 666 { "rx_errors_total" }, 667 { "tx_errors_total" }, 668 669 /* version 2 stats */ 670 { "tx_deferral" }, 671 { "tx_packets" }, 672 { "rx_bytes" }, 673 { "tx_pause" }, 674 { "rx_pause" }, 675 { "rx_drop_frame" } 676}; 677 678struct nv_ethtool_stats { 679 u64 tx_bytes; 680 u64 tx_zero_rexmt; 681 u64 tx_one_rexmt; 682 u64 tx_many_rexmt; 683 u64 tx_late_collision; 684 u64 tx_fifo_errors; 685 u64 tx_carrier_errors; 686 u64 tx_excess_deferral; 687 u64 tx_retry_error; 688 u64 rx_frame_error; 689 u64 rx_extra_byte; 690 u64 rx_late_collision; 691 u64 rx_runt; 692 u64 rx_frame_too_long; 693 u64 rx_over_errors; 694 u64 rx_crc_errors; 695 u64 rx_frame_align_error; 696 u64 rx_length_error; 697 u64 rx_unicast; 698 u64 rx_multicast; 699 u64 rx_broadcast; 700 u64 rx_packets; 701 u64 rx_errors_total; 702 u64 tx_errors_total; 703 704 /* version 2 stats */ 705 u64 tx_deferral; 706 u64 tx_packets; 707 u64 rx_bytes; 708 u64 tx_pause; 709 u64 rx_pause; 710 u64 rx_drop_frame; 711}; 712 713#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) 714#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) 715 716/* diagnostics */ 717#define NV_TEST_COUNT_BASE 3 718#define NV_TEST_COUNT_EXTENDED 4 719 720static const struct nv_ethtool_str nv_etests_str[] = { 721 { "link (online/offline)" }, 722 { "register (offline) " }, 723 { "interrupt (offline) " }, 724 { "loopback (offline) " } 725}; 726 727struct register_test { 728 __u32 reg; 729 __u32 mask; 730}; 731 732static const struct register_test nv_registers_test[] = { 733 { NvRegUnknownSetupReg6, 0x01 }, 734 { NvRegMisc1, 0x03c }, 735 { NvRegOffloadConfig, 0x03ff }, 736 { NvRegMulticastAddrA, 0xffffffff }, 737 { NvRegTxWatermark, 0x0ff }, 738 { NvRegWakeUpFlags, 0x07777 }, 739 { 0,0 } 740}; 741 742struct nv_skb_map { 743 struct sk_buff *skb; 744 dma_addr_t dma; 745 unsigned int dma_len; 746}; 747 748/* 749 * SMP locking: 750 * All hardware access under dev->priv->lock, except the performance 751 * critical parts: 752 * - rx is (pseudo-) lockless: it relies on the single-threading provided 753 * by the arch code for interrupts. 754 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission 755 * needs dev->priv->lock :-( 756 * - set_multicast_list: preparation lockless, relies on netif_tx_lock. 757 */ 758 759/* in dev: base, irq */ 760struct fe_priv { 761 spinlock_t lock; 762 763 struct net_device *dev; 764 struct napi_struct napi; 765 766 /* General data: 767 * Locking: spin_lock(&np->lock); */ 768 struct nv_ethtool_stats estats; 769 int in_shutdown; 770 u32 linkspeed; 771 int duplex; 772 int autoneg; 773 int fixed_mode; 774 int phyaddr; 775 int wolenabled; 776 unsigned int phy_oui; 777 unsigned int phy_model; 778 u16 gigabit; 779 int intr_test; 780 int recover_error; 781 782 /* General data: RO fields */ 783 dma_addr_t ring_addr; 784 struct pci_dev *pci_dev; 785 u32 orig_mac[2]; 786 u32 irqmask; 787 u32 desc_ver; 788 u32 txrxctl_bits; 789 u32 vlanctl_bits; 790 u32 driver_data; 791 u32 register_size; 792 int rx_csum; 793 u32 mac_in_use; 794 795 void __iomem *base; 796 797 /* rx specific fields. 
798 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); 799 */ 800 union ring_type get_rx, put_rx, first_rx, last_rx; 801 struct nv_skb_map *get_rx_ctx, *put_rx_ctx; 802 struct nv_skb_map *first_rx_ctx, *last_rx_ctx; 803 struct nv_skb_map *rx_skb; 804 805 union ring_type rx_ring; 806 unsigned int rx_buf_sz; 807 unsigned int pkt_limit; 808 struct timer_list oom_kick; 809 struct timer_list nic_poll; 810 struct timer_list stats_poll; 811 u32 nic_poll_irq; 812 int rx_ring_size; 813 814 /* media detection workaround. 815 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); 816 */ 817 int need_linktimer; 818 unsigned long link_timeout; 819 /* 820 * tx specific fields. 821 */ 822 union ring_type get_tx, put_tx, first_tx, last_tx; 823 struct nv_skb_map *get_tx_ctx, *put_tx_ctx; 824 struct nv_skb_map *first_tx_ctx, *last_tx_ctx; 825 struct nv_skb_map *tx_skb; 826 827 union ring_type tx_ring; 828 u32 tx_flags; 829 int tx_ring_size; 830 int tx_stop; 831 832 /* vlan fields */ 833 struct vlan_group *vlangrp; 834 835 /* msi/msi-x fields */ 836 u32 msi_flags; 837 struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; 838 839 /* flow control */ 840 u32 pause_flags; 841}; 842 843/* 844 * Maximum number of loops until we assume that a bit in the irq mask 845 * is stuck. Overridable with module param. 846 */ 847static int max_interrupt_work = 5; 848 849/* 850 * Optimization can be either throuput mode or cpu mode 851 * 852 * Throughput Mode: Every tx and rx packet will generate an interrupt. 853 * CPU Mode: Interrupts are controlled by a timer. 854 */ 855enum { 856 NV_OPTIMIZATION_MODE_THROUGHPUT, 857 NV_OPTIMIZATION_MODE_CPU 858}; 859static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT; 860 861/* 862 * Poll interval for timer irq 863 * 864 * This interval determines how frequent an interrupt is generated. 865 * The is value is determined by [(time_in_micro_secs * 100) / (2^10)] 866 * Min = 0, and Max = 65535 867 */ 868static int poll_interval = -1; 869 870/* 871 * MSI interrupts 872 */ 873enum { 874 NV_MSI_INT_DISABLED, 875 NV_MSI_INT_ENABLED 876}; 877static int msi = NV_MSI_INT_ENABLED; 878 879/* 880 * MSIX interrupts 881 */ 882enum { 883 NV_MSIX_INT_DISABLED, 884 NV_MSIX_INT_ENABLED 885}; 886static int msix = NV_MSIX_INT_DISABLED; 887 888/* 889 * DMA 64bit 890 */ 891enum { 892 NV_DMA_64BIT_DISABLED, 893 NV_DMA_64BIT_ENABLED 894}; 895static int dma_64bit = NV_DMA_64BIT_ENABLED; 896 897static inline struct fe_priv *get_nvpriv(struct net_device *dev) 898{ 899 return netdev_priv(dev); 900} 901 902static inline u8 __iomem *get_hwbase(struct net_device *dev) 903{ 904 return ((struct fe_priv *)netdev_priv(dev))->base; 905} 906 907static inline void pci_push(u8 __iomem *base) 908{ 909 /* force out pending posted writes */ 910 readl(base); 911} 912 913static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v) 914{ 915 return le32_to_cpu(prd->flaglen) 916 & ((v == DESC_VER_1) ? 
LEN_MASK_V1 : LEN_MASK_V2); 917} 918 919static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v) 920{ 921 return le32_to_cpu(prd->flaglen) & LEN_MASK_V2; 922} 923 924static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, 925 int delay, int delaymax, const char *msg) 926{ 927 u8 __iomem *base = get_hwbase(dev); 928 929 pci_push(base); 930 do { 931 udelay(delay); 932 delaymax -= delay; 933 if (delaymax < 0) { 934 if (msg) 935 printk(msg); 936 return 1; 937 } 938 } while ((readl(base + offset) & mask) != target); 939 return 0; 940} 941 942#define NV_SETUP_RX_RING 0x01 943#define NV_SETUP_TX_RING 0x02 944 945static inline u32 dma_low(dma_addr_t addr) 946{ 947 return addr; 948} 949 950static inline u32 dma_high(dma_addr_t addr) 951{ 952 return addr>>31>>1; /* 0 if 32bit, shift down by 32 if 64bit */ 953} 954 955static void setup_hw_rings(struct net_device *dev, int rxtx_flags) 956{ 957 struct fe_priv *np = get_nvpriv(dev); 958 u8 __iomem *base = get_hwbase(dev); 959 960 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 961 if (rxtx_flags & NV_SETUP_RX_RING) { 962 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 963 } 964 if (rxtx_flags & NV_SETUP_TX_RING) { 965 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); 966 } 967 } else { 968 if (rxtx_flags & NV_SETUP_RX_RING) { 969 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 970 writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh); 971 } 972 if (rxtx_flags & NV_SETUP_TX_RING) { 973 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); 974 writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh); 975 } 976 } 977} 978 979static void free_rings(struct net_device *dev) 980{ 981 struct fe_priv *np = get_nvpriv(dev); 982 983 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 984 if (np->rx_ring.orig) 985 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 986 np->rx_ring.orig, np->ring_addr); 987 } else { 988 if (np->rx_ring.ex) 989 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 990 np->rx_ring.ex, np->ring_addr); 991 } 992 if (np->rx_skb) 993 kfree(np->rx_skb); 994 if (np->tx_skb) 995 kfree(np->tx_skb); 996} 997 998static int using_multi_irqs(struct net_device *dev) 999{ 1000 struct fe_priv *np = get_nvpriv(dev); 1001 1002 if (!(np->msi_flags & NV_MSI_X_ENABLED) || 1003 ((np->msi_flags & NV_MSI_X_ENABLED) && 1004 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) 1005 return 0; 1006 else 1007 return 1; 1008} 1009 1010static void nv_enable_irq(struct net_device *dev) 1011{ 1012 struct fe_priv *np = get_nvpriv(dev); 1013 1014 if (!using_multi_irqs(dev)) { 1015 if (np->msi_flags & NV_MSI_X_ENABLED) 1016 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1017 else 1018 enable_irq(np->pci_dev->irq); 1019 } else { 1020 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1021 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 1022 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 1023 } 1024} 1025 1026static void nv_disable_irq(struct net_device *dev) 1027{ 1028 struct fe_priv *np = get_nvpriv(dev); 1029 1030 if (!using_multi_irqs(dev)) { 1031 if (np->msi_flags & NV_MSI_X_ENABLED) 1032 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1033 else 1034 
disable_irq(np->pci_dev->irq); 1035 } else { 1036 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1037 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 1038 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 1039 } 1040} 1041 1042/* In MSIX mode, a write to irqmask behaves as XOR */ 1043static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask) 1044{ 1045 u8 __iomem *base = get_hwbase(dev); 1046 1047 writel(mask, base + NvRegIrqMask); 1048} 1049 1050static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask) 1051{ 1052 struct fe_priv *np = get_nvpriv(dev); 1053 u8 __iomem *base = get_hwbase(dev); 1054 1055 if (np->msi_flags & NV_MSI_X_ENABLED) { 1056 writel(mask, base + NvRegIrqMask); 1057 } else { 1058 if (np->msi_flags & NV_MSI_ENABLED) 1059 writel(0, base + NvRegMSIIrqMask); 1060 writel(0, base + NvRegIrqMask); 1061 } 1062} 1063 1064#define MII_READ (-1) 1065/* mii_rw: read/write a register on the PHY. 1066 * 1067 * Caller must guarantee serialization 1068 */ 1069static int mii_rw(struct net_device *dev, int addr, int miireg, int value) 1070{ 1071 u8 __iomem *base = get_hwbase(dev); 1072 u32 reg; 1073 int retval; 1074 1075 writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus); 1076 1077 reg = readl(base + NvRegMIIControl); 1078 if (reg & NVREG_MIICTL_INUSE) { 1079 writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl); 1080 udelay(NV_MIIBUSY_DELAY); 1081 } 1082 1083 reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg; 1084 if (value != MII_READ) { 1085 writel(value, base + NvRegMIIData); 1086 reg |= NVREG_MIICTL_WRITE; 1087 } 1088 writel(reg, base + NvRegMIIControl); 1089 1090 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0, 1091 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) { 1092 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n", 1093 dev->name, miireg, addr); 1094 retval = -1; 1095 } else if (value != MII_READ) { 1096 /* it was a write operation - fewer failures are detectable */ 1097 dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n", 1098 dev->name, value, miireg, addr); 1099 retval = 0; 1100 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) { 1101 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n", 1102 dev->name, miireg, addr); 1103 retval = -1; 1104 } else { 1105 retval = readl(base + NvRegMIIData); 1106 dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n", 1107 dev->name, miireg, addr, retval); 1108 } 1109 1110 return retval; 1111} 1112 1113static int phy_reset(struct net_device *dev, u32 bmcr_setup) 1114{ 1115 struct fe_priv *np = netdev_priv(dev); 1116 u32 miicontrol; 1117 unsigned int tries = 0; 1118 1119 miicontrol = BMCR_RESET | bmcr_setup; 1120 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { 1121 return -1; 1122 } 1123 1124 /* wait for 500ms */ 1125 msleep(500); 1126 1127 /* must wait till reset is deasserted */ 1128 while (miicontrol & BMCR_RESET) { 1129 msleep(10); 1130 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1131 /* FIXME: 100 tries seem excessive */ 1132 if (tries++ > 100) 1133 return -1; 1134 } 1135 return 0; 1136} 1137 1138static int phy_init(struct net_device *dev) 1139{ 1140 struct fe_priv *np = get_nvpriv(dev); 1141 u8 __iomem *base = get_hwbase(dev); 1142 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; 1143 1144 /* phy errata for E3016 phy */ 1145 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 1146 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1147 reg &= ~PHY_MARVELL_E3016_INITMASK; 
1148 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { 1149 printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev)); 1150 return PHY_ERROR; 1151 } 1152 } 1153 if (np->phy_oui == PHY_OUI_REALTEK) { 1154 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1155 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1156 return PHY_ERROR; 1157 } 1158 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { 1159 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1160 return PHY_ERROR; 1161 } 1162 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1163 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1164 return PHY_ERROR; 1165 } 1166 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { 1167 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1168 return PHY_ERROR; 1169 } 1170 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1171 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1172 return PHY_ERROR; 1173 } 1174 } 1175 1176 /* set advertise register */ 1177 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 1178 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); 1179 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { 1180 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); 1181 return PHY_ERROR; 1182 } 1183 1184 /* get phy interface type */ 1185 phyinterface = readl(base + NvRegPhyInterface); 1186 1187 /* see if gigabit phy */ 1188 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 1189 if (mii_status & PHY_GIGABIT) { 1190 np->gigabit = PHY_GIGABIT; 1191 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 1192 mii_control_1000 &= ~ADVERTISE_1000HALF; 1193 if (phyinterface & PHY_RGMII) 1194 mii_control_1000 |= ADVERTISE_1000FULL; 1195 else 1196 mii_control_1000 &= ~ADVERTISE_1000FULL; 1197 1198 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { 1199 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1200 return PHY_ERROR; 1201 } 1202 } 1203 else 1204 np->gigabit = 0; 1205 1206 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1207 mii_control |= BMCR_ANENABLE; 1208 1209 /* reset the phy 1210 * (certain phys need bmcr to be setup with reset) 1211 */ 1212 if (phy_reset(dev, mii_control)) { 1213 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); 1214 return PHY_ERROR; 1215 } 1216 1217 /* phy vendor specific configuration */ 1218 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { 1219 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); 1220 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); 1221 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); 1222 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) { 1223 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1224 return PHY_ERROR; 1225 } 1226 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1227 phy_reserved |= PHY_CICADA_INIT5; 1228 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) { 1229 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1230 return PHY_ERROR; 1231 } 1232 } 1233 if (np->phy_oui == PHY_OUI_CICADA) { 1234 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); 1235 phy_reserved |= PHY_CICADA_INIT6; 1236 if (mii_rw(dev, np->phyaddr, 
MII_SREVISION, phy_reserved)) { 1237 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1238 return PHY_ERROR; 1239 } 1240 } 1241 if (np->phy_oui == PHY_OUI_VITESSE) { 1242 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) { 1243 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1244 return PHY_ERROR; 1245 } 1246 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) { 1247 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1248 return PHY_ERROR; 1249 } 1250 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1251 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1252 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1253 return PHY_ERROR; 1254 } 1255 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1256 phy_reserved &= ~PHY_VITESSE_INIT_MSK1; 1257 phy_reserved |= PHY_VITESSE_INIT3; 1258 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1259 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1260 return PHY_ERROR; 1261 } 1262 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) { 1263 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1264 return PHY_ERROR; 1265 } 1266 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) { 1267 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1268 return PHY_ERROR; 1269 } 1270 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1271 phy_reserved &= ~PHY_VITESSE_INIT_MSK1; 1272 phy_reserved |= PHY_VITESSE_INIT3; 1273 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1274 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1275 return PHY_ERROR; 1276 } 1277 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1278 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1279 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1280 return PHY_ERROR; 1281 } 1282 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) { 1283 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1284 return PHY_ERROR; 1285 } 1286 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) { 1287 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1288 return PHY_ERROR; 1289 } 1290 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1291 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1292 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1293 return PHY_ERROR; 1294 } 1295 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1296 phy_reserved &= ~PHY_VITESSE_INIT_MSK2; 1297 phy_reserved |= PHY_VITESSE_INIT8; 1298 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1299 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1300 return PHY_ERROR; 1301 } 1302 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) { 1303 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1304 return PHY_ERROR; 1305 } 1306 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) { 1307 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1308 return PHY_ERROR; 1309 } 1310 } 1311 if (np->phy_oui == PHY_OUI_REALTEK) { 1312 /* reset could have cleared these out, set them back */ 1313 if (mii_rw(dev, 
np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1314 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1315 return PHY_ERROR; 1316 } 1317 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { 1318 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1319 return PHY_ERROR; 1320 } 1321 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1322 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1323 return PHY_ERROR; 1324 } 1325 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { 1326 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1327 return PHY_ERROR; 1328 } 1329 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1330 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1331 return PHY_ERROR; 1332 } 1333 } 1334 1335 /* some phys clear out pause advertisment on reset, set it back */ 1336 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); 1337 1338 /* restart auto negotiation */ 1339 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1340 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 1341 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1342 return PHY_ERROR; 1343 } 1344 1345 return 0; 1346} 1347 1348static void nv_start_rx(struct net_device *dev) 1349{ 1350 struct fe_priv *np = netdev_priv(dev); 1351 u8 __iomem *base = get_hwbase(dev); 1352 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1353 1354 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); 1355 /* Already running? Stop it. */ 1356 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { 1357 rx_ctrl &= ~NVREG_RCVCTL_START; 1358 writel(rx_ctrl, base + NvRegReceiverControl); 1359 pci_push(base); 1360 } 1361 writel(np->linkspeed, base + NvRegLinkSpeed); 1362 pci_push(base); 1363 rx_ctrl |= NVREG_RCVCTL_START; 1364 if (np->mac_in_use) 1365 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; 1366 writel(rx_ctrl, base + NvRegReceiverControl); 1367 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", 1368 dev->name, np->duplex, np->linkspeed); 1369 pci_push(base); 1370} 1371 1372static void nv_stop_rx(struct net_device *dev) 1373{ 1374 struct fe_priv *np = netdev_priv(dev); 1375 u8 __iomem *base = get_hwbase(dev); 1376 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1377 1378 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); 1379 if (!np->mac_in_use) 1380 rx_ctrl &= ~NVREG_RCVCTL_START; 1381 else 1382 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; 1383 writel(rx_ctrl, base + NvRegReceiverControl); 1384 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, 1385 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, 1386 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); 1387 1388 udelay(NV_RXSTOP_DELAY2); 1389 if (!np->mac_in_use) 1390 writel(0, base + NvRegLinkSpeed); 1391} 1392 1393static void nv_start_tx(struct net_device *dev) 1394{ 1395 struct fe_priv *np = netdev_priv(dev); 1396 u8 __iomem *base = get_hwbase(dev); 1397 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1398 1399 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name); 1400 tx_ctrl |= NVREG_XMITCTL_START; 1401 if (np->mac_in_use) 1402 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; 1403 writel(tx_ctrl, base + NvRegTransmitterControl); 1404 pci_push(base); 1405} 1406 1407static void nv_stop_tx(struct net_device *dev) 1408{ 1409 struct fe_priv *np = netdev_priv(dev); 1410 u8 __iomem *base = get_hwbase(dev); 1411 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1412 1413 
dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); 1414 if (!np->mac_in_use) 1415 tx_ctrl &= ~NVREG_XMITCTL_START; 1416 else 1417 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; 1418 writel(tx_ctrl, base + NvRegTransmitterControl); 1419 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, 1420 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, 1421 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); 1422 1423 udelay(NV_TXSTOP_DELAY2); 1424 if (!np->mac_in_use) 1425 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, 1426 base + NvRegTransmitPoll); 1427} 1428 1429static void nv_txrx_reset(struct net_device *dev) 1430{ 1431 struct fe_priv *np = netdev_priv(dev); 1432 u8 __iomem *base = get_hwbase(dev); 1433 1434 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name); 1435 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1436 pci_push(base); 1437 udelay(NV_TXRX_RESET_DELAY); 1438 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1439 pci_push(base); 1440} 1441 1442static void nv_mac_reset(struct net_device *dev) 1443{ 1444 struct fe_priv *np = netdev_priv(dev); 1445 u8 __iomem *base = get_hwbase(dev); 1446 u32 temp1, temp2, temp3; 1447 1448 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name); 1449 1450 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1451 pci_push(base); 1452 1453 /* save registers since they will be cleared on reset */ 1454 temp1 = readl(base + NvRegMacAddrA); 1455 temp2 = readl(base + NvRegMacAddrB); 1456 temp3 = readl(base + NvRegTransmitPoll); 1457 1458 writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset); 1459 pci_push(base); 1460 udelay(NV_MAC_RESET_DELAY); 1461 writel(0, base + NvRegMacReset); 1462 pci_push(base); 1463 udelay(NV_MAC_RESET_DELAY); 1464 1465 /* restore saved registers */ 1466 writel(temp1, base + NvRegMacAddrA); 1467 writel(temp2, base + NvRegMacAddrB); 1468 writel(temp3, base + NvRegTransmitPoll); 1469 1470 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1471 pci_push(base); 1472} 1473 1474static void nv_get_hw_stats(struct net_device *dev) 1475{ 1476 struct fe_priv *np = netdev_priv(dev); 1477 u8 __iomem *base = get_hwbase(dev); 1478 1479 np->estats.tx_bytes += readl(base + NvRegTxCnt); 1480 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); 1481 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); 1482 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); 1483 np->estats.tx_late_collision += readl(base + NvRegTxLateCol); 1484 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); 1485 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); 1486 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); 1487 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); 1488 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); 1489 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); 1490 np->estats.rx_late_collision += readl(base + NvRegRxLateCol); 1491 np->estats.rx_runt += readl(base + NvRegRxRunt); 1492 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); 1493 np->estats.rx_over_errors += readl(base + NvRegRxOverflow); 1494 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); 1495 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); 1496 np->estats.rx_length_error += readl(base + NvRegRxLenErr); 1497 np->estats.rx_unicast += readl(base + NvRegRxUnicast); 1498 np->estats.rx_multicast += readl(base + 
NvRegRxMulticast); 1499 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); 1500 np->estats.rx_packets = 1501 np->estats.rx_unicast + 1502 np->estats.rx_multicast + 1503 np->estats.rx_broadcast; 1504 np->estats.rx_errors_total = 1505 np->estats.rx_crc_errors + 1506 np->estats.rx_over_errors + 1507 np->estats.rx_frame_error + 1508 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + 1509 np->estats.rx_late_collision + 1510 np->estats.rx_runt + 1511 np->estats.rx_frame_too_long; 1512 np->estats.tx_errors_total = 1513 np->estats.tx_late_collision + 1514 np->estats.tx_fifo_errors + 1515 np->estats.tx_carrier_errors + 1516 np->estats.tx_excess_deferral + 1517 np->estats.tx_retry_error; 1518 1519 if (np->driver_data & DEV_HAS_STATISTICS_V2) { 1520 np->estats.tx_deferral += readl(base + NvRegTxDef); 1521 np->estats.tx_packets += readl(base + NvRegTxFrame); 1522 np->estats.rx_bytes += readl(base + NvRegRxCnt); 1523 np->estats.tx_pause += readl(base + NvRegTxPause); 1524 np->estats.rx_pause += readl(base + NvRegRxPause); 1525 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 1526 } 1527} 1528 1529/* 1530 * nv_get_stats: dev->get_stats function 1531 * Get latest stats value from the nic. 1532 * Called with read_lock(&dev_base_lock) held for read - 1533 * only synchronized against unregister_netdevice. 1534 */ 1535static struct net_device_stats *nv_get_stats(struct net_device *dev) 1536{ 1537 struct fe_priv *np = netdev_priv(dev); 1538 1539 /* If the nic supports hw counters then retrieve latest values */ 1540 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) { 1541 nv_get_hw_stats(dev); 1542 1543 /* copy to net_device stats */ 1544 dev->stats.tx_bytes = np->estats.tx_bytes; 1545 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; 1546 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; 1547 dev->stats.rx_crc_errors = np->estats.rx_crc_errors; 1548 dev->stats.rx_over_errors = np->estats.rx_over_errors; 1549 dev->stats.rx_errors = np->estats.rx_errors_total; 1550 dev->stats.tx_errors = np->estats.tx_errors_total; 1551 } 1552 1553 return &dev->stats; 1554} 1555 1556/* 1557 * nv_alloc_rx: fill rx ring entries. 
1558 * Return 1 if the allocations for the skbs failed and the 1559 * rx engine is without Available descriptors 1560 */ 1561static int nv_alloc_rx(struct net_device *dev) 1562{ 1563 struct fe_priv *np = netdev_priv(dev); 1564 struct ring_desc* less_rx; 1565 1566 less_rx = np->get_rx.orig; 1567 if (less_rx-- == np->first_rx.orig) 1568 less_rx = np->last_rx.orig; 1569 1570 while (np->put_rx.orig != less_rx) { 1571 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1572 if (skb) { 1573 np->put_rx_ctx->skb = skb; 1574 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, 1575 skb->data, 1576 skb_tailroom(skb), 1577 PCI_DMA_FROMDEVICE); 1578 np->put_rx_ctx->dma_len = skb_tailroom(skb); 1579 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); 1580 wmb(); 1581 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); 1582 if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) 1583 np->put_rx.orig = np->first_rx.orig; 1584 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1585 np->put_rx_ctx = np->first_rx_ctx; 1586 } else { 1587 return 1; 1588 } 1589 } 1590 return 0; 1591} 1592 1593static int nv_alloc_rx_optimized(struct net_device *dev) 1594{ 1595 struct fe_priv *np = netdev_priv(dev); 1596 struct ring_desc_ex* less_rx; 1597 1598 less_rx = np->get_rx.ex; 1599 if (less_rx-- == np->first_rx.ex) 1600 less_rx = np->last_rx.ex; 1601 1602 while (np->put_rx.ex != less_rx) { 1603 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1604 if (skb) { 1605 np->put_rx_ctx->skb = skb; 1606 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, 1607 skb->data, 1608 skb_tailroom(skb), 1609 PCI_DMA_FROMDEVICE); 1610 np->put_rx_ctx->dma_len = skb_tailroom(skb); 1611 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma)); 1612 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma)); 1613 wmb(); 1614 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); 1615 if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) 1616 np->put_rx.ex = np->first_rx.ex; 1617 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1618 np->put_rx_ctx = np->first_rx_ctx; 1619 } else { 1620 return 1; 1621 } 1622 } 1623 return 0; 1624} 1625 1626/* If rx bufs are exhausted called after 50ms to attempt to refresh */ 1627#ifdef CONFIG_FORCEDETH_NAPI 1628static void nv_do_rx_refill(unsigned long data) 1629{ 1630 struct net_device *dev = (struct net_device *) data; 1631 struct fe_priv *np = netdev_priv(dev); 1632 1633 /* Just reschedule NAPI rx processing */ 1634 netif_rx_schedule(dev, &np->napi); 1635} 1636#else 1637static void nv_do_rx_refill(unsigned long data) 1638{ 1639 struct net_device *dev = (struct net_device *) data; 1640 struct fe_priv *np = netdev_priv(dev); 1641 int retcode; 1642 1643 if (!using_multi_irqs(dev)) { 1644 if (np->msi_flags & NV_MSI_X_ENABLED) 1645 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1646 else 1647 disable_irq(np->pci_dev->irq); 1648 } else { 1649 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1650 } 1651 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1652 retcode = nv_alloc_rx(dev); 1653 else 1654 retcode = nv_alloc_rx_optimized(dev); 1655 if (retcode) { 1656 spin_lock_irq(&np->lock); 1657 if (!np->in_shutdown) 1658 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 1659 spin_unlock_irq(&np->lock); 1660 } 1661 if (!using_multi_irqs(dev)) { 1662 if (np->msi_flags & NV_MSI_X_ENABLED) 1663 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1664 else 1665 enable_irq(np->pci_dev->irq); 1666 } else { 1667 
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1668 } 1669} 1670#endif 1671 1672static void nv_init_rx(struct net_device *dev) 1673{ 1674 struct fe_priv *np = netdev_priv(dev); 1675 int i; 1676 np->get_rx = np->put_rx = np->first_rx = np->rx_ring; 1677 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1678 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; 1679 else 1680 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; 1681 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb; 1682 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; 1683 1684 for (i = 0; i < np->rx_ring_size; i++) { 1685 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1686 np->rx_ring.orig[i].flaglen = 0; 1687 np->rx_ring.orig[i].buf = 0; 1688 } else { 1689 np->rx_ring.ex[i].flaglen = 0; 1690 np->rx_ring.ex[i].txvlan = 0; 1691 np->rx_ring.ex[i].bufhigh = 0; 1692 np->rx_ring.ex[i].buflow = 0; 1693 } 1694 np->rx_skb[i].skb = NULL; 1695 np->rx_skb[i].dma = 0; 1696 } 1697} 1698 1699static void nv_init_tx(struct net_device *dev) 1700{ 1701 struct fe_priv *np = netdev_priv(dev); 1702 int i; 1703 np->get_tx = np->put_tx = np->first_tx = np->tx_ring; 1704 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1705 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; 1706 else 1707 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; 1708 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; 1709 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; 1710 1711 for (i = 0; i < np->tx_ring_size; i++) { 1712 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1713 np->tx_ring.orig[i].flaglen = 0; 1714 np->tx_ring.orig[i].buf = 0; 1715 } else { 1716 np->tx_ring.ex[i].flaglen = 0; 1717 np->tx_ring.ex[i].txvlan = 0; 1718 np->tx_ring.ex[i].bufhigh = 0; 1719 np->tx_ring.ex[i].buflow = 0; 1720 } 1721 np->tx_skb[i].skb = NULL; 1722 np->tx_skb[i].dma = 0; 1723 } 1724} 1725 1726static int nv_init_ring(struct net_device *dev) 1727{ 1728 struct fe_priv *np = netdev_priv(dev); 1729 1730 nv_init_tx(dev); 1731 nv_init_rx(dev); 1732 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1733 return nv_alloc_rx(dev); 1734 else 1735 return nv_alloc_rx_optimized(dev); 1736} 1737 1738static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb) 1739{ 1740 struct fe_priv *np = netdev_priv(dev); 1741 1742 if (tx_skb->dma) { 1743 pci_unmap_page(np->pci_dev, tx_skb->dma, 1744 tx_skb->dma_len, 1745 PCI_DMA_TODEVICE); 1746 tx_skb->dma = 0; 1747 } 1748 if (tx_skb->skb) { 1749 dev_kfree_skb_any(tx_skb->skb); 1750 tx_skb->skb = NULL; 1751 return 1; 1752 } else { 1753 return 0; 1754 } 1755} 1756 1757static void nv_drain_tx(struct net_device *dev) 1758{ 1759 struct fe_priv *np = netdev_priv(dev); 1760 unsigned int i; 1761 1762 for (i = 0; i < np->tx_ring_size; i++) { 1763 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1764 np->tx_ring.orig[i].flaglen = 0; 1765 np->tx_ring.orig[i].buf = 0; 1766 } else { 1767 np->tx_ring.ex[i].flaglen = 0; 1768 np->tx_ring.ex[i].txvlan = 0; 1769 np->tx_ring.ex[i].bufhigh = 0; 1770 np->tx_ring.ex[i].buflow = 0; 1771 } 1772 if (nv_release_txskb(dev, &np->tx_skb[i])) 1773 dev->stats.tx_dropped++; 1774 } 1775} 1776 1777static void nv_drain_rx(struct net_device *dev) 1778{ 1779 struct fe_priv *np = netdev_priv(dev); 1780 int i; 1781 1782 for (i = 0; i < np->rx_ring_size; i++) { 1783 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1784 np->rx_ring.orig[i].flaglen = 0; 1785 
np->rx_ring.orig[i].buf = 0; 1786 } else { 1787 np->rx_ring.ex[i].flaglen = 0; 1788 np->rx_ring.ex[i].txvlan = 0; 1789 np->rx_ring.ex[i].bufhigh = 0; 1790 np->rx_ring.ex[i].buflow = 0; 1791 } 1792 wmb(); 1793 if (np->rx_skb[i].skb) { 1794 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, 1795 (skb_end_pointer(np->rx_skb[i].skb) - 1796 np->rx_skb[i].skb->data), 1797 PCI_DMA_FROMDEVICE); 1798 dev_kfree_skb(np->rx_skb[i].skb); 1799 np->rx_skb[i].skb = NULL; 1800 } 1801 } 1802} 1803 1804static void drain_ring(struct net_device *dev) 1805{ 1806 nv_drain_tx(dev); 1807 nv_drain_rx(dev); 1808} 1809 1810static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) 1811{ 1812 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); 1813} 1814 1815/* 1816 * nv_start_xmit: dev->hard_start_xmit function 1817 * Called with netif_tx_lock held. 1818 */ 1819static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 1820{ 1821 struct fe_priv *np = netdev_priv(dev); 1822 u32 tx_flags = 0; 1823 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 1824 unsigned int fragments = skb_shinfo(skb)->nr_frags; 1825 unsigned int i; 1826 u32 offset = 0; 1827 u32 bcnt; 1828 u32 size = skb->len-skb->data_len; 1829 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1830 u32 empty_slots; 1831 struct ring_desc* put_tx; 1832 struct ring_desc* start_tx; 1833 struct ring_desc* prev_tx; 1834 struct nv_skb_map* prev_tx_ctx; 1835 1836 /* add fragments to entries count */ 1837 for (i = 0; i < fragments; i++) { 1838 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 1839 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1840 } 1841 1842 empty_slots = nv_get_empty_tx_slots(np); 1843 if (unlikely(empty_slots <= entries)) { 1844 spin_lock_irq(&np->lock); 1845 netif_stop_queue(dev); 1846 np->tx_stop = 1; 1847 spin_unlock_irq(&np->lock); 1848 return NETDEV_TX_BUSY; 1849 } 1850 1851 start_tx = put_tx = np->put_tx.orig; 1852 1853 /* setup the header buffer */ 1854 do { 1855 prev_tx = put_tx; 1856 prev_tx_ctx = np->put_tx_ctx; 1857 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1858 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 1859 PCI_DMA_TODEVICE); 1860 np->put_tx_ctx->dma_len = bcnt; 1861 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 1862 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1863 1864 tx_flags = np->tx_flags; 1865 offset += bcnt; 1866 size -= bcnt; 1867 if (unlikely(put_tx++ == np->last_tx.orig)) 1868 put_tx = np->first_tx.orig; 1869 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 1870 np->put_tx_ctx = np->first_tx_ctx; 1871 } while (size); 1872 1873 /* setup the fragments */ 1874 for (i = 0; i < fragments; i++) { 1875 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1876 u32 size = frag->size; 1877 offset = 0; 1878 1879 do { 1880 prev_tx = put_tx; 1881 prev_tx_ctx = np->put_tx_ctx; 1882 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; 1883 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 1884 PCI_DMA_TODEVICE); 1885 np->put_tx_ctx->dma_len = bcnt; 1886 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 1887 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1888 1889 offset += bcnt; 1890 size -= bcnt; 1891 if (unlikely(put_tx++ == np->last_tx.orig)) 1892 put_tx = np->first_tx.orig; 1893 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 1894 np->put_tx_ctx = np->first_tx_ctx; 1895 } while (size); 1896 } 1897 1898 /* set last fragment flag */ 1899 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); 1900 1901 /* save skb in this slot's context area */ 1902 prev_tx_ctx->skb = skb; 1903 1904 if (skb_is_gso(skb)) 1905 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 1906 else 1907 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 1908 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 1909 1910 spin_lock_irq(&np->lock); 1911 1912 /* set tx flags */ 1913 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 1914 np->put_tx.orig = put_tx; 1915 1916 spin_unlock_irq(&np->lock); 1917 1918 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n", 1919 dev->name, entries, tx_flags_extra); 1920 { 1921 int j; 1922 for (j=0; j<64; j++) { 1923 if ((j%16) == 0) 1924 dprintk("\n%03x:", j); 1925 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 1926 } 1927 dprintk("\n"); 1928 } 1929 1930 dev->trans_start = jiffies; 1931 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 1932 return NETDEV_TX_OK; 1933} 1934 1935static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev) 1936{ 1937 struct fe_priv *np = netdev_priv(dev); 1938 u32 tx_flags = 0; 1939 u32 tx_flags_extra; 1940 unsigned int fragments = skb_shinfo(skb)->nr_frags; 1941 unsigned int i; 1942 u32 offset = 0; 1943 u32 bcnt; 1944 u32 size = skb->len-skb->data_len; 1945 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1946 u32 empty_slots; 1947 struct ring_desc_ex* put_tx; 1948 struct ring_desc_ex* start_tx; 1949 struct ring_desc_ex* prev_tx; 1950 struct nv_skb_map* prev_tx_ctx; 1951 1952 /* add fragments to entries count */ 1953 for (i = 0; i < fragments; i++) { 1954 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 1955 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1956 } 1957 1958 empty_slots = nv_get_empty_tx_slots(np); 1959 if (unlikely(empty_slots <= entries)) { 1960 spin_lock_irq(&np->lock); 1961 netif_stop_queue(dev); 1962 np->tx_stop = 1; 1963 spin_unlock_irq(&np->lock); 1964 return NETDEV_TX_BUSY; 1965 } 1966 1967 start_tx = put_tx = np->put_tx.ex; 1968 1969 /* setup the header buffer */ 1970 do { 1971 prev_tx = put_tx; 1972 prev_tx_ctx = np->put_tx_ctx; 1973 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; 1974 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 1975 PCI_DMA_TODEVICE); 1976 np->put_tx_ctx->dma_len = bcnt; 1977 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 1978 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 1979 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1980 1981 tx_flags = NV_TX2_VALID; 1982 offset += bcnt; 1983 size -= bcnt; 1984 if (unlikely(put_tx++ == np->last_tx.ex)) 1985 put_tx = np->first_tx.ex; 1986 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 1987 np->put_tx_ctx = np->first_tx_ctx; 1988 } while (size); 1989 1990 /* setup the fragments */ 1991 for (i = 0; i < fragments; i++) { 1992 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1993 u32 size = frag->size; 1994 offset = 0; 1995 1996 do { 1997 prev_tx = put_tx; 1998 prev_tx_ctx = np->put_tx_ctx; 1999 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2000 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 2001 PCI_DMA_TODEVICE); 2002 np->put_tx_ctx->dma_len = bcnt; 2003 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2004 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2005 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2006 2007 offset += bcnt; 2008 size -= bcnt; 2009 if (unlikely(put_tx++ == np->last_tx.ex)) 2010 put_tx = np->first_tx.ex; 2011 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2012 np->put_tx_ctx = np->first_tx_ctx; 2013 } while (size); 2014 } 2015 2016 /* set last fragment flag */ 2017 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); 2018 2019 /* save skb in this slot's context area */ 2020 prev_tx_ctx->skb = skb; 2021 2022 if (skb_is_gso(skb)) 2023 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 2024 else 2025 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 2026 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2027 2028 /* vlan tag */ 2029 if (likely(!np->vlangrp)) { 2030 start_tx->txvlan = 0; 2031 } else { 2032 if (vlan_tx_tag_present(skb)) 2033 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb)); 2034 else 2035 start_tx->txvlan = 0; 2036 } 2037 2038 spin_lock_irq(&np->lock); 2039 2040 /* set tx flags */ 2041 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2042 np->put_tx.ex = put_tx; 2043 2044 spin_unlock_irq(&np->lock); 2045 2046 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n", 2047 dev->name, entries, tx_flags_extra); 2048 { 2049 int j; 2050 for (j=0; j<64; j++) { 2051 if ((j%16) == 0) 2052 dprintk("\n%03x:", j); 2053 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2054 } 2055 dprintk("\n"); 2056 } 2057 2058 dev->trans_start = jiffies; 2059 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2060 return NETDEV_TX_OK; 2061} 2062 2063/* 2064 * nv_tx_done: check for completed packets, release the skbs. 2065 * 2066 * Caller must own np->lock. 
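 * Walks the ring from get_tx towards put_tx, unmapping and freeing every
 * completed skb, updating the error counters, and re-waking the tx queue
 * if it had been stopped because the ring was full.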
2067 */ 2068static void nv_tx_done(struct net_device *dev) 2069{ 2070 struct fe_priv *np = netdev_priv(dev); 2071 u32 flags; 2072 struct ring_desc* orig_get_tx = np->get_tx.orig; 2073 2074 while ((np->get_tx.orig != np->put_tx.orig) && 2075 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) { 2076 2077 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n", 2078 dev->name, flags); 2079 2080 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 2081 np->get_tx_ctx->dma_len, 2082 PCI_DMA_TODEVICE); 2083 np->get_tx_ctx->dma = 0; 2084 2085 if (np->desc_ver == DESC_VER_1) { 2086 if (flags & NV_TX_LASTPACKET) { 2087 if (flags & NV_TX_ERROR) { 2088 if (flags & NV_TX_UNDERFLOW) 2089 dev->stats.tx_fifo_errors++; 2090 if (flags & NV_TX_CARRIERLOST) 2091 dev->stats.tx_carrier_errors++; 2092 dev->stats.tx_errors++; 2093 } else { 2094 dev->stats.tx_packets++; 2095 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2096 } 2097 dev_kfree_skb_any(np->get_tx_ctx->skb); 2098 np->get_tx_ctx->skb = NULL; 2099 } 2100 } else { 2101 if (flags & NV_TX2_LASTPACKET) { 2102 if (flags & NV_TX2_ERROR) { 2103 if (flags & NV_TX2_UNDERFLOW) 2104 dev->stats.tx_fifo_errors++; 2105 if (flags & NV_TX2_CARRIERLOST) 2106 dev->stats.tx_carrier_errors++; 2107 dev->stats.tx_errors++; 2108 } else { 2109 dev->stats.tx_packets++; 2110 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2111 } 2112 dev_kfree_skb_any(np->get_tx_ctx->skb); 2113 np->get_tx_ctx->skb = NULL; 2114 } 2115 } 2116 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) 2117 np->get_tx.orig = np->first_tx.orig; 2118 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2119 np->get_tx_ctx = np->first_tx_ctx; 2120 } 2121 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { 2122 np->tx_stop = 0; 2123 netif_wake_queue(dev); 2124 } 2125} 2126 2127static void nv_tx_done_optimized(struct net_device *dev, int limit) 2128{ 2129 struct fe_priv *np = netdev_priv(dev); 2130 u32 flags; 2131 struct ring_desc_ex* orig_get_tx = np->get_tx.ex; 2132 2133 while ((np->get_tx.ex != np->put_tx.ex) && 2134 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) && 2135 (limit-- > 0)) { 2136 2137 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", 2138 dev->name, flags); 2139 2140 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 2141 np->get_tx_ctx->dma_len, 2142 PCI_DMA_TODEVICE); 2143 np->get_tx_ctx->dma = 0; 2144 2145 if (flags & NV_TX2_LASTPACKET) { 2146 if (!(flags & NV_TX2_ERROR)) 2147 dev->stats.tx_packets++; 2148 dev_kfree_skb_any(np->get_tx_ctx->skb); 2149 np->get_tx_ctx->skb = NULL; 2150 } 2151 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) 2152 np->get_tx.ex = np->first_tx.ex; 2153 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2154 np->get_tx_ctx = np->first_tx_ctx; 2155 } 2156 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { 2157 np->tx_stop = 0; 2158 netif_wake_queue(dev); 2159 } 2160} 2161 2162/* 2163 * nv_tx_timeout: dev->tx_timeout function 2164 * Called with netif_tx_lock held. 2165 */ 2166static void nv_tx_timeout(struct net_device *dev) 2167{ 2168 struct fe_priv *np = netdev_priv(dev); 2169 u8 __iomem *base = get_hwbase(dev); 2170 u32 status; 2171 2172 if (np->msi_flags & NV_MSI_X_ENABLED) 2173 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2174 else 2175 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2176 2177 printk(KERN_INFO "%s: Got tx_timeout. 
irq: %08x\n", dev->name, status); 2178 2179 { 2180 int i; 2181 2182 printk(KERN_INFO "%s: Ring at %lx\n", 2183 dev->name, (unsigned long)np->ring_addr); 2184 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 2185 for (i=0;i<=np->register_size;i+= 32) { 2186 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 2187 i, 2188 readl(base + i + 0), readl(base + i + 4), 2189 readl(base + i + 8), readl(base + i + 12), 2190 readl(base + i + 16), readl(base + i + 20), 2191 readl(base + i + 24), readl(base + i + 28)); 2192 } 2193 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 2194 for (i=0;i<np->tx_ring_size;i+= 4) { 2195 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 2196 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 2197 i, 2198 le32_to_cpu(np->tx_ring.orig[i].buf), 2199 le32_to_cpu(np->tx_ring.orig[i].flaglen), 2200 le32_to_cpu(np->tx_ring.orig[i+1].buf), 2201 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), 2202 le32_to_cpu(np->tx_ring.orig[i+2].buf), 2203 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), 2204 le32_to_cpu(np->tx_ring.orig[i+3].buf), 2205 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); 2206 } else { 2207 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", 2208 i, 2209 le32_to_cpu(np->tx_ring.ex[i].bufhigh), 2210 le32_to_cpu(np->tx_ring.ex[i].buflow), 2211 le32_to_cpu(np->tx_ring.ex[i].flaglen), 2212 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), 2213 le32_to_cpu(np->tx_ring.ex[i+1].buflow), 2214 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), 2215 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), 2216 le32_to_cpu(np->tx_ring.ex[i+2].buflow), 2217 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), 2218 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), 2219 le32_to_cpu(np->tx_ring.ex[i+3].buflow), 2220 le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); 2221 } 2222 } 2223 } 2224 2225 spin_lock_irq(&np->lock); 2226 2227 /* 1) stop tx engine */ 2228 nv_stop_tx(dev); 2229 2230 /* 2) check that the packets were not sent already: */ 2231 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 2232 nv_tx_done(dev); 2233 else 2234 nv_tx_done_optimized(dev, np->tx_ring_size); 2235 2236 /* 3) if there are dead entries: clear everything */ 2237 if (np->get_tx_ctx != np->put_tx_ctx) { 2238 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 2239 nv_drain_tx(dev); 2240 nv_init_tx(dev); 2241 setup_hw_rings(dev, NV_SETUP_TX_RING); 2242 } 2243 2244 netif_wake_queue(dev); 2245 2246 /* 4) restart tx engine */ 2247 nv_start_tx(dev); 2248 spin_unlock_irq(&np->lock); 2249} 2250 2251/* 2252 * Called when the nic notices a mismatch between the actual data len on the 2253 * wire and the len indicated in the 802 header 2254 */ 2255static int nv_getlen(struct net_device *dev, void *packet, int datalen) 2256{ 2257 int hdrlen; /* length of the 802 header */ 2258 int protolen; /* length as stored in the proto field */ 2259 2260 /* 1) calculate len according to header */ 2261 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { 2262 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); 2263 hdrlen = VLAN_HLEN; 2264 } else { 2265 protolen = ntohs( ((struct ethhdr *)packet)->h_proto); 2266 hdrlen = ETH_HLEN; 2267 } 2268 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n", 2269 dev->name, datalen, protolen, hdrlen); 2270 if (protolen > ETH_DATA_LEN) 2271 return datalen; /* Value in proto field not a len, no checks possible */ 2272 2273 protolen += hdrlen; 2274 /* 
consistency checks: */ 2275 if (datalen > ETH_ZLEN) { 2276 if (datalen >= protolen) { 2277 /* more data on wire than in 802 header, trim of 2278 * additional data. 2279 */ 2280 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2281 dev->name, protolen); 2282 return protolen; 2283 } else { 2284 /* less data on wire than mentioned in header. 2285 * Discard the packet. 2286 */ 2287 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n", 2288 dev->name); 2289 return -1; 2290 } 2291 } else { 2292 /* short packet. Accept only if 802 values are also short */ 2293 if (protolen > ETH_ZLEN) { 2294 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n", 2295 dev->name); 2296 return -1; 2297 } 2298 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2299 dev->name, datalen); 2300 return datalen; 2301 } 2302} 2303 2304static int nv_rx_process(struct net_device *dev, int limit) 2305{ 2306 struct fe_priv *np = netdev_priv(dev); 2307 u32 flags; 2308 int rx_work = 0; 2309 struct sk_buff *skb; 2310 int len; 2311 2312 while((np->get_rx.orig != np->put_rx.orig) && 2313 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && 2314 (rx_work < limit)) { 2315 2316 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n", 2317 dev->name, flags); 2318 2319 /* 2320 * the packet is for us - immediately tear down the pci mapping. 2321 * TODO: check if a prefetch of the first cacheline improves 2322 * the performance. 2323 */ 2324 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2325 np->get_rx_ctx->dma_len, 2326 PCI_DMA_FROMDEVICE); 2327 skb = np->get_rx_ctx->skb; 2328 np->get_rx_ctx->skb = NULL; 2329 2330 { 2331 int j; 2332 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2333 for (j=0; j<64; j++) { 2334 if ((j%16) == 0) 2335 dprintk("\n%03x:", j); 2336 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2337 } 2338 dprintk("\n"); 2339 } 2340 /* look at what we actually got: */ 2341 if (np->desc_ver == DESC_VER_1) { 2342 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2343 len = flags & LEN_MASK_V1; 2344 if (unlikely(flags & NV_RX_ERROR)) { 2345 if (flags & NV_RX_ERROR4) { 2346 len = nv_getlen(dev, skb->data, len); 2347 if (len < 0) { 2348 dev->stats.rx_errors++; 2349 dev_kfree_skb(skb); 2350 goto next_pkt; 2351 } 2352 } 2353 /* framing errors are soft errors */ 2354 else if (flags & NV_RX_FRAMINGERR) { 2355 if (flags & NV_RX_SUBSTRACT1) { 2356 len--; 2357 } 2358 } 2359 /* the rest are hard errors */ 2360 else { 2361 if (flags & NV_RX_MISSEDFRAME) 2362 dev->stats.rx_missed_errors++; 2363 if (flags & NV_RX_CRCERR) 2364 dev->stats.rx_crc_errors++; 2365 if (flags & NV_RX_OVERFLOW) 2366 dev->stats.rx_over_errors++; 2367 dev->stats.rx_errors++; 2368 dev_kfree_skb(skb); 2369 goto next_pkt; 2370 } 2371 } 2372 } else { 2373 dev_kfree_skb(skb); 2374 goto next_pkt; 2375 } 2376 } else { 2377 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2378 len = flags & LEN_MASK_V2; 2379 if (unlikely(flags & NV_RX2_ERROR)) { 2380 if (flags & NV_RX2_ERROR4) { 2381 len = nv_getlen(dev, skb->data, len); 2382 if (len < 0) { 2383 dev->stats.rx_errors++; 2384 dev_kfree_skb(skb); 2385 goto next_pkt; 2386 } 2387 } 2388 /* framing errors are soft errors */ 2389 else if (flags & NV_RX2_FRAMINGERR) { 2390 if (flags & NV_RX2_SUBSTRACT1) { 2391 len--; 2392 } 2393 } 2394 /* the rest are hard errors */ 2395 else { 2396 if (flags & NV_RX2_CRCERR) 2397 dev->stats.rx_crc_errors++; 2398 if (flags & NV_RX2_OVERFLOW) 2399 dev->stats.rx_over_errors++; 2400 dev->stats.rx_errors++; 2401 dev_kfree_skb(skb); 2402 goto 
next_pkt; 2403 } 2404 } 2405 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2406 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2407 skb->ip_summed = CHECKSUM_UNNECESSARY; 2408 } else { 2409 dev_kfree_skb(skb); 2410 goto next_pkt; 2411 } 2412 } 2413 /* got a valid packet - forward it to the network core */ 2414 skb_put(skb, len); 2415 skb->protocol = eth_type_trans(skb, dev); 2416 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n", 2417 dev->name, len, skb->protocol); 2418#ifdef CONFIG_FORCEDETH_NAPI 2419 netif_receive_skb(skb); 2420#else 2421 netif_rx(skb); 2422#endif 2423 dev->last_rx = jiffies; 2424 dev->stats.rx_packets++; 2425 dev->stats.rx_bytes += len; 2426next_pkt: 2427 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2428 np->get_rx.orig = np->first_rx.orig; 2429 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2430 np->get_rx_ctx = np->first_rx_ctx; 2431 2432 rx_work++; 2433 } 2434 2435 return rx_work; 2436} 2437 2438static int nv_rx_process_optimized(struct net_device *dev, int limit) 2439{ 2440 struct fe_priv *np = netdev_priv(dev); 2441 u32 flags; 2442 u32 vlanflags = 0; 2443 int rx_work = 0; 2444 struct sk_buff *skb; 2445 int len; 2446 2447 while((np->get_rx.ex != np->put_rx.ex) && 2448 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && 2449 (rx_work < limit)) { 2450 2451 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n", 2452 dev->name, flags); 2453 2454 /* 2455 * the packet is for us - immediately tear down the pci mapping. 2456 * TODO: check if a prefetch of the first cacheline improves 2457 * the performance. 2458 */ 2459 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2460 np->get_rx_ctx->dma_len, 2461 PCI_DMA_FROMDEVICE); 2462 skb = np->get_rx_ctx->skb; 2463 np->get_rx_ctx->skb = NULL; 2464 2465 { 2466 int j; 2467 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2468 for (j=0; j<64; j++) { 2469 if ((j%16) == 0) 2470 dprintk("\n%03x:", j); 2471 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2472 } 2473 dprintk("\n"); 2474 } 2475 /* look at what we actually got: */ 2476 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2477 len = flags & LEN_MASK_V2; 2478 if (unlikely(flags & NV_RX2_ERROR)) { 2479 if (flags & NV_RX2_ERROR4) { 2480 len = nv_getlen(dev, skb->data, len); 2481 if (len < 0) { 2482 dev_kfree_skb(skb); 2483 goto next_pkt; 2484 } 2485 } 2486 /* framing errors are soft errors */ 2487 else if (flags & NV_RX2_FRAMINGERR) { 2488 if (flags & NV_RX2_SUBSTRACT1) { 2489 len--; 2490 } 2491 } 2492 /* the rest are hard errors */ 2493 else { 2494 dev_kfree_skb(skb); 2495 goto next_pkt; 2496 } 2497 } 2498 2499 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2500 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2501 skb->ip_summed = CHECKSUM_UNNECESSARY; 2502 2503 /* got a valid packet - forward it to the network core */ 2504 skb_put(skb, len); 2505 skb->protocol = eth_type_trans(skb, dev); 2506 prefetch(skb->data); 2507 2508 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n", 2509 dev->name, len, skb->protocol); 2510 2511 if (likely(!np->vlangrp)) { 2512#ifdef CONFIG_FORCEDETH_NAPI 2513 netif_receive_skb(skb); 2514#else 2515 netif_rx(skb); 2516#endif 2517 } else { 2518 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); 2519 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { 2520#ifdef CONFIG_FORCEDETH_NAPI 2521 vlan_hwaccel_receive_skb(skb, np->vlangrp, 2522 vlanflags & 
NV_RX3_VLAN_TAG_MASK); 2523#else 2524 vlan_hwaccel_rx(skb, np->vlangrp, 2525 vlanflags & NV_RX3_VLAN_TAG_MASK); 2526#endif 2527 } else { 2528#ifdef CONFIG_FORCEDETH_NAPI 2529 netif_receive_skb(skb); 2530#else 2531 netif_rx(skb); 2532#endif 2533 } 2534 } 2535 2536 dev->last_rx = jiffies; 2537 dev->stats.rx_packets++; 2538 dev->stats.rx_bytes += len; 2539 } else { 2540 dev_kfree_skb(skb); 2541 } 2542next_pkt: 2543 if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) 2544 np->get_rx.ex = np->first_rx.ex; 2545 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2546 np->get_rx_ctx = np->first_rx_ctx; 2547 2548 rx_work++; 2549 } 2550 2551 return rx_work; 2552} 2553 2554static void set_bufsize(struct net_device *dev) 2555{ 2556 struct fe_priv *np = netdev_priv(dev); 2557 2558 if (dev->mtu <= ETH_DATA_LEN) 2559 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; 2560 else 2561 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; 2562} 2563 2564/* 2565 * nv_change_mtu: dev->change_mtu function 2566 * Called with dev_base_lock held for read. 2567 */ 2568static int nv_change_mtu(struct net_device *dev, int new_mtu) 2569{ 2570 struct fe_priv *np = netdev_priv(dev); 2571 int old_mtu; 2572 2573 if (new_mtu < 64 || new_mtu > np->pkt_limit) 2574 return -EINVAL; 2575 2576 old_mtu = dev->mtu; 2577 dev->mtu = new_mtu; 2578 2579 /* return early if the buffer sizes will not change */ 2580 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) 2581 return 0; 2582 if (old_mtu == new_mtu) 2583 return 0; 2584 2585 /* synchronized against open : rtnl_lock() held by caller */ 2586 if (netif_running(dev)) { 2587 u8 __iomem *base = get_hwbase(dev); 2588 /* 2589 * It seems that the nic preloads valid ring entries into an 2590 * internal buffer. The procedure for flushing everything is 2591 * guessed, there is probably a simpler approach. 2592 * Changing the MTU is a rare event, it shouldn't matter. 2593 */ 2594 nv_disable_irq(dev); 2595 netif_tx_lock_bh(dev); 2596 spin_lock(&np->lock); 2597 /* stop engines */ 2598 nv_stop_rx(dev); 2599 nv_stop_tx(dev); 2600 nv_txrx_reset(dev); 2601 /* drain rx queue */ 2602 nv_drain_rx(dev); 2603 nv_drain_tx(dev); 2604 /* reinit driver view of the rx queue */ 2605 set_bufsize(dev); 2606 if (nv_init_ring(dev)) { 2607 if (!np->in_shutdown) 2608 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2609 } 2610 /* reinit nic view of the rx queue */ 2611 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2612 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2613 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 2614 base + NvRegRingSizes); 2615 pci_push(base); 2616 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2617 pci_push(base); 2618 2619 /* restart rx engine */ 2620 nv_start_rx(dev); 2621 nv_start_tx(dev); 2622 spin_unlock(&np->lock); 2623 netif_tx_unlock_bh(dev); 2624 nv_enable_irq(dev); 2625 } 2626 return 0; 2627} 2628 2629static void nv_copy_mac_to_hw(struct net_device *dev) 2630{ 2631 u8 __iomem *base = get_hwbase(dev); 2632 u32 mac[2]; 2633 2634 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 2635 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 2636 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 2637 2638 writel(mac[0], base + NvRegMacAddrA); 2639 writel(mac[1], base + NvRegMacAddrB); 2640} 2641 2642/* 2643 * nv_set_mac_address: dev->set_mac_address function 2644 * Called with rtnl_lock() held. 
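 * If the device is up, the rx engine is briefly stopped while the new
 * address is written to the hardware, then restarted.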
2645 */ 2646static int nv_set_mac_address(struct net_device *dev, void *addr) 2647{ 2648 struct fe_priv *np = netdev_priv(dev); 2649 struct sockaddr *macaddr = (struct sockaddr*)addr; 2650 2651 if (!is_valid_ether_addr(macaddr->sa_data)) 2652 return -EADDRNOTAVAIL; 2653 2654 /* synchronized against open : rtnl_lock() held by caller */ 2655 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); 2656 2657 if (netif_running(dev)) { 2658 netif_tx_lock_bh(dev); 2659 spin_lock_irq(&np->lock); 2660 2661 /* stop rx engine */ 2662 nv_stop_rx(dev); 2663 2664 /* set mac address */ 2665 nv_copy_mac_to_hw(dev); 2666 2667 /* restart rx engine */ 2668 nv_start_rx(dev); 2669 spin_unlock_irq(&np->lock); 2670 netif_tx_unlock_bh(dev); 2671 } else { 2672 nv_copy_mac_to_hw(dev); 2673 } 2674 return 0; 2675} 2676 2677/* 2678 * nv_set_multicast: dev->set_multicast function 2679 * Called with netif_tx_lock held. 2680 */ 2681static void nv_set_multicast(struct net_device *dev) 2682{ 2683 struct fe_priv *np = netdev_priv(dev); 2684 u8 __iomem *base = get_hwbase(dev); 2685 u32 addr[2]; 2686 u32 mask[2]; 2687 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; 2688 2689 memset(addr, 0, sizeof(addr)); 2690 memset(mask, 0, sizeof(mask)); 2691 2692 if (dev->flags & IFF_PROMISC) { 2693 pff |= NVREG_PFF_PROMISC; 2694 } else { 2695 pff |= NVREG_PFF_MYADDR; 2696 2697 if (dev->flags & IFF_ALLMULTI || dev->mc_list) { 2698 u32 alwaysOff[2]; 2699 u32 alwaysOn[2]; 2700 2701 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; 2702 if (dev->flags & IFF_ALLMULTI) { 2703 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 2704 } else { 2705 struct dev_mc_list *walk; 2706 2707 walk = dev->mc_list; 2708 while (walk != NULL) { 2709 u32 a, b; 2710 a = le32_to_cpu(*(__le32 *) walk->dmi_addr); 2711 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4])); 2712 alwaysOn[0] &= a; 2713 alwaysOff[0] &= ~a; 2714 alwaysOn[1] &= b; 2715 alwaysOff[1] &= ~b; 2716 walk = walk->next; 2717 } 2718 } 2719 addr[0] = alwaysOn[0]; 2720 addr[1] = alwaysOn[1]; 2721 mask[0] = alwaysOn[0] | alwaysOff[0]; 2722 mask[1] = alwaysOn[1] | alwaysOff[1]; 2723 } else { 2724 mask[0] = NVREG_MCASTMASKA_NONE; 2725 mask[1] = NVREG_MCASTMASKB_NONE; 2726 } 2727 } 2728 addr[0] |= NVREG_MCASTADDRA_FORCE; 2729 pff |= NVREG_PFF_ALWAYS; 2730 spin_lock_irq(&np->lock); 2731 nv_stop_rx(dev); 2732 writel(addr[0], base + NvRegMulticastAddrA); 2733 writel(addr[1], base + NvRegMulticastAddrB); 2734 writel(mask[0], base + NvRegMulticastMaskA); 2735 writel(mask[1], base + NvRegMulticastMaskB); 2736 writel(pff, base + NvRegPacketFilterFlags); 2737 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", 2738 dev->name); 2739 nv_start_rx(dev); 2740 spin_unlock_irq(&np->lock); 2741} 2742 2743static void nv_update_pause(struct net_device *dev, u32 pause_flags) 2744{ 2745 struct fe_priv *np = netdev_priv(dev); 2746 u8 __iomem *base = get_hwbase(dev); 2747 2748 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); 2749 2750 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { 2751 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; 2752 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { 2753 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); 2754 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2755 } else { 2756 writel(pff, base + NvRegPacketFilterFlags); 2757 } 2758 } 2759 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 2760 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 2761 if (pause_flags & 
NV_PAUSEFRAME_TX_ENABLE) { 2762 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 2763 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 2764 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 2765 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) 2766 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 2767 writel(pause_enable, base + NvRegTxPauseFrame); 2768 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 2769 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2770 } else { 2771 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 2772 writel(regmisc, base + NvRegMisc1); 2773 } 2774 } 2775} 2776 2777/** 2778 * nv_update_linkspeed: Setup the MAC according to the link partner 2779 * @dev: Network device to be configured 2780 * 2781 * The function queries the PHY and checks if there is a link partner. 2782 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is 2783 * set to 10 MBit HD. 2784 * 2785 * The function returns 0 if there is no link partner and 1 if there is 2786 * a good link partner. 2787 */ 2788static int nv_update_linkspeed(struct net_device *dev) 2789{ 2790 struct fe_priv *np = netdev_priv(dev); 2791 u8 __iomem *base = get_hwbase(dev); 2792 int adv = 0; 2793 int lpa = 0; 2794 int adv_lpa, adv_pause, lpa_pause; 2795 int newls = np->linkspeed; 2796 int newdup = np->duplex; 2797 int mii_status; 2798 int retval = 0; 2799 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 2800 u32 txrxFlags = 0; 2801 u32 phy_exp; 2802 2803 /* BMSR_LSTATUS is latched, read it twice: 2804 * we want the current value. 2805 */ 2806 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 2807 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 2808 2809 if (!(mii_status & BMSR_LSTATUS)) { 2810 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", 2811 dev->name); 2812 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2813 newdup = 0; 2814 retval = 0; 2815 goto set_speed; 2816 } 2817 2818 if (np->autoneg == 0) { 2819 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", 2820 dev->name, np->fixed_mode); 2821 if (np->fixed_mode & LPA_100FULL) { 2822 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2823 newdup = 1; 2824 } else if (np->fixed_mode & LPA_100HALF) { 2825 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2826 newdup = 0; 2827 } else if (np->fixed_mode & LPA_10FULL) { 2828 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2829 newdup = 1; 2830 } else { 2831 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2832 newdup = 0; 2833 } 2834 retval = 1; 2835 goto set_speed; 2836 } 2837 /* check auto negotiation is complete */ 2838 if (!(mii_status & BMSR_ANEGCOMPLETE)) { 2839 /* still in autonegotiation - configure nic for 10 MBit HD and wait. 
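The next link change interrupt (or the backup link timer) will re-run this check once negotiation completes.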
*/ 2840 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2841 newdup = 0; 2842 retval = 0; 2843 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); 2844 goto set_speed; 2845 } 2846 2847 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 2848 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 2849 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", 2850 dev->name, adv, lpa); 2851 2852 retval = 1; 2853 if (np->gigabit == PHY_GIGABIT) { 2854 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 2855 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); 2856 2857 if ((control_1000 & ADVERTISE_1000FULL) && 2858 (status_1000 & LPA_1000FULL)) { 2859 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", 2860 dev->name); 2861 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 2862 newdup = 1; 2863 goto set_speed; 2864 } 2865 } 2866 2867 /* FIXME: handle parallel detection properly */ 2868 adv_lpa = lpa & adv; 2869 if (adv_lpa & LPA_100FULL) { 2870 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2871 newdup = 1; 2872 } else if (adv_lpa & LPA_100HALF) { 2873 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2874 newdup = 0; 2875 } else if (adv_lpa & LPA_10FULL) { 2876 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2877 newdup = 1; 2878 } else if (adv_lpa & LPA_10HALF) { 2879 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2880 newdup = 0; 2881 } else { 2882 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); 2883 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2884 newdup = 0; 2885 } 2886 2887set_speed: 2888 if (np->duplex == newdup && np->linkspeed == newls) 2889 return retval; 2890 2891 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", 2892 dev->name, np->linkspeed, np->duplex, newls, newdup); 2893 2894 np->duplex = newdup; 2895 np->linkspeed = newls; 2896 2897 /* The transmitter and receiver must be restarted for safe update */ 2898 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) { 2899 txrxFlags |= NV_RESTART_TX; 2900 nv_stop_tx(dev); 2901 } 2902 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { 2903 txrxFlags |= NV_RESTART_RX; 2904 nv_stop_rx(dev); 2905 } 2906 2907 if (np->gigabit == PHY_GIGABIT) { 2908 phyreg = readl(base + NvRegRandomSeed); 2909 phyreg &= ~(0x3FF00); 2910 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) 2911 phyreg |= NVREG_RNDSEED_FORCE3; 2912 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) 2913 phyreg |= NVREG_RNDSEED_FORCE2; 2914 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 2915 phyreg |= NVREG_RNDSEED_FORCE; 2916 writel(phyreg, base + NvRegRandomSeed); 2917 } 2918 2919 phyreg = readl(base + NvRegPhyInterface); 2920 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 2921 if (np->duplex == 0) 2922 phyreg |= PHY_HALF; 2923 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 2924 phyreg |= PHY_100; 2925 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2926 phyreg |= PHY_1000; 2927 writel(phyreg, base + NvRegPhyInterface); 2928 2929 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ 2930 if (phyreg & PHY_RGMII) { 2931 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { 2932 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 2933 } else { 2934 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { 2935 if ((np->linkspeed & NVREG_LINKSPEED_MASK) 
== NVREG_LINKSPEED_10) 2936 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10; 2937 else 2938 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100; 2939 } else { 2940 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 2941 } 2942 } 2943 } else { 2944 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) 2945 txreg = NVREG_TX_DEFERRAL_MII_STRETCH; 2946 else 2947 txreg = NVREG_TX_DEFERRAL_DEFAULT; 2948 } 2949 writel(txreg, base + NvRegTxDeferral); 2950 2951 if (np->desc_ver == DESC_VER_1) { 2952 txreg = NVREG_TX_WM_DESC1_DEFAULT; 2953 } else { 2954 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2955 txreg = NVREG_TX_WM_DESC2_3_1000; 2956 else 2957 txreg = NVREG_TX_WM_DESC2_3_DEFAULT; 2958 } 2959 writel(txreg, base + NvRegTxWatermark); 2960 2961 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 2962 base + NvRegMisc1); 2963 pci_push(base); 2964 writel(np->linkspeed, base + NvRegLinkSpeed); 2965 pci_push(base); 2966 2967 pause_flags = 0; 2968 /* setup pause frame */ 2969 if (np->duplex != 0) { 2970 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 2971 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); 2972 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 2973 2974 switch (adv_pause) { 2975 case ADVERTISE_PAUSE_CAP: 2976 if (lpa_pause & LPA_PAUSE_CAP) { 2977 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2978 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 2979 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2980 } 2981 break; 2982 case ADVERTISE_PAUSE_ASYM: 2983 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 2984 { 2985 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2986 } 2987 break; 2988 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: 2989 if (lpa_pause & LPA_PAUSE_CAP) 2990 { 2991 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2992 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 2993 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2994 } 2995 if (lpa_pause == LPA_PAUSE_ASYM) 2996 { 2997 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2998 } 2999 break; 3000 } 3001 } else { 3002 pause_flags = np->pause_flags; 3003 } 3004 } 3005 nv_update_pause(dev, pause_flags); 3006 3007 if (txrxFlags & NV_RESTART_TX) 3008 nv_start_tx(dev); 3009 if (txrxFlags & NV_RESTART_RX) 3010 nv_start_rx(dev); 3011 3012 return retval; 3013} 3014 3015static void nv_linkchange(struct net_device *dev) 3016{ 3017 if (nv_update_linkspeed(dev)) { 3018 if (!netif_carrier_ok(dev)) { 3019 netif_carrier_on(dev); 3020 printk(KERN_INFO "%s: link up.\n", dev->name); 3021 nv_start_rx(dev); 3022 } 3023 } else { 3024 if (netif_carrier_ok(dev)) { 3025 netif_carrier_off(dev); 3026 printk(KERN_INFO "%s: link down.\n", dev->name); 3027 nv_stop_rx(dev); 3028 } 3029 } 3030} 3031 3032static void nv_link_irq(struct net_device *dev) 3033{ 3034 u8 __iomem *base = get_hwbase(dev); 3035 u32 miistat; 3036 3037 miistat = readl(base + NvRegMIIStatus); 3038 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus); 3039 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); 3040 3041 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 3042 nv_linkchange(dev); 3043 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); 3044} 3045 3046static irqreturn_t nv_nic_irq(int foo, void *data) 3047{ 3048 struct net_device *dev = (struct net_device *) data; 3049 struct fe_priv *np = netdev_priv(dev); 3050 u8 __iomem *base = get_hwbase(dev); 3051 u32 events; 3052 int i; 3053 3054 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 3055 3056 for (i=0; ; i++) { 3057 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3058 events = readl(base + 
NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3059 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3060 } else { 3061 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3062 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 3063 } 3064 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3065 if (!(events & np->irqmask)) 3066 break; 3067 3068 spin_lock(&np->lock); 3069 nv_tx_done(dev); 3070 spin_unlock(&np->lock); 3071 3072#ifdef CONFIG_FORCEDETH_NAPI 3073 if (events & NVREG_IRQ_RX_ALL) { 3074 netif_rx_schedule(dev, &np->napi); 3075 3076 /* Disable furthur receive irq's */ 3077 spin_lock(&np->lock); 3078 np->irqmask &= ~NVREG_IRQ_RX_ALL; 3079 3080 if (np->msi_flags & NV_MSI_X_ENABLED) 3081 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3082 else 3083 writel(np->irqmask, base + NvRegIrqMask); 3084 spin_unlock(&np->lock); 3085 } 3086#else 3087 if (nv_rx_process(dev, RX_WORK_PER_LOOP)) { 3088 if (unlikely(nv_alloc_rx(dev))) { 3089 spin_lock(&np->lock); 3090 if (!np->in_shutdown) 3091 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3092 spin_unlock(&np->lock); 3093 } 3094 } 3095#endif 3096 if (unlikely(events & NVREG_IRQ_LINK)) { 3097 spin_lock(&np->lock); 3098 nv_link_irq(dev); 3099 spin_unlock(&np->lock); 3100 } 3101 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3102 spin_lock(&np->lock); 3103 nv_linkchange(dev); 3104 spin_unlock(&np->lock); 3105 np->link_timeout = jiffies + LINK_TIMEOUT; 3106 } 3107 if (unlikely(events & (NVREG_IRQ_TX_ERR))) { 3108 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 3109 dev->name, events); 3110 } 3111 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) { 3112 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 3113 dev->name, events); 3114 } 3115 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { 3116 spin_lock(&np->lock); 3117 /* disable interrupts on the nic */ 3118 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3119 writel(0, base + NvRegIrqMask); 3120 else 3121 writel(np->irqmask, base + NvRegIrqMask); 3122 pci_push(base); 3123 3124 if (!np->in_shutdown) { 3125 np->nic_poll_irq = np->irqmask; 3126 np->recover_error = 1; 3127 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3128 } 3129 spin_unlock(&np->lock); 3130 break; 3131 } 3132 if (unlikely(i > max_interrupt_work)) { 3133 spin_lock(&np->lock); 3134 /* disable interrupts on the nic */ 3135 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3136 writel(0, base + NvRegIrqMask); 3137 else 3138 writel(np->irqmask, base + NvRegIrqMask); 3139 pci_push(base); 3140 3141 if (!np->in_shutdown) { 3142 np->nic_poll_irq = np->irqmask; 3143 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3144 } 3145 spin_unlock(&np->lock); 3146 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); 3147 break; 3148 } 3149 3150 } 3151 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 3152 3153 return IRQ_RETVAL(i); 3154} 3155 3156/** 3157 * All _optimized functions are used to help increase performance 3158 * (reduce CPU and increase throughput). They use descripter version 3, 3159 * compiler directives, and reduce memory accesses. 
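 * They are only selected when the hardware uses descriptor version 3
 * (DESC_VER_3); see nv_request_irq.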
3160 */ 3161static irqreturn_t nv_nic_irq_optimized(int foo, void *data) 3162{ 3163 struct net_device *dev = (struct net_device *) data; 3164 struct fe_priv *np = netdev_priv(dev); 3165 u8 __iomem *base = get_hwbase(dev); 3166 u32 events; 3167 int i; 3168 3169 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name); 3170 3171 for (i=0; ; i++) { 3172 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3173 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3174 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3175 } else { 3176 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3177 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 3178 } 3179 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3180 if (!(events & np->irqmask)) 3181 break; 3182 3183 spin_lock(&np->lock); 3184 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3185 spin_unlock(&np->lock); 3186 3187#ifdef CONFIG_FORCEDETH_NAPI 3188 if (events & NVREG_IRQ_RX_ALL) { 3189 netif_rx_schedule(dev, &np->napi); 3190 3191 /* Disable furthur receive irq's */ 3192 spin_lock(&np->lock); 3193 np->irqmask &= ~NVREG_IRQ_RX_ALL; 3194 3195 if (np->msi_flags & NV_MSI_X_ENABLED) 3196 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3197 else 3198 writel(np->irqmask, base + NvRegIrqMask); 3199 spin_unlock(&np->lock); 3200 } 3201#else 3202 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { 3203 if (unlikely(nv_alloc_rx_optimized(dev))) { 3204 spin_lock(&np->lock); 3205 if (!np->in_shutdown) 3206 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3207 spin_unlock(&np->lock); 3208 } 3209 } 3210#endif 3211 if (unlikely(events & NVREG_IRQ_LINK)) { 3212 spin_lock(&np->lock); 3213 nv_link_irq(dev); 3214 spin_unlock(&np->lock); 3215 } 3216 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3217 spin_lock(&np->lock); 3218 nv_linkchange(dev); 3219 spin_unlock(&np->lock); 3220 np->link_timeout = jiffies + LINK_TIMEOUT; 3221 } 3222 if (unlikely(events & (NVREG_IRQ_TX_ERR))) { 3223 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 3224 dev->name, events); 3225 } 3226 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) { 3227 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. 
Please report\n", 3228 dev->name, events); 3229 } 3230 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { 3231 spin_lock(&np->lock); 3232 /* disable interrupts on the nic */ 3233 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3234 writel(0, base + NvRegIrqMask); 3235 else 3236 writel(np->irqmask, base + NvRegIrqMask); 3237 pci_push(base); 3238 3239 if (!np->in_shutdown) { 3240 np->nic_poll_irq = np->irqmask; 3241 np->recover_error = 1; 3242 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3243 } 3244 spin_unlock(&np->lock); 3245 break; 3246 } 3247 3248 if (unlikely(i > max_interrupt_work)) { 3249 spin_lock(&np->lock); 3250 /* disable interrupts on the nic */ 3251 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3252 writel(0, base + NvRegIrqMask); 3253 else 3254 writel(np->irqmask, base + NvRegIrqMask); 3255 pci_push(base); 3256 3257 if (!np->in_shutdown) { 3258 np->nic_poll_irq = np->irqmask; 3259 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3260 } 3261 spin_unlock(&np->lock); 3262 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); 3263 break; 3264 } 3265 3266 } 3267 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name); 3268 3269 return IRQ_RETVAL(i); 3270} 3271 3272static irqreturn_t nv_nic_irq_tx(int foo, void *data) 3273{ 3274 struct net_device *dev = (struct net_device *) data; 3275 struct fe_priv *np = netdev_priv(dev); 3276 u8 __iomem *base = get_hwbase(dev); 3277 u32 events; 3278 int i; 3279 unsigned long flags; 3280 3281 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 3282 3283 for (i=0; ; i++) { 3284 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3285 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3286 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 3287 if (!(events & np->irqmask)) 3288 break; 3289 3290 spin_lock_irqsave(&np->lock, flags); 3291 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3292 spin_unlock_irqrestore(&np->lock, flags); 3293 3294 if (unlikely(events & (NVREG_IRQ_TX_ERR))) { 3295 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. 
Probably TX fail.\n", 3296 dev->name, events); 3297 } 3298 if (unlikely(i > max_interrupt_work)) { 3299 spin_lock_irqsave(&np->lock, flags); 3300 /* disable interrupts on the nic */ 3301 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3302 pci_push(base); 3303 3304 if (!np->in_shutdown) { 3305 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 3306 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3307 } 3308 spin_unlock_irqrestore(&np->lock, flags); 3309 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 3310 break; 3311 } 3312 3313 } 3314 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); 3315 3316 return IRQ_RETVAL(i); 3317} 3318 3319#ifdef CONFIG_FORCEDETH_NAPI 3320static int nv_napi_poll(struct napi_struct *napi, int budget) 3321{ 3322 struct fe_priv *np = container_of(napi, struct fe_priv, napi); 3323 struct net_device *dev = np->dev; 3324 u8 __iomem *base = get_hwbase(dev); 3325 unsigned long flags; 3326 int pkts, retcode; 3327 3328 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3329 pkts = nv_rx_process(dev, budget); 3330 retcode = nv_alloc_rx(dev); 3331 } else { 3332 pkts = nv_rx_process_optimized(dev, budget); 3333 retcode = nv_alloc_rx_optimized(dev); 3334 } 3335 3336 if (retcode) { 3337 spin_lock_irqsave(&np->lock, flags); 3338 if (!np->in_shutdown) 3339 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3340 spin_unlock_irqrestore(&np->lock, flags); 3341 } 3342 3343 if (pkts < budget) { 3344 /* re-enable receive interrupts */ 3345 spin_lock_irqsave(&np->lock, flags); 3346 3347 __netif_rx_complete(dev, napi); 3348 3349 np->irqmask |= NVREG_IRQ_RX_ALL; 3350 if (np->msi_flags & NV_MSI_X_ENABLED) 3351 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3352 else 3353 writel(np->irqmask, base + NvRegIrqMask); 3354 3355 spin_unlock_irqrestore(&np->lock, flags); 3356 } 3357 return pkts; 3358} 3359#endif 3360 3361#ifdef CONFIG_FORCEDETH_NAPI 3362static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3363{ 3364 struct net_device *dev = (struct net_device *) data; 3365 struct fe_priv *np = netdev_priv(dev); 3366 u8 __iomem *base = get_hwbase(dev); 3367 u32 events; 3368 3369 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3370 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3371 3372 if (events) { 3373 netif_rx_schedule(dev, &np->napi); 3374 /* disable receive interrupts on the nic */ 3375 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3376 pci_push(base); 3377 } 3378 return IRQ_HANDLED; 3379} 3380#else 3381static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3382{ 3383 struct net_device *dev = (struct net_device *) data; 3384 struct fe_priv *np = netdev_priv(dev); 3385 u8 __iomem *base = get_hwbase(dev); 3386 u32 events; 3387 int i; 3388 unsigned long flags; 3389 3390 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 3391 3392 for (i=0; ; i++) { 3393 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3394 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3395 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 3396 if (!(events & np->irqmask)) 3397 break; 3398 3399 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { 3400 if (unlikely(nv_alloc_rx_optimized(dev))) { 3401 spin_lock_irqsave(&np->lock, flags); 3402 if (!np->in_shutdown) 3403 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3404 spin_unlock_irqrestore(&np->lock, flags); 3405 } 3406 } 3407 3408 if (unlikely(i > max_interrupt_work)) { 3409 spin_lock_irqsave(&np->lock, flags); 3410 /* disable interrupts on the nic */ 3411 
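/* rx work is deferred to the nic_poll timer armed below */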
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3412 pci_push(base); 3413 3414 if (!np->in_shutdown) { 3415 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 3416 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3417 } 3418 spin_unlock_irqrestore(&np->lock, flags); 3419 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 3420 break; 3421 } 3422 } 3423 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); 3424 3425 return IRQ_RETVAL(i); 3426} 3427#endif 3428 3429static irqreturn_t nv_nic_irq_other(int foo, void *data) 3430{ 3431 struct net_device *dev = (struct net_device *) data; 3432 struct fe_priv *np = netdev_priv(dev); 3433 u8 __iomem *base = get_hwbase(dev); 3434 u32 events; 3435 int i; 3436 unsigned long flags; 3437 3438 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 3439 3440 for (i=0; ; i++) { 3441 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3442 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3443 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3444 if (!(events & np->irqmask)) 3445 break; 3446 3447 /* check tx in case we reached max loop limit in tx isr */ 3448 spin_lock_irqsave(&np->lock, flags); 3449 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3450 spin_unlock_irqrestore(&np->lock, flags); 3451 3452 if (events & NVREG_IRQ_LINK) { 3453 spin_lock_irqsave(&np->lock, flags); 3454 nv_link_irq(dev); 3455 spin_unlock_irqrestore(&np->lock, flags); 3456 } 3457 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 3458 spin_lock_irqsave(&np->lock, flags); 3459 nv_linkchange(dev); 3460 spin_unlock_irqrestore(&np->lock, flags); 3461 np->link_timeout = jiffies + LINK_TIMEOUT; 3462 } 3463 if (events & NVREG_IRQ_RECOVER_ERROR) { 3464 spin_lock_irq(&np->lock); 3465 /* disable interrupts on the nic */ 3466 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3467 pci_push(base); 3468 3469 if (!np->in_shutdown) { 3470 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3471 np->recover_error = 1; 3472 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3473 } 3474 spin_unlock_irq(&np->lock); 3475 break; 3476 } 3477 if (events & (NVREG_IRQ_UNKNOWN)) { 3478 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. 
Please report\n", 3479 dev->name, events); 3480 } 3481 if (unlikely(i > max_interrupt_work)) { 3482 spin_lock_irqsave(&np->lock, flags); 3483 /* disable interrupts on the nic */ 3484 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3485 pci_push(base); 3486 3487 if (!np->in_shutdown) { 3488 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3489 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3490 } 3491 spin_unlock_irqrestore(&np->lock, flags); 3492 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 3493 break; 3494 } 3495 3496 } 3497 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); 3498 3499 return IRQ_RETVAL(i); 3500} 3501 3502static irqreturn_t nv_nic_irq_test(int foo, void *data) 3503{ 3504 struct net_device *dev = (struct net_device *) data; 3505 struct fe_priv *np = netdev_priv(dev); 3506 u8 __iomem *base = get_hwbase(dev); 3507 u32 events; 3508 3509 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name); 3510 3511 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3512 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3513 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3514 } else { 3515 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3516 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3517 } 3518 pci_push(base); 3519 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3520 if (!(events & NVREG_IRQ_TIMER)) 3521 return IRQ_RETVAL(0); 3522 3523 spin_lock(&np->lock); 3524 np->intr_test = 1; 3525 spin_unlock(&np->lock); 3526 3527 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); 3528 3529 return IRQ_RETVAL(1); 3530} 3531 3532static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) 3533{ 3534 u8 __iomem *base = get_hwbase(dev); 3535 int i; 3536 u32 msixmap = 0; 3537 3538 /* Each interrupt bit can be mapped to a MSIX vector (4 bits). 3539 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents 3540 * the remaining 8 interrupts. 
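 * Example: routing vector 1 to interrupt bit 3 places 0x1 in the nibble
 * at bits 12..15 of NvRegMSIXMap0 (vector << (3 << 2)).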
3541 */ 3542 for (i = 0; i < 8; i++) { 3543 if ((irqmask >> i) & 0x1) { 3544 msixmap |= vector << (i << 2); 3545 } 3546 } 3547 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3548 3549 msixmap = 0; 3550 for (i = 0; i < 8; i++) { 3551 if ((irqmask >> (i + 8)) & 0x1) { 3552 msixmap |= vector << (i << 2); 3553 } 3554 } 3555 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3556} 3557 3558static int nv_request_irq(struct net_device *dev, int intr_test) 3559{ 3560 struct fe_priv *np = get_nvpriv(dev); 3561 u8 __iomem *base = get_hwbase(dev); 3562 int ret = 1; 3563 int i; 3564 irqreturn_t (*handler)(int foo, void *data); 3565 3566 if (intr_test) { 3567 handler = nv_nic_irq_test; 3568 } else { 3569 if (np->desc_ver == DESC_VER_3) 3570 handler = nv_nic_irq_optimized; 3571 else 3572 handler = nv_nic_irq; 3573 } 3574 3575 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3576 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3577 np->msi_x_entry[i].entry = i; 3578 } 3579 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 3580 np->msi_flags |= NV_MSI_X_ENABLED; 3581 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3582 /* Request irq for rx handling */ 3583 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) { 3584 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 3585 pci_disable_msix(np->pci_dev); 3586 np->msi_flags &= ~NV_MSI_X_ENABLED; 3587 goto out_err; 3588 } 3589 /* Request irq for tx handling */ 3590 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) { 3591 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 3592 pci_disable_msix(np->pci_dev); 3593 np->msi_flags &= ~NV_MSI_X_ENABLED; 3594 goto out_free_rx; 3595 } 3596 /* Request irq for link and timer handling */ 3597 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) { 3598 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 3599 pci_disable_msix(np->pci_dev); 3600 np->msi_flags &= ~NV_MSI_X_ENABLED; 3601 goto out_free_tx; 3602 } 3603 /* map interrupts to their respective vector */ 3604 writel(0, base + NvRegMSIXMap0); 3605 writel(0, base + NvRegMSIXMap1); 3606 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 3607 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 3608 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 3609 } else { 3610 /* Request irq for all interrupts */ 3611 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 3612 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3613 pci_disable_msix(np->pci_dev); 3614 np->msi_flags &= ~NV_MSI_X_ENABLED; 3615 goto out_err; 3616 } 3617 3618 /* map interrupts to vector 0 */ 3619 writel(0, base + NvRegMSIXMap0); 3620 writel(0, base + NvRegMSIXMap1); 3621 } 3622 } 3623 } 3624 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3625 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3626 np->msi_flags |= NV_MSI_ENABLED; 3627 dev->irq = np->pci_dev->irq; 3628 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3629 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3630 pci_disable_msi(np->pci_dev); 3631 np->msi_flags &= ~NV_MSI_ENABLED; 3632 dev->irq = np->pci_dev->irq; 3633 goto out_err; 3634 } 
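			/* plain MSI is now enabled and the handler is installed;
			 * as in the single-vector MSI-X path above, the writes
			 * below route every interrupt source to vector 0 and
			 * then unmask that one vector in the MSI irq mask */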
3635 3636 /* map interrupts to vector 0 */ 3637 writel(0, base + NvRegMSIMap0); 3638 writel(0, base + NvRegMSIMap1); 3639 /* enable msi vector 0 */ 3640 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3641 } 3642 } 3643 if (ret != 0) { 3644 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) 3645 goto out_err; 3646 3647 } 3648 3649 return 0; 3650out_free_tx: 3651 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); 3652out_free_rx: 3653 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); 3654out_err: 3655 return 1; 3656} 3657 3658static void nv_free_irq(struct net_device *dev) 3659{ 3660 struct fe_priv *np = get_nvpriv(dev); 3661 int i; 3662 3663 if (np->msi_flags & NV_MSI_X_ENABLED) { 3664 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3665 free_irq(np->msi_x_entry[i].vector, dev); 3666 } 3667 pci_disable_msix(np->pci_dev); 3668 np->msi_flags &= ~NV_MSI_X_ENABLED; 3669 } else { 3670 free_irq(np->pci_dev->irq, dev); 3671 if (np->msi_flags & NV_MSI_ENABLED) { 3672 pci_disable_msi(np->pci_dev); 3673 np->msi_flags &= ~NV_MSI_ENABLED; 3674 } 3675 } 3676} 3677 3678static void nv_do_nic_poll(unsigned long data) 3679{ 3680 struct net_device *dev = (struct net_device *) data; 3681 struct fe_priv *np = netdev_priv(dev); 3682 u8 __iomem *base = get_hwbase(dev); 3683 u32 mask = 0; 3684 3685 /* 3686 * First disable irq(s) and then 3687 * reenable interrupts on the nic, we have to do this before calling 3688 * nv_nic_irq because that may decide to do otherwise 3689 */ 3690 3691 if (!using_multi_irqs(dev)) { 3692 if (np->msi_flags & NV_MSI_X_ENABLED) 3693 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 3694 else 3695 disable_irq_lockdep(np->pci_dev->irq); 3696 mask = np->irqmask; 3697 } else { 3698 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 3699 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 3700 mask |= NVREG_IRQ_RX_ALL; 3701 } 3702 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 3703 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 3704 mask |= NVREG_IRQ_TX_ALL; 3705 } 3706 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 3707 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 3708 mask |= NVREG_IRQ_OTHER; 3709 } 3710 } 3711 np->nic_poll_irq = 0; 3712 3713 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ 3714 3715 if (np->recover_error) { 3716 np->recover_error = 0; 3717 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n"); 3718 if (netif_running(dev)) { 3719 netif_tx_lock_bh(dev); 3720 spin_lock(&np->lock); 3721 /* stop engines */ 3722 nv_stop_rx(dev); 3723 nv_stop_tx(dev); 3724 nv_txrx_reset(dev); 3725 /* drain rx queue */ 3726 nv_drain_rx(dev); 3727 nv_drain_tx(dev); 3728 /* reinit driver view of the rx queue */ 3729 set_bufsize(dev); 3730 if (nv_init_ring(dev)) { 3731 if (!np->in_shutdown) 3732 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3733 } 3734 /* reinit nic view of the rx queue */ 3735 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3736 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3737 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3738 base + NvRegRingSizes); 3739 pci_push(base); 3740 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3741 pci_push(base); 3742 3743 /* restart rx engine */ 3744 nv_start_rx(dev); 3745 nv_start_tx(dev); 3746 spin_unlock(&np->lock); 3747 netif_tx_unlock_bh(dev); 3748 } 3749 } 3750 3751 3752 
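	/* re-arm the interrupt sources that were masked off on the nic and
	 * call the matching handler(s) by hand: the irq line(s) are still
	 * disabled at this point, so any events that arrived while polling
	 * are processed before the line(s) are enabled again below */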
writel(mask, base + NvRegIrqMask); 3753 pci_push(base); 3754 3755 if (!using_multi_irqs(dev)) { 3756 if (np->desc_ver == DESC_VER_3) 3757 nv_nic_irq_optimized(0, dev); 3758 else 3759 nv_nic_irq(0, dev); 3760 if (np->msi_flags & NV_MSI_X_ENABLED) 3761 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 3762 else 3763 enable_irq_lockdep(np->pci_dev->irq); 3764 } else { 3765 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 3766 nv_nic_irq_rx(0, dev); 3767 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 3768 } 3769 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 3770 nv_nic_irq_tx(0, dev); 3771 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 3772 } 3773 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 3774 nv_nic_irq_other(0, dev); 3775 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 3776 } 3777 } 3778} 3779 3780#ifdef CONFIG_NET_POLL_CONTROLLER 3781static void nv_poll_controller(struct net_device *dev) 3782{ 3783 nv_do_nic_poll((unsigned long) dev); 3784} 3785#endif 3786 3787static void nv_do_stats_poll(unsigned long data) 3788{ 3789 struct net_device *dev = (struct net_device *) data; 3790 struct fe_priv *np = netdev_priv(dev); 3791 3792 nv_get_hw_stats(dev); 3793 3794 if (!np->in_shutdown) 3795 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 3796} 3797 3798static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 3799{ 3800 struct fe_priv *np = netdev_priv(dev); 3801 strcpy(info->driver, DRV_NAME); 3802 strcpy(info->version, FORCEDETH_VERSION); 3803 strcpy(info->bus_info, pci_name(np->pci_dev)); 3804} 3805 3806static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 3807{ 3808 struct fe_priv *np = netdev_priv(dev); 3809 wolinfo->supported = WAKE_MAGIC; 3810 3811 spin_lock_irq(&np->lock); 3812 if (np->wolenabled) 3813 wolinfo->wolopts = WAKE_MAGIC; 3814 spin_unlock_irq(&np->lock); 3815} 3816 3817static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 3818{ 3819 struct fe_priv *np = netdev_priv(dev); 3820 u8 __iomem *base = get_hwbase(dev); 3821 u32 flags = 0; 3822 3823 if (wolinfo->wolopts == 0) { 3824 np->wolenabled = 0; 3825 } else if (wolinfo->wolopts & WAKE_MAGIC) { 3826 np->wolenabled = 1; 3827 flags = NVREG_WAKEUPFLAGS_ENABLE; 3828 } 3829 if (netif_running(dev)) { 3830 spin_lock_irq(&np->lock); 3831 writel(flags, base + NvRegWakeUpFlags); 3832 spin_unlock_irq(&np->lock); 3833 } 3834 return 0; 3835} 3836 3837static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 3838{ 3839 struct fe_priv *np = netdev_priv(dev); 3840 int adv; 3841 3842 spin_lock_irq(&np->lock); 3843 ecmd->port = PORT_MII; 3844 if (!netif_running(dev)) { 3845 /* We do not track link speed / duplex setting if the 3846 * interface is disabled. 
Force a link check */ 3847 if (nv_update_linkspeed(dev)) { 3848 if (!netif_carrier_ok(dev)) 3849 netif_carrier_on(dev); 3850 } else { 3851 if (netif_carrier_ok(dev)) 3852 netif_carrier_off(dev); 3853 } 3854 } 3855 3856 if (netif_carrier_ok(dev)) { 3857 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 3858 case NVREG_LINKSPEED_10: 3859 ecmd->speed = SPEED_10; 3860 break; 3861 case NVREG_LINKSPEED_100: 3862 ecmd->speed = SPEED_100; 3863 break; 3864 case NVREG_LINKSPEED_1000: 3865 ecmd->speed = SPEED_1000; 3866 break; 3867 } 3868 ecmd->duplex = DUPLEX_HALF; 3869 if (np->duplex) 3870 ecmd->duplex = DUPLEX_FULL; 3871 } else { 3872 ecmd->speed = -1; 3873 ecmd->duplex = -1; 3874 } 3875 3876 ecmd->autoneg = np->autoneg; 3877 3878 ecmd->advertising = ADVERTISED_MII; 3879 if (np->autoneg) { 3880 ecmd->advertising |= ADVERTISED_Autoneg; 3881 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3882 if (adv & ADVERTISE_10HALF) 3883 ecmd->advertising |= ADVERTISED_10baseT_Half; 3884 if (adv & ADVERTISE_10FULL) 3885 ecmd->advertising |= ADVERTISED_10baseT_Full; 3886 if (adv & ADVERTISE_100HALF) 3887 ecmd->advertising |= ADVERTISED_100baseT_Half; 3888 if (adv & ADVERTISE_100FULL) 3889 ecmd->advertising |= ADVERTISED_100baseT_Full; 3890 if (np->gigabit == PHY_GIGABIT) { 3891 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3892 if (adv & ADVERTISE_1000FULL) 3893 ecmd->advertising |= ADVERTISED_1000baseT_Full; 3894 } 3895 } 3896 ecmd->supported = (SUPPORTED_Autoneg | 3897 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 3898 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 3899 SUPPORTED_MII); 3900 if (np->gigabit == PHY_GIGABIT) 3901 ecmd->supported |= SUPPORTED_1000baseT_Full; 3902 3903 ecmd->phy_address = np->phyaddr; 3904 ecmd->transceiver = XCVR_EXTERNAL; 3905 3906 /* ignore maxtxpkt, maxrxpkt for now */ 3907 spin_unlock_irq(&np->lock); 3908 return 0; 3909} 3910 3911static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 3912{ 3913 struct fe_priv *np = netdev_priv(dev); 3914 3915 if (ecmd->port != PORT_MII) 3916 return -EINVAL; 3917 if (ecmd->transceiver != XCVR_EXTERNAL) 3918 return -EINVAL; 3919 if (ecmd->phy_address != np->phyaddr) { 3920 /* TODO: support switching between multiple phys. Should be 3921 * trivial, but not enabled due to lack of test hardware. */ 3922 return -EINVAL; 3923 } 3924 if (ecmd->autoneg == AUTONEG_ENABLE) { 3925 u32 mask; 3926 3927 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 3928 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 3929 if (np->gigabit == PHY_GIGABIT) 3930 mask |= ADVERTISED_1000baseT_Full; 3931 3932 if ((ecmd->advertising & mask) == 0) 3933 return -EINVAL; 3934 3935 } else if (ecmd->autoneg == AUTONEG_DISABLE) { 3936 /* Note: autonegotiation disable, speed 1000 intentionally 3937 * forbidden - noone should need that. 
*/ 3938 3939 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) 3940 return -EINVAL; 3941 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 3942 return -EINVAL; 3943 } else { 3944 return -EINVAL; 3945 } 3946 3947 netif_carrier_off(dev); 3948 if (netif_running(dev)) { 3949 nv_disable_irq(dev); 3950 netif_tx_lock_bh(dev); 3951 spin_lock(&np->lock); 3952 /* stop engines */ 3953 nv_stop_rx(dev); 3954 nv_stop_tx(dev); 3955 spin_unlock(&np->lock); 3956 netif_tx_unlock_bh(dev); 3957 } 3958 3959 if (ecmd->autoneg == AUTONEG_ENABLE) { 3960 int adv, bmcr; 3961 3962 np->autoneg = 1; 3963 3964 /* advertise only what has been requested */ 3965 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3966 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3967 if (ecmd->advertising & ADVERTISED_10baseT_Half) 3968 adv |= ADVERTISE_10HALF; 3969 if (ecmd->advertising & ADVERTISED_10baseT_Full) 3970 adv |= ADVERTISE_10FULL; 3971 if (ecmd->advertising & ADVERTISED_100baseT_Half) 3972 adv |= ADVERTISE_100HALF; 3973 if (ecmd->advertising & ADVERTISED_100baseT_Full) 3974 adv |= ADVERTISE_100FULL; 3975 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */ 3976 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 3977 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3978 adv |= ADVERTISE_PAUSE_ASYM; 3979 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 3980 3981 if (np->gigabit == PHY_GIGABIT) { 3982 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3983 adv &= ~ADVERTISE_1000FULL; 3984 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 3985 adv |= ADVERTISE_1000FULL; 3986 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 3987 } 3988 3989 if (netif_running(dev)) 3990 printk(KERN_INFO "%s: link down.\n", dev->name); 3991 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3992 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 3993 bmcr |= BMCR_ANENABLE; 3994 /* reset the phy in order for settings to stick, 3995 * and cause autoneg to start */ 3996 if (phy_reset(dev, bmcr)) { 3997 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 3998 return -EINVAL; 3999 } 4000 } else { 4001 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4002 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4003 } 4004 } else { 4005 int adv, bmcr; 4006 4007 np->autoneg = 0; 4008 4009 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4010 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4011 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) 4012 adv |= ADVERTISE_10HALF; 4013 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) 4014 adv |= ADVERTISE_10FULL; 4015 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) 4016 adv |= ADVERTISE_100HALF; 4017 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) 4018 adv |= ADVERTISE_100FULL; 4019 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4020 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */ 4021 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4022 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4023 } 4024 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { 4025 adv |= ADVERTISE_PAUSE_ASYM; 4026 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4027 } 4028 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4029 np->fixed_mode = adv; 4030 4031 if (np->gigabit == PHY_GIGABIT) { 4032 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4033 adv &= 
				~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}

	if (netif_running(dev)) {
		nv_start_rx(dev);
		nv_start_tx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}

#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	/* the caller allocates exactly register_size bytes (see
	 * nv_get_regs_len above), so stop before running one u32
	 * past the end of the snapshot buffer */
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}

static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			spin_unlock(&np->lock);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}

		if (netif_running(dev)) {
			nv_start_rx(dev);
			nv_start_tx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}

static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ?
RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4155 4156 ring->rx_pending = np->rx_ring_size; 4157 ring->rx_mini_pending = 0; 4158 ring->rx_jumbo_pending = 0; 4159 ring->tx_pending = np->tx_ring_size; 4160} 4161 4162static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4163{ 4164 struct fe_priv *np = netdev_priv(dev); 4165 u8 __iomem *base = get_hwbase(dev); 4166 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; 4167 dma_addr_t ring_addr; 4168 4169 if (ring->rx_pending < RX_RING_MIN || 4170 ring->tx_pending < TX_RING_MIN || 4171 ring->rx_mini_pending != 0 || 4172 ring->rx_jumbo_pending != 0 || 4173 (np->desc_ver == DESC_VER_1 && 4174 (ring->rx_pending > RING_MAX_DESC_VER_1 || 4175 ring->tx_pending > RING_MAX_DESC_VER_1)) || 4176 (np->desc_ver != DESC_VER_1 && 4177 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 4178 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { 4179 return -EINVAL; 4180 } 4181 4182 /* allocate new rings */ 4183 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4184 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4185 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4186 &ring_addr); 4187 } else { 4188 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4189 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4190 &ring_addr); 4191 } 4192 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); 4193 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); 4194 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { 4195 /* fall back to old rings */ 4196 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4197 if (rxtx_ring) 4198 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4199 rxtx_ring, ring_addr); 4200 } else { 4201 if (rxtx_ring) 4202 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4203 rxtx_ring, ring_addr); 4204 } 4205 if (rx_skbuff) 4206 kfree(rx_skbuff); 4207 if (tx_skbuff) 4208 kfree(tx_skbuff); 4209 goto exit; 4210 } 4211 4212 if (netif_running(dev)) { 4213 nv_disable_irq(dev); 4214 netif_tx_lock_bh(dev); 4215 spin_lock(&np->lock); 4216 /* stop engines */ 4217 nv_stop_rx(dev); 4218 nv_stop_tx(dev); 4219 nv_txrx_reset(dev); 4220 /* drain queues */ 4221 nv_drain_rx(dev); 4222 nv_drain_tx(dev); 4223 /* delete queues */ 4224 free_rings(dev); 4225 } 4226 4227 /* set new values */ 4228 np->rx_ring_size = ring->rx_pending; 4229 np->tx_ring_size = ring->tx_pending; 4230 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4231 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4232 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4233 } else { 4234 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4235 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4236 } 4237 np->rx_skb = (struct nv_skb_map*)rx_skbuff; 4238 np->tx_skb = (struct nv_skb_map*)tx_skbuff; 4239 np->ring_addr = ring_addr; 4240 4241 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4242 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); 4243 4244 if (netif_running(dev)) { 4245 /* reinit driver view of the queues */ 4246 set_bufsize(dev); 4247 if (nv_init_ring(dev)) { 4248 if (!np->in_shutdown) 4249 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4250 } 4251 4252 /* reinit nic view of the queues */ 4253 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4254 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4255 writel( ((np->rx_ring_size-1) << 
NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4256 base + NvRegRingSizes); 4257 pci_push(base); 4258 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4259 pci_push(base); 4260 4261 /* restart engines */ 4262 nv_start_rx(dev); 4263 nv_start_tx(dev); 4264 spin_unlock(&np->lock); 4265 netif_tx_unlock_bh(dev); 4266 nv_enable_irq(dev); 4267 } 4268 return 0; 4269exit: 4270 return -ENOMEM; 4271} 4272 4273static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4274{ 4275 struct fe_priv *np = netdev_priv(dev); 4276 4277 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; 4278 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; 4279 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; 4280} 4281 4282static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4283{ 4284 struct fe_priv *np = netdev_priv(dev); 4285 int adv, bmcr; 4286 4287 if ((!np->autoneg && np->duplex == 0) || 4288 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4289 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", 4290 dev->name); 4291 return -EINVAL; 4292 } 4293 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4294 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 4295 return -EINVAL; 4296 } 4297 4298 netif_carrier_off(dev); 4299 if (netif_running(dev)) { 4300 nv_disable_irq(dev); 4301 netif_tx_lock_bh(dev); 4302 spin_lock(&np->lock); 4303 /* stop engines */ 4304 nv_stop_rx(dev); 4305 nv_stop_tx(dev); 4306 spin_unlock(&np->lock); 4307 netif_tx_unlock_bh(dev); 4308 } 4309 4310 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); 4311 if (pause->rx_pause) 4312 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; 4313 if (pause->tx_pause) 4314 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; 4315 4316 if (np->autoneg && pause->autoneg) { 4317 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; 4318 4319 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4320 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4321 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */ 4322 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4323 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4324 adv |= ADVERTISE_PAUSE_ASYM; 4325 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4326 4327 if (netif_running(dev)) 4328 printk(KERN_INFO "%s: link down.\n", dev->name); 4329 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4330 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4331 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4332 } else { 4333 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4334 if (pause->rx_pause) 4335 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4336 if (pause->tx_pause) 4337 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4338 4339 if (!netif_running(dev)) 4340 nv_update_linkspeed(dev); 4341 else 4342 nv_update_pause(dev, np->pause_flags); 4343 } 4344 4345 if (netif_running(dev)) { 4346 nv_start_rx(dev); 4347 nv_start_tx(dev); 4348 nv_enable_irq(dev); 4349 } 4350 return 0; 4351} 4352 4353static u32 nv_get_rx_csum(struct net_device *dev) 4354{ 4355 struct fe_priv *np = netdev_priv(dev); 4356 return (np->rx_csum) != 0; 4357} 4358 4359static int nv_set_rx_csum(struct net_device *dev, u32 data) 4360{ 4361 struct fe_priv *np = netdev_priv(dev); 4362 u8 __iomem *base = get_hwbase(dev); 4363 int retcode = 0; 4364 4365 if 
(np->driver_data & DEV_HAS_CHECKSUM) { 4366 if (data) { 4367 np->rx_csum = 1; 4368 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4369 } else { 4370 np->rx_csum = 0; 4371 /* vlan is dependent on rx checksum offload */ 4372 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) 4373 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; 4374 } 4375 if (netif_running(dev)) { 4376 spin_lock_irq(&np->lock); 4377 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4378 spin_unlock_irq(&np->lock); 4379 } 4380 } else { 4381 return -EINVAL; 4382 } 4383 4384 return retcode; 4385} 4386 4387static int nv_set_tx_csum(struct net_device *dev, u32 data) 4388{ 4389 struct fe_priv *np = netdev_priv(dev); 4390 4391 if (np->driver_data & DEV_HAS_CHECKSUM) 4392 return ethtool_op_set_tx_hw_csum(dev, data); 4393 else 4394 return -EOPNOTSUPP; 4395} 4396 4397static int nv_set_sg(struct net_device *dev, u32 data) 4398{ 4399 struct fe_priv *np = netdev_priv(dev); 4400 4401 if (np->driver_data & DEV_HAS_CHECKSUM) 4402 return ethtool_op_set_sg(dev, data); 4403 else 4404 return -EOPNOTSUPP; 4405} 4406 4407static int nv_get_sset_count(struct net_device *dev, int sset) 4408{ 4409 struct fe_priv *np = netdev_priv(dev); 4410 4411 switch (sset) { 4412 case ETH_SS_TEST: 4413 if (np->driver_data & DEV_HAS_TEST_EXTENDED) 4414 return NV_TEST_COUNT_EXTENDED; 4415 else 4416 return NV_TEST_COUNT_BASE; 4417 case ETH_SS_STATS: 4418 if (np->driver_data & DEV_HAS_STATISTICS_V1) 4419 return NV_DEV_STATISTICS_V1_COUNT; 4420 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4421 return NV_DEV_STATISTICS_V2_COUNT; 4422 else 4423 return 0; 4424 default: 4425 return -EOPNOTSUPP; 4426 } 4427} 4428 4429static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) 4430{ 4431 struct fe_priv *np = netdev_priv(dev); 4432 4433 /* update stats */ 4434 nv_do_stats_poll((unsigned long)dev); 4435 4436 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); 4437} 4438 4439static int nv_link_test(struct net_device *dev) 4440{ 4441 struct fe_priv *np = netdev_priv(dev); 4442 int mii_status; 4443 4444 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4445 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4446 4447 /* check phy link status */ 4448 if (!(mii_status & BMSR_LSTATUS)) 4449 return 0; 4450 else 4451 return 1; 4452} 4453 4454static int nv_register_test(struct net_device *dev) 4455{ 4456 u8 __iomem *base = get_hwbase(dev); 4457 int i = 0; 4458 u32 orig_read, new_read; 4459 4460 do { 4461 orig_read = readl(base + nv_registers_test[i].reg); 4462 4463 /* xor with mask to toggle bits */ 4464 orig_read ^= nv_registers_test[i].mask; 4465 4466 writel(orig_read, base + nv_registers_test[i].reg); 4467 4468 new_read = readl(base + nv_registers_test[i].reg); 4469 4470 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) 4471 return 0; 4472 4473 /* restore original value */ 4474 orig_read ^= nv_registers_test[i].mask; 4475 writel(orig_read, base + nv_registers_test[i].reg); 4476 4477 } while (nv_registers_test[++i].reg != 0); 4478 4479 return 1; 4480} 4481 4482static int nv_interrupt_test(struct net_device *dev) 4483{ 4484 struct fe_priv *np = netdev_priv(dev); 4485 u8 __iomem *base = get_hwbase(dev); 4486 int ret = 1; 4487 int testcnt; 4488 u32 save_msi_flags, save_poll_interval = 0; 4489 4490 if (netif_running(dev)) { 4491 /* free current irq */ 4492 nv_free_irq(dev); 4493 save_poll_interval = readl(base+NvRegPollingInterval); 4494 } 4495 4496 /* flag to test interrupt handler 
*/ 4497 np->intr_test = 0; 4498 4499 /* setup test irq */ 4500 save_msi_flags = np->msi_flags; 4501 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; 4502 np->msi_flags |= 0x001; /* setup 1 vector */ 4503 if (nv_request_irq(dev, 1)) 4504 return 0; 4505 4506 /* setup timer interrupt */ 4507 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 4508 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4509 4510 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4511 4512 /* wait for at least one interrupt */ 4513 msleep(100); 4514 4515 spin_lock_irq(&np->lock); 4516 4517 /* flag should be set within ISR */ 4518 testcnt = np->intr_test; 4519 if (!testcnt) 4520 ret = 2; 4521 4522 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4523 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 4524 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4525 else 4526 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4527 4528 spin_unlock_irq(&np->lock); 4529 4530 nv_free_irq(dev); 4531 4532 np->msi_flags = save_msi_flags; 4533 4534 if (netif_running(dev)) { 4535 writel(save_poll_interval, base + NvRegPollingInterval); 4536 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4537 /* restore original irq */ 4538 if (nv_request_irq(dev, 0)) 4539 return 0; 4540 } 4541 4542 return ret; 4543} 4544 4545static int nv_loopback_test(struct net_device *dev) 4546{ 4547 struct fe_priv *np = netdev_priv(dev); 4548 u8 __iomem *base = get_hwbase(dev); 4549 struct sk_buff *tx_skb, *rx_skb; 4550 dma_addr_t test_dma_addr; 4551 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 4552 u32 flags; 4553 int len, i, pkt_len; 4554 u8 *pkt_data; 4555 u32 filter_flags = 0; 4556 u32 misc1_flags = 0; 4557 int ret = 1; 4558 4559 if (netif_running(dev)) { 4560 nv_disable_irq(dev); 4561 filter_flags = readl(base + NvRegPacketFilterFlags); 4562 misc1_flags = readl(base + NvRegMisc1); 4563 } else { 4564 nv_txrx_reset(dev); 4565 } 4566 4567 /* reinit driver view of the rx queue */ 4568 set_bufsize(dev); 4569 nv_init_ring(dev); 4570 4571 /* setup hardware for loopback */ 4572 writel(NVREG_MISC1_FORCE, base + NvRegMisc1); 4573 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); 4574 4575 /* reinit nic view of the rx queue */ 4576 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4577 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4578 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4579 base + NvRegRingSizes); 4580 pci_push(base); 4581 4582 /* restart rx engine */ 4583 nv_start_rx(dev); 4584 nv_start_tx(dev); 4585 4586 /* setup packet for tx */ 4587 pkt_len = ETH_DATA_LEN; 4588 tx_skb = dev_alloc_skb(pkt_len); 4589 if (!tx_skb) { 4590 printk(KERN_ERR "dev_alloc_skb() failed during loopback test" 4591 " of %s\n", dev->name); 4592 ret = 0; 4593 goto out; 4594 } 4595 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, 4596 skb_tailroom(tx_skb), 4597 PCI_DMA_FROMDEVICE); 4598 pkt_data = skb_put(tx_skb, pkt_len); 4599 for (i = 0; i < pkt_len; i++) 4600 pkt_data[i] = (u8)(i & 0xff); 4601 4602 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4603 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 4604 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 4605 } else { 4606 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); 4607 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); 4608 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | 
np->tx_flags | tx_flags_extra); 4609 } 4610 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4611 pci_push(get_hwbase(dev)); 4612 4613 msleep(500); 4614 4615 /* check for rx of the packet */ 4616 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4617 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 4618 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 4619 4620 } else { 4621 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); 4622 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 4623 } 4624 4625 if (flags & NV_RX_AVAIL) { 4626 ret = 0; 4627 } else if (np->desc_ver == DESC_VER_1) { 4628 if (flags & NV_RX_ERROR) 4629 ret = 0; 4630 } else { 4631 if (flags & NV_RX2_ERROR) { 4632 ret = 0; 4633 } 4634 } 4635 4636 if (ret) { 4637 if (len != pkt_len) { 4638 ret = 0; 4639 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 4640 dev->name, len, pkt_len); 4641 } else { 4642 rx_skb = np->rx_skb[0].skb; 4643 for (i = 0; i < pkt_len; i++) { 4644 if (rx_skb->data[i] != (u8)(i & 0xff)) { 4645 ret = 0; 4646 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", 4647 dev->name, i); 4648 break; 4649 } 4650 } 4651 } 4652 } else { 4653 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); 4654 } 4655 4656 pci_unmap_page(np->pci_dev, test_dma_addr, 4657 (skb_end_pointer(tx_skb) - tx_skb->data), 4658 PCI_DMA_TODEVICE); 4659 dev_kfree_skb_any(tx_skb); 4660 out: 4661 /* stop engines */ 4662 nv_stop_rx(dev); 4663 nv_stop_tx(dev); 4664 nv_txrx_reset(dev); 4665 /* drain rx queue */ 4666 nv_drain_rx(dev); 4667 nv_drain_tx(dev); 4668 4669 if (netif_running(dev)) { 4670 writel(misc1_flags, base + NvRegMisc1); 4671 writel(filter_flags, base + NvRegPacketFilterFlags); 4672 nv_enable_irq(dev); 4673 } 4674 4675 return ret; 4676} 4677 4678static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) 4679{ 4680 struct fe_priv *np = netdev_priv(dev); 4681 u8 __iomem *base = get_hwbase(dev); 4682 int result; 4683 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); 4684 4685 if (!nv_link_test(dev)) { 4686 test->flags |= ETH_TEST_FL_FAILED; 4687 buffer[0] = 1; 4688 } 4689 4690 if (test->flags & ETH_TEST_FL_OFFLINE) { 4691 if (netif_running(dev)) { 4692 netif_stop_queue(dev); 4693#ifdef CONFIG_FORCEDETH_NAPI 4694 napi_disable(&np->napi); 4695#endif 4696 netif_tx_lock_bh(dev); 4697 spin_lock_irq(&np->lock); 4698 nv_disable_hw_interrupts(dev, np->irqmask); 4699 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 4700 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4701 } else { 4702 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4703 } 4704 /* stop engines */ 4705 nv_stop_rx(dev); 4706 nv_stop_tx(dev); 4707 nv_txrx_reset(dev); 4708 /* drain rx queue */ 4709 nv_drain_rx(dev); 4710 nv_drain_tx(dev); 4711 spin_unlock_irq(&np->lock); 4712 netif_tx_unlock_bh(dev); 4713 } 4714 4715 if (!nv_register_test(dev)) { 4716 test->flags |= ETH_TEST_FL_FAILED; 4717 buffer[1] = 1; 4718 } 4719 4720 result = nv_interrupt_test(dev); 4721 if (result != 1) { 4722 test->flags |= ETH_TEST_FL_FAILED; 4723 buffer[2] = 1; 4724 } 4725 if (result == 0) { 4726 /* bail out */ 4727 return; 4728 } 4729 4730 if (!nv_loopback_test(dev)) { 4731 test->flags |= ETH_TEST_FL_FAILED; 4732 buffer[3] = 1; 4733 } 4734 4735 if (netif_running(dev)) { 4736 /* reinit driver view of the rx queue */ 4737 set_bufsize(dev); 4738 if (nv_init_ring(dev)) { 4739 if (!np->in_shutdown) 4740 mod_timer(&np->oom_kick, jiffies + 
OOM_REFILL); 4741 } 4742 /* reinit nic view of the rx queue */ 4743 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4744 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4745 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4746 base + NvRegRingSizes); 4747 pci_push(base); 4748 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4749 pci_push(base); 4750 /* restart rx engine */ 4751 nv_start_rx(dev); 4752 nv_start_tx(dev); 4753 netif_start_queue(dev); 4754#ifdef CONFIG_FORCEDETH_NAPI 4755 napi_enable(&np->napi); 4756#endif 4757 nv_enable_hw_interrupts(dev, np->irqmask); 4758 } 4759 } 4760} 4761 4762static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) 4763{ 4764 switch (stringset) { 4765 case ETH_SS_STATS: 4766 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); 4767 break; 4768 case ETH_SS_TEST: 4769 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); 4770 break; 4771 } 4772} 4773 4774static const struct ethtool_ops ops = { 4775 .get_drvinfo = nv_get_drvinfo, 4776 .get_link = ethtool_op_get_link, 4777 .get_wol = nv_get_wol, 4778 .set_wol = nv_set_wol, 4779 .get_settings = nv_get_settings, 4780 .set_settings = nv_set_settings, 4781 .get_regs_len = nv_get_regs_len, 4782 .get_regs = nv_get_regs, 4783 .nway_reset = nv_nway_reset, 4784 .set_tso = nv_set_tso, 4785 .get_ringparam = nv_get_ringparam, 4786 .set_ringparam = nv_set_ringparam, 4787 .get_pauseparam = nv_get_pauseparam, 4788 .set_pauseparam = nv_set_pauseparam, 4789 .get_rx_csum = nv_get_rx_csum, 4790 .set_rx_csum = nv_set_rx_csum, 4791 .set_tx_csum = nv_set_tx_csum, 4792 .set_sg = nv_set_sg, 4793 .get_strings = nv_get_strings, 4794 .get_ethtool_stats = nv_get_ethtool_stats, 4795 .get_sset_count = nv_get_sset_count, 4796 .self_test = nv_self_test, 4797}; 4798 4799static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 4800{ 4801 struct fe_priv *np = get_nvpriv(dev); 4802 4803 spin_lock_irq(&np->lock); 4804 4805 /* save vlan group */ 4806 np->vlangrp = grp; 4807 4808 if (grp) { 4809 /* enable vlan on MAC */ 4810 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; 4811 } else { 4812 /* disable vlan on MAC */ 4813 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 4814 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 4815 } 4816 4817 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4818 4819 spin_unlock_irq(&np->lock); 4820} 4821 4822/* The mgmt unit and driver use a semaphore to access the phy during init */ 4823static int nv_mgmt_acquire_sema(struct net_device *dev) 4824{ 4825 u8 __iomem *base = get_hwbase(dev); 4826 int i; 4827 u32 tx_ctrl, mgmt_sema; 4828 4829 for (i = 0; i < 10; i++) { 4830 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; 4831 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) 4832 break; 4833 msleep(500); 4834 } 4835 4836 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) 4837 return 0; 4838 4839 for (i = 0; i < 2; i++) { 4840 tx_ctrl = readl(base + NvRegTransmitterControl); 4841 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; 4842 writel(tx_ctrl, base + NvRegTransmitterControl); 4843 4844 /* verify that semaphore was acquired */ 4845 tx_ctrl = readl(base + NvRegTransmitterControl); 4846 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && 4847 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == 
NVREG_XMITCTL_MGMT_SEMA_FREE)) 4848 return 1; 4849 else 4850 udelay(50); 4851 } 4852 4853 return 0; 4854} 4855 4856static int nv_open(struct net_device *dev) 4857{ 4858 struct fe_priv *np = netdev_priv(dev); 4859 u8 __iomem *base = get_hwbase(dev); 4860 int ret = 1; 4861 int oom, i; 4862 4863 dprintk(KERN_DEBUG "nv_open: begin\n"); 4864 4865 /* erase previous misconfiguration */ 4866 if (np->driver_data & DEV_HAS_POWER_CNTRL) 4867 nv_mac_reset(dev); 4868 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 4869 writel(0, base + NvRegMulticastAddrB); 4870 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 4871 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 4872 writel(0, base + NvRegPacketFilterFlags); 4873 4874 writel(0, base + NvRegTransmitterControl); 4875 writel(0, base + NvRegReceiverControl); 4876 4877 writel(0, base + NvRegAdapterControl); 4878 4879 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) 4880 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 4881 4882 /* initialize descriptor rings */ 4883 set_bufsize(dev); 4884 oom = nv_init_ring(dev); 4885 4886 writel(0, base + NvRegLinkSpeed); 4887 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 4888 nv_txrx_reset(dev); 4889 writel(0, base + NvRegUnknownSetupReg6); 4890 4891 np->in_shutdown = 0; 4892 4893 /* give hw rings */ 4894 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4895 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4896 base + NvRegRingSizes); 4897 4898 writel(np->linkspeed, base + NvRegLinkSpeed); 4899 if (np->desc_ver == DESC_VER_1) 4900 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); 4901 else 4902 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); 4903 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4904 writel(np->vlanctl_bits, base + NvRegVlanControl); 4905 pci_push(base); 4906 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 4907 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 4908 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 4909 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 4910 4911 writel(0, base + NvRegMIIMask); 4912 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4913 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 4914 4915 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 4916 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 4917 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 4918 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4919 4920 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 4921 get_random_bytes(&i, sizeof(i)); 4922 writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); 4923 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); 4924 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); 4925 if (poll_interval == -1) { 4926 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 4927 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 4928 else 4929 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 4930 } 4931 else 4932 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 4933 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4934 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 4935 base + NvRegAdapterControl); 4936 
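	/* note: the MII management speed/delay, the link-change interrupt
	 * mask, wake-on-lan and the power state are programmed below, before
	 * any irq is requested and before the rx/tx engines are started */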
writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); 4937 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); 4938 if (np->wolenabled) 4939 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 4940 4941 i = readl(base + NvRegPowerState); 4942 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 4943 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 4944 4945 pci_push(base); 4946 udelay(10); 4947 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); 4948 4949 nv_disable_hw_interrupts(dev, np->irqmask); 4950 pci_push(base); 4951 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 4952 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4953 pci_push(base); 4954 4955 if (nv_request_irq(dev, 0)) { 4956 goto out_drain; 4957 } 4958 4959 /* ask for interrupts */ 4960 nv_enable_hw_interrupts(dev, np->irqmask); 4961 4962 spin_lock_irq(&np->lock); 4963 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 4964 writel(0, base + NvRegMulticastAddrB); 4965 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 4966 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 4967 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 4968 /* One manual link speed update: Interrupts are enabled, future link 4969 * speed changes cause interrupts and are handled by nv_link_irq(). 4970 */ 4971 { 4972 u32 miistat; 4973 miistat = readl(base + NvRegMIIStatus); 4974 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 4975 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); 4976 } 4977 /* set linkspeed to invalid value, thus force nv_update_linkspeed 4978 * to init hw */ 4979 np->linkspeed = 0; 4980 ret = nv_update_linkspeed(dev); 4981 nv_start_rx(dev); 4982 nv_start_tx(dev); 4983 netif_start_queue(dev); 4984#ifdef CONFIG_FORCEDETH_NAPI 4985 napi_enable(&np->napi); 4986#endif 4987 4988 if (ret) { 4989 netif_carrier_on(dev); 4990 } else { 4991 printk(KERN_INFO "%s: no link during initialization.\n", dev->name); 4992 netif_carrier_off(dev); 4993 } 4994 if (oom) 4995 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4996 4997 /* start statistics timer */ 4998 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) 4999 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 5000 5001 spin_unlock_irq(&np->lock); 5002 5003 return 0; 5004out_drain: 5005 drain_ring(dev); 5006 return ret; 5007} 5008 5009static int nv_close(struct net_device *dev) 5010{ 5011 struct fe_priv *np = netdev_priv(dev); 5012 u8 __iomem *base; 5013 5014 spin_lock_irq(&np->lock); 5015 np->in_shutdown = 1; 5016 spin_unlock_irq(&np->lock); 5017#ifdef CONFIG_FORCEDETH_NAPI 5018 napi_disable(&np->napi); 5019#endif 5020 synchronize_irq(np->pci_dev->irq); 5021 5022 del_timer_sync(&np->oom_kick); 5023 del_timer_sync(&np->nic_poll); 5024 del_timer_sync(&np->stats_poll); 5025 5026 netif_stop_queue(dev); 5027 spin_lock_irq(&np->lock); 5028 nv_stop_tx(dev); 5029 nv_stop_rx(dev); 5030 nv_txrx_reset(dev); 5031 5032 /* disable interrupts on the nic or we will lock up */ 5033 base = get_hwbase(dev); 5034 nv_disable_hw_interrupts(dev, np->irqmask); 5035 pci_push(base); 5036 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); 5037 5038 spin_unlock_irq(&np->lock); 5039 5040 nv_free_irq(dev); 5041 5042 drain_ring(dev); 5043 5044 if (np->wolenabled) { 5045 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5046 nv_start_rx(dev); 5047 } 5048 5049 /* FIXME: power down nic */ 5050 5051 return 0; 5052} 5053 5054static int __devinit 
nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 5055{ 5056 struct net_device *dev; 5057 struct fe_priv *np; 5058 unsigned long addr; 5059 u8 __iomem *base; 5060 int err, i; 5061 u32 powerstate, txreg; 5062 u32 phystate_orig = 0, phystate; 5063 int phyinitialized = 0; 5064 DECLARE_MAC_BUF(mac); 5065 static int printed_version; 5066 5067 if (!printed_version++) 5068 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet" 5069 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION); 5070 5071 dev = alloc_etherdev(sizeof(struct fe_priv)); 5072 err = -ENOMEM; 5073 if (!dev) 5074 goto out; 5075 5076 np = netdev_priv(dev); 5077 np->dev = dev; 5078 np->pci_dev = pci_dev; 5079 spin_lock_init(&np->lock); 5080 SET_NETDEV_DEV(dev, &pci_dev->dev); 5081 5082 init_timer(&np->oom_kick); 5083 np->oom_kick.data = (unsigned long) dev; 5084 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ 5085 init_timer(&np->nic_poll); 5086 np->nic_poll.data = (unsigned long) dev; 5087 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ 5088 init_timer(&np->stats_poll); 5089 np->stats_poll.data = (unsigned long) dev; 5090 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ 5091 5092 err = pci_enable_device(pci_dev); 5093 if (err) 5094 goto out_free; 5095 5096 pci_set_master(pci_dev); 5097 5098 err = pci_request_regions(pci_dev, DRV_NAME); 5099 if (err < 0) 5100 goto out_disable; 5101 5102 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) 5103 np->register_size = NV_PCI_REGSZ_VER3; 5104 else if (id->driver_data & DEV_HAS_STATISTICS_V1) 5105 np->register_size = NV_PCI_REGSZ_VER2; 5106 else 5107 np->register_size = NV_PCI_REGSZ_VER1; 5108 5109 err = -EINVAL; 5110 addr = 0; 5111 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5112 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", 5113 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), 5114 pci_resource_len(pci_dev, i), 5115 pci_resource_flags(pci_dev, i)); 5116 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 5117 pci_resource_len(pci_dev, i) >= np->register_size) { 5118 addr = pci_resource_start(pci_dev, i); 5119 break; 5120 } 5121 } 5122 if (i == DEVICE_COUNT_RESOURCE) { 5123 dev_printk(KERN_INFO, &pci_dev->dev, 5124 "Couldn't find register window\n"); 5125 goto out_relreg; 5126 } 5127 5128 /* copy of driver data */ 5129 np->driver_data = id->driver_data; 5130 5131 /* handle different descriptor versions */ 5132 if (id->driver_data & DEV_HAS_HIGH_DMA) { 5133 /* packet format 3: supports 40-bit addressing */ 5134 np->desc_ver = DESC_VER_3; 5135 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 5136 if (dma_64bit) { 5137 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) 5138 dev_printk(KERN_INFO, &pci_dev->dev, 5139 "64-bit DMA failed, using 32-bit addressing\n"); 5140 else 5141 dev->features |= NETIF_F_HIGHDMA; 5142 if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) { 5143 dev_printk(KERN_INFO, &pci_dev->dev, 5144 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); 5145 } 5146 } 5147 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 5148 /* packet format 2: supports jumbo frames */ 5149 np->desc_ver = DESC_VER_2; 5150 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; 5151 } else { 5152 /* original packet format */ 5153 np->desc_ver = DESC_VER_1; 5154 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; 5155 } 5156 5157 np->pkt_limit = NV_PKTLIMIT_1; 5158 if (id->driver_data & DEV_HAS_LARGEDESC) 5159 np->pkt_limit = NV_PKTLIMIT_2; 5160 5161 if (id->driver_data 
& DEV_HAS_CHECKSUM) { 5162 np->rx_csum = 1; 5163 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 5164 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 5165 dev->features |= NETIF_F_TSO; 5166 } 5167 5168 np->vlanctl_bits = 0; 5169 if (id->driver_data & DEV_HAS_VLAN) { 5170 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; 5171 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; 5172 dev->vlan_rx_register = nv_vlan_rx_register; 5173 } 5174 5175 np->msi_flags = 0; 5176 if ((id->driver_data & DEV_HAS_MSI) && msi) { 5177 np->msi_flags |= NV_MSI_CAPABLE; 5178 } 5179 if ((id->driver_data & DEV_HAS_MSI_X) && msix) { 5180 np->msi_flags |= NV_MSI_X_CAPABLE; 5181 } 5182 5183 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; 5184 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) || 5185 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) || 5186 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) { 5187 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; 5188 } 5189 5190 5191 err = -ENOMEM; 5192 np->base = ioremap(addr, np->register_size); 5193 if (!np->base) 5194 goto out_relreg; 5195 dev->base_addr = (unsigned long)np->base; 5196 5197 dev->irq = pci_dev->irq; 5198 5199 np->rx_ring_size = RX_RING_DEFAULT; 5200 np->tx_ring_size = TX_RING_DEFAULT; 5201 5202 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 5203 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 5204 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 5205 &np->ring_addr); 5206 if (!np->rx_ring.orig) 5207 goto out_unmap; 5208 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 5209 } else { 5210 np->rx_ring.ex = pci_alloc_consistent(pci_dev, 5211 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 5212 &np->ring_addr); 5213 if (!np->rx_ring.ex) 5214 goto out_unmap; 5215 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 5216 } 5217 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); 5218 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); 5219 if (!np->rx_skb || !np->tx_skb) 5220 goto out_freering; 5221 5222 dev->open = nv_open; 5223 dev->stop = nv_close; 5224 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 5225 dev->hard_start_xmit = nv_start_xmit; 5226 else 5227 dev->hard_start_xmit = nv_start_xmit_optimized; 5228 dev->get_stats = nv_get_stats; 5229 dev->change_mtu = nv_change_mtu; 5230 dev->set_mac_address = nv_set_mac_address; 5231 dev->set_multicast_list = nv_set_multicast; 5232#ifdef CONFIG_NET_POLL_CONTROLLER 5233 dev->poll_controller = nv_poll_controller; 5234#endif 5235#ifdef CONFIG_FORCEDETH_NAPI 5236 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); 5237#endif 5238 SET_ETHTOOL_OPS(dev, &ops); 5239 dev->tx_timeout = nv_tx_timeout; 5240 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 5241 5242 pci_set_drvdata(pci_dev, dev); 5243 5244 /* read the mac address */ 5245 base = get_hwbase(dev); 5246 np->orig_mac[0] = readl(base + NvRegMacAddrA); 5247 np->orig_mac[1] = readl(base + NvRegMacAddrB); 5248 5249 /* check the workaround bit for correct mac address order */ 5250 txreg = readl(base + NvRegTransmitPoll); 5251 if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) || 5252 (id->driver_data & DEV_HAS_CORRECT_MACADDR)) { 5253 /* mac address is already in correct order */ 5254 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; 5255 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; 5256 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; 5257 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 
0xff; 5258 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; 5259 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; 5260 } else { 5261 /* need to reverse mac address to correct order */ 5262 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; 5263 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; 5264 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; 5265 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; 5266 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 5267 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 5268 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5269 } 5270 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 5271 5272 if (!is_valid_ether_addr(dev->perm_addr)) { 5273 /* 5274 * Bad mac address. At least one bios sets the mac address 5275 * to 01:23:45:67:89:ab 5276 */ 5277 dev_printk(KERN_ERR, &pci_dev->dev, 5278 "Invalid Mac address detected: %s\n", 5279 print_mac(mac, dev->dev_addr)); 5280 dev_printk(KERN_ERR, &pci_dev->dev, 5281 "Please complain to your hardware vendor. Switching to a random MAC.\n"); 5282 dev->dev_addr[0] = 0x00; 5283 dev->dev_addr[1] = 0x00; 5284 dev->dev_addr[2] = 0x6c; 5285 get_random_bytes(&dev->dev_addr[3], 3); 5286 } 5287 5288 dprintk(KERN_DEBUG "%s: MAC Address %s\n", 5289 pci_name(pci_dev), print_mac(mac, dev->dev_addr)); 5290 5291 /* set mac address */ 5292 nv_copy_mac_to_hw(dev); 5293 5294 /* disable WOL */ 5295 writel(0, base + NvRegWakeUpFlags); 5296 np->wolenabled = 0; 5297 5298 if (id->driver_data & DEV_HAS_POWER_CNTRL) { 5299 5300 /* take phy and nic out of low power mode */ 5301 powerstate = readl(base + NvRegPowerState2); 5302 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; 5303 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || 5304 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) && 5305 pci_dev->revision >= 0xA3) 5306 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; 5307 writel(powerstate, base + NvRegPowerState2); 5308 } 5309 5310 if (np->desc_ver == DESC_VER_1) { 5311 np->tx_flags = NV_TX_VALID; 5312 } else { 5313 np->tx_flags = NV_TX2_VALID; 5314 } 5315 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { 5316 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 5317 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 5318 np->msi_flags |= 0x0003; 5319 } else { 5320 np->irqmask = NVREG_IRQMASK_CPU; 5321 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 5322 np->msi_flags |= 0x0001; 5323 } 5324 5325 if (id->driver_data & DEV_NEED_TIMERIRQ) 5326 np->irqmask |= NVREG_IRQ_TIMER; 5327 if (id->driver_data & DEV_NEED_LINKTIMER) { 5328 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev)); 5329 np->need_linktimer = 1; 5330 np->link_timeout = jiffies + LINK_TIMEOUT; 5331 } else { 5332 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev)); 5333 np->need_linktimer = 0; 5334 } 5335 5336 /* clear phy state and temporarily halt phy interrupts */ 5337 writel(0, base + NvRegMIIMask); 5338 phystate = readl(base + NvRegAdapterControl); 5339 if (phystate & NVREG_ADAPTCTL_RUNNING) { 5340 phystate_orig = 1; 5341 phystate &= ~NVREG_ADAPTCTL_RUNNING; 5342 writel(phystate, base + NvRegAdapterControl); 5343 } 5344 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5345 5346 if (id->driver_data & DEV_HAS_MGMT_UNIT) { 5347 /* management unit running on the mac? */ 5348 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) { 5349 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; 5350 dprintk(KERN_INFO "%s: mgmt unit is running. 
	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
			np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
			if (nv_mgmt_acquire_sema(dev)) {
				/* management unit setup the phy already? */
				if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
				    NVREG_XMITCTL_SYNC_PHY_INIT) {
					/* phy is inited by mgmt unit */
					phyinitialized = 1;
					dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
				} else {
					/* we need to init the phy */
				}
			}
		}
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 33) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "open: Could not find a valid PHY.\n");
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "unable to register netdev: %d\n", err);
		goto out_error;
	}

	dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
		   "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		   dev->name,
		   np->phy_oui,
		   np->phyaddr,
		   dev->dev_addr[0],
		   dev->dev_addr[1],
		   dev->dev_addr[2],
		   dev->dev_addr[3],
		   dev->dev_addr[4],
		   dev->dev_addr[5]);

	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		   dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ?
				"csum " : "",
		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
				"vlan " : "",
		   id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		   id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		   id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		   np->gigabit == PHY_GIGABIT ? "gbit " : "",
		   np->need_linktimer ? "lnktim " : "",
		   np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		   np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		   np->desc_ver);

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}

static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);

	// Gross.
	nv_close(dev);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
	return 0;
}

static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int rc = 0;

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	rc = nv_open(dev);
out:
	return rc;
}
#else
#define nv_suspend NULL
#define nv_resume NULL
#endif /* CONFIG_PM */
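/*
 * PCI device table: each entry matches one NVIDIA Ethernet function by
 * device ID, and driver_data carries the DEV_* feature flags that
 * nv_probe() inspects (id->driver_data) to decide which hardware
 * features to enable for that chip.
 */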
static struct pci_device_id pci_tbl[] = {
	{ /* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{0,},
};

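/*
 * The pci_driver structure below ties the table and the entry points
 * together: the PCI core calls nv_probe()/nv_remove() for matching
 * devices, while nv_suspend()/nv_resume() are real functions only when
 * CONFIG_PM is set (they are defined as NULL otherwise, see above).
 */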
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
	.suspend = nv_suspend,
	.resume = nv_resume,
};

static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

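/*
 * The module parameters below can be overridden at load time, for
 * example (illustrative values, not defaults from this file):
 *
 *   modprobe forcedeth optimization_mode=1 msi=0
 *
 * which selects CPU/timer interrupt mode and disables MSI.
 */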
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);