Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v2.6.18-rc6 4571 lines 138 kB view raw
1/* 2 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers. 3 * 4 * Note: This driver is a cleanroom reimplementation based on reverse 5 * engineered documentation written by Carl-Daniel Hailfinger 6 * and Andrew de Quincey. It's neither supported nor endorsed 7 * by NVIDIA Corp. Use at your own risk. 8 * 9 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered 10 * trademarks of NVIDIA Corporation in the United States and other 11 * countries. 12 * 13 * Copyright (C) 2003,4,5 Manfred Spraul 14 * Copyright (C) 2004 Andrew de Quincey (wol support) 15 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane 16 * IRQ rate fixes, bigendian fixes, cleanups, verification) 17 * Copyright (c) 2004 NVIDIA Corporation 18 * 19 * This program is free software; you can redistribute it and/or modify 20 * it under the terms of the GNU General Public License as published by 21 * the Free Software Foundation; either version 2 of the License, or 22 * (at your option) any later version. 23 * 24 * This program is distributed in the hope that it will be useful, 25 * but WITHOUT ANY WARRANTY; without even the implied warranty of 26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 27 * GNU General Public License for more details. 28 * 29 * You should have received a copy of the GNU General Public License 30 * along with this program; if not, write to the Free Software 31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 32 * 33 * Changelog: 34 * 0.01: 05 Oct 2003: First release that compiles without warnings. 35 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs. 36 * Check all PCI BARs for the register window. 37 * udelay added to mii_rw. 38 * 0.03: 06 Oct 2003: Initialize dev->irq. 39 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks. 40 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout. 41 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated, 42 * irq mask updated 43 * 0.07: 14 Oct 2003: Further irq mask updates. 44 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill 45 * added into irq handler, NULL check for drain_ring. 46 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the 47 * requested interrupt sources. 48 * 0.10: 20 Oct 2003: First cleanup for release. 49 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased. 50 * MAC Address init fix, set_multicast cleanup. 51 * 0.12: 23 Oct 2003: Cleanups for release. 52 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10. 53 * Set link speed correctly. start rx before starting 54 * tx (nv_start_rx sets the link speed). 55 * 0.14: 25 Oct 2003: Nic dependant irq mask. 56 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during 57 * open. 58 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size 59 * increased to 1628 bytes. 60 * 0.17: 16 Nov 2003: undo rx buffer size increase. Substract 1 from 61 * the tx length. 62 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats 63 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac 64 * addresses, really stop rx if already running 65 * in nv_start_rx, clean up a bit. 66 * 0.20: 07 Dec 2003: alloc fixes 67 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix. 68 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup 69 * on close. 70 * 0.23: 26 Jan 2004: various small cleanups 71 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces 72 * 0.25: 09 Mar 2004: wol support 73 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes 74 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings, 75 * added CK804/MCP04 device IDs, code fixes 76 * for registers, link status and other minor fixes. 77 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe 78 * 0.29: 31 Aug 2004: Add backup timer for link change notification. 79 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset 80 * into nv_close, otherwise reenabling for wol can 81 * cause DMA to kfree'd memory. 82 * 0.31: 14 Nov 2004: ethtool support for getting/setting link 83 * capabilities. 84 * 0.32: 16 Apr 2005: RX_ERROR4 handling added. 85 * 0.33: 16 May 2005: Support for MCP51 added. 86 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics. 87 * 0.35: 26 Jun 2005: Support for MCP55 added. 88 * 0.36: 28 Jun 2005: Add jumbo frame support. 89 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list 90 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of 91 * per-packet flags. 92 * 0.39: 18 Jul 2005: Add 64bit descriptor support. 93 * 0.40: 19 Jul 2005: Add support for mac address change. 94 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead 95 * of nv_remove 96 * 0.42: 06 Aug 2005: Fix lack of link speed initialization 97 * in the second (and later) nv_open call 98 * 0.43: 10 Aug 2005: Add support for tx checksum. 99 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation. 100 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check 101 * 0.46: 20 Oct 2005: Add irq optimization modes. 102 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan. 103 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single 104 * 0.49: 10 Dec 2005: Fix tso for large buffers. 105 * 0.50: 20 Jan 2006: Add 8021pq tagging support. 106 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings. 107 * 0.52: 20 Jan 2006: Add MSI/MSIX support. 108 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. 109 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. 110 * 0.55: 22 Mar 2006: Add flow control (pause frame). 111 * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support. 112 * 113 * Known bugs: 114 * We suspect that on some hardware no TX done interrupts are generated. 115 * This means recovery from netif_stop_queue only happens if the hw timer 116 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT) 117 * and the timer is active in the IRQMask, or if a rx packet arrives by chance. 118 * If your hardware reliably generates tx done interrupts, then you can remove 119 * DEV_NEED_TIMERIRQ from the driver_data flags. 120 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 121 * superfluous timer interrupts from the nic. 122 */ 123#define FORCEDETH_VERSION "0.56" 124#define DRV_NAME "forcedeth" 125 126#include <linux/module.h> 127#include <linux/types.h> 128#include <linux/pci.h> 129#include <linux/interrupt.h> 130#include <linux/netdevice.h> 131#include <linux/etherdevice.h> 132#include <linux/delay.h> 133#include <linux/spinlock.h> 134#include <linux/ethtool.h> 135#include <linux/timer.h> 136#include <linux/skbuff.h> 137#include <linux/mii.h> 138#include <linux/random.h> 139#include <linux/init.h> 140#include <linux/if_vlan.h> 141#include <linux/dma-mapping.h> 142 143#include <asm/irq.h> 144#include <asm/io.h> 145#include <asm/uaccess.h> 146#include <asm/system.h> 147 148#if 0 149#define dprintk printk 150#else 151#define dprintk(x...) do { } while (0) 152#endif 153 154 155/* 156 * Hardware access: 157 */ 158 159#define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */ 160#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */ 161#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */ 162#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */ 163#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */ 164#define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */ 165#define DEV_HAS_MSI 0x0040 /* device supports MSI */ 166#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ 167#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ 168#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ 169#define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */ 170#define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */ 171 172enum { 173 NvRegIrqStatus = 0x000, 174#define NVREG_IRQSTAT_MIIEVENT 0x040 175#define NVREG_IRQSTAT_MASK 0x1ff 176 NvRegIrqMask = 0x004, 177#define NVREG_IRQ_RX_ERROR 0x0001 178#define NVREG_IRQ_RX 0x0002 179#define NVREG_IRQ_RX_NOBUF 0x0004 180#define NVREG_IRQ_TX_ERR 0x0008 181#define NVREG_IRQ_TX_OK 0x0010 182#define NVREG_IRQ_TIMER 0x0020 183#define NVREG_IRQ_LINK 0x0040 184#define NVREG_IRQ_RX_FORCED 0x0080 185#define NVREG_IRQ_TX_FORCED 0x0100 186#define NVREG_IRQMASK_THROUGHPUT 0x00df 187#define NVREG_IRQMASK_CPU 0x0040 188#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) 189#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED) 190#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK) 191 192#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \ 193 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \ 194 NVREG_IRQ_TX_FORCED)) 195 196 NvRegUnknownSetupReg6 = 0x008, 197#define NVREG_UNKSETUP6_VAL 3 198 199/* 200 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic 201 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms 202 */ 203 NvRegPollingInterval = 0x00c, 204#define NVREG_POLL_DEFAULT_THROUGHPUT 970 205#define NVREG_POLL_DEFAULT_CPU 13 206 NvRegMSIMap0 = 0x020, 207 NvRegMSIMap1 = 0x024, 208 NvRegMSIIrqMask = 0x030, 209#define NVREG_MSI_VECTOR_0_ENABLED 0x01 210 NvRegMisc1 = 0x080, 211#define NVREG_MISC1_PAUSE_TX 0x01 212#define NVREG_MISC1_HD 0x02 213#define NVREG_MISC1_FORCE 0x3b0f3c 214 215 NvRegMacReset = 0x3c, 216#define NVREG_MAC_RESET_ASSERT 0x0F3 217 NvRegTransmitterControl = 0x084, 218#define NVREG_XMITCTL_START 0x01 219 NvRegTransmitterStatus = 0x088, 220#define NVREG_XMITSTAT_BUSY 0x01 221 222 NvRegPacketFilterFlags = 0x8c, 223#define NVREG_PFF_PAUSE_RX 0x08 224#define NVREG_PFF_ALWAYS 0x7F0000 225#define NVREG_PFF_PROMISC 0x80 226#define NVREG_PFF_MYADDR 0x20 227#define NVREG_PFF_LOOPBACK 0x10 228 229 NvRegOffloadConfig = 0x90, 230#define NVREG_OFFLOAD_HOMEPHY 0x601 231#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE 232 NvRegReceiverControl = 0x094, 233#define NVREG_RCVCTL_START 0x01 234 NvRegReceiverStatus = 0x98, 235#define NVREG_RCVSTAT_BUSY 0x01 236 237 NvRegRandomSeed = 0x9c, 238#define NVREG_RNDSEED_MASK 0x00ff 239#define NVREG_RNDSEED_FORCE 0x7f00 240#define NVREG_RNDSEED_FORCE2 0x2d00 241#define NVREG_RNDSEED_FORCE3 0x7400 242 243 NvRegTxDeferral = 0xA0, 244#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f 245#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f 246#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f 247 NvRegRxDeferral = 0xA4, 248#define NVREG_RX_DEFERRAL_DEFAULT 0x16 249 NvRegMacAddrA = 0xA8, 250 NvRegMacAddrB = 0xAC, 251 NvRegMulticastAddrA = 0xB0, 252#define NVREG_MCASTADDRA_FORCE 0x01 253 NvRegMulticastAddrB = 0xB4, 254 NvRegMulticastMaskA = 0xB8, 255 NvRegMulticastMaskB = 0xBC, 256 257 NvRegPhyInterface = 0xC0, 258#define PHY_RGMII 0x10000000 259 260 NvRegTxRingPhysAddr = 0x100, 261 NvRegRxRingPhysAddr = 0x104, 262 NvRegRingSizes = 0x108, 263#define NVREG_RINGSZ_TXSHIFT 0 264#define NVREG_RINGSZ_RXSHIFT 16 265 NvRegUnknownTransmitterReg = 0x10c, 266 NvRegLinkSpeed = 0x110, 267#define NVREG_LINKSPEED_FORCE 0x10000 268#define NVREG_LINKSPEED_10 1000 269#define NVREG_LINKSPEED_100 100 270#define NVREG_LINKSPEED_1000 50 271#define NVREG_LINKSPEED_MASK (0xFFF) 272 NvRegUnknownSetupReg5 = 0x130, 273#define NVREG_UNKSETUP5_BIT31 (1<<31) 274 NvRegTxWatermark = 0x13c, 275#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010 276#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000 277#define NVREG_TX_WM_DESC2_3_1000 0xfe08000 278 NvRegTxRxControl = 0x144, 279#define NVREG_TXRXCTL_KICK 0x0001 280#define NVREG_TXRXCTL_BIT1 0x0002 281#define NVREG_TXRXCTL_BIT2 0x0004 282#define NVREG_TXRXCTL_IDLE 0x0008 283#define NVREG_TXRXCTL_RESET 0x0010 284#define NVREG_TXRXCTL_RXCHECK 0x0400 285#define NVREG_TXRXCTL_DESC_1 0 286#define NVREG_TXRXCTL_DESC_2 0x02100 287#define NVREG_TXRXCTL_DESC_3 0x02200 288#define NVREG_TXRXCTL_VLANSTRIP 0x00040 289#define NVREG_TXRXCTL_VLANINS 0x00080 290 NvRegTxRingPhysAddrHigh = 0x148, 291 NvRegRxRingPhysAddrHigh = 0x14C, 292 NvRegTxPauseFrame = 0x170, 293#define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080 294#define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030 295 NvRegMIIStatus = 0x180, 296#define NVREG_MIISTAT_ERROR 0x0001 297#define NVREG_MIISTAT_LINKCHANGE 0x0008 298#define NVREG_MIISTAT_MASK 0x000f 299#define NVREG_MIISTAT_MASK2 0x000f 300 NvRegUnknownSetupReg4 = 0x184, 301#define NVREG_UNKSETUP4_VAL 8 302 303 NvRegAdapterControl = 0x188, 304#define NVREG_ADAPTCTL_START 0x02 305#define NVREG_ADAPTCTL_LINKUP 0x04 306#define NVREG_ADAPTCTL_PHYVALID 0x40000 307#define NVREG_ADAPTCTL_RUNNING 0x100000 308#define NVREG_ADAPTCTL_PHYSHIFT 24 309 NvRegMIISpeed = 0x18c, 310#define NVREG_MIISPEED_BIT8 (1<<8) 311#define NVREG_MIIDELAY 5 312 NvRegMIIControl = 0x190, 313#define NVREG_MIICTL_INUSE 0x08000 314#define NVREG_MIICTL_WRITE 0x00400 315#define NVREG_MIICTL_ADDRSHIFT 5 316 NvRegMIIData = 0x194, 317 NvRegWakeUpFlags = 0x200, 318#define NVREG_WAKEUPFLAGS_VAL 0x7770 319#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 320#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16 321#define NVREG_WAKEUPFLAGS_D3SHIFT 12 322#define NVREG_WAKEUPFLAGS_D2SHIFT 8 323#define NVREG_WAKEUPFLAGS_D1SHIFT 4 324#define NVREG_WAKEUPFLAGS_D0SHIFT 0 325#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01 326#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02 327#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04 328#define NVREG_WAKEUPFLAGS_ENABLE 0x1111 329 330 NvRegPatternCRC = 0x204, 331 NvRegPatternMask = 0x208, 332 NvRegPowerCap = 0x268, 333#define NVREG_POWERCAP_D3SUPP (1<<30) 334#define NVREG_POWERCAP_D2SUPP (1<<26) 335#define NVREG_POWERCAP_D1SUPP (1<<25) 336 NvRegPowerState = 0x26c, 337#define NVREG_POWERSTATE_POWEREDUP 0x8000 338#define NVREG_POWERSTATE_VALID 0x0100 339#define NVREG_POWERSTATE_MASK 0x0003 340#define NVREG_POWERSTATE_D0 0x0000 341#define NVREG_POWERSTATE_D1 0x0001 342#define NVREG_POWERSTATE_D2 0x0002 343#define NVREG_POWERSTATE_D3 0x0003 344 NvRegTxCnt = 0x280, 345 NvRegTxZeroReXmt = 0x284, 346 NvRegTxOneReXmt = 0x288, 347 NvRegTxManyReXmt = 0x28c, 348 NvRegTxLateCol = 0x290, 349 NvRegTxUnderflow = 0x294, 350 NvRegTxLossCarrier = 0x298, 351 NvRegTxExcessDef = 0x29c, 352 NvRegTxRetryErr = 0x2a0, 353 NvRegRxFrameErr = 0x2a4, 354 NvRegRxExtraByte = 0x2a8, 355 NvRegRxLateCol = 0x2ac, 356 NvRegRxRunt = 0x2b0, 357 NvRegRxFrameTooLong = 0x2b4, 358 NvRegRxOverflow = 0x2b8, 359 NvRegRxFCSErr = 0x2bc, 360 NvRegRxFrameAlignErr = 0x2c0, 361 NvRegRxLenErr = 0x2c4, 362 NvRegRxUnicast = 0x2c8, 363 NvRegRxMulticast = 0x2cc, 364 NvRegRxBroadcast = 0x2d0, 365 NvRegTxDef = 0x2d4, 366 NvRegTxFrame = 0x2d8, 367 NvRegRxCnt = 0x2dc, 368 NvRegTxPause = 0x2e0, 369 NvRegRxPause = 0x2e4, 370 NvRegRxDropFrame = 0x2e8, 371 NvRegVlanControl = 0x300, 372#define NVREG_VLANCONTROL_ENABLE 0x2000 373 NvRegMSIXMap0 = 0x3e0, 374 NvRegMSIXMap1 = 0x3e4, 375 NvRegMSIXIrqStatus = 0x3f0, 376 377 NvRegPowerState2 = 0x600, 378#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11 379#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001 380}; 381 382/* Big endian: should work, but is untested */ 383struct ring_desc { 384 u32 PacketBuffer; 385 u32 FlagLen; 386}; 387 388struct ring_desc_ex { 389 u32 PacketBufferHigh; 390 u32 PacketBufferLow; 391 u32 TxVlan; 392 u32 FlagLen; 393}; 394 395typedef union _ring_type { 396 struct ring_desc* orig; 397 struct ring_desc_ex* ex; 398} ring_type; 399 400#define FLAG_MASK_V1 0xffff0000 401#define FLAG_MASK_V2 0xffffc000 402#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1) 403#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2) 404 405#define NV_TX_LASTPACKET (1<<16) 406#define NV_TX_RETRYERROR (1<<19) 407#define NV_TX_FORCED_INTERRUPT (1<<24) 408#define NV_TX_DEFERRED (1<<26) 409#define NV_TX_CARRIERLOST (1<<27) 410#define NV_TX_LATECOLLISION (1<<28) 411#define NV_TX_UNDERFLOW (1<<29) 412#define NV_TX_ERROR (1<<30) 413#define NV_TX_VALID (1<<31) 414 415#define NV_TX2_LASTPACKET (1<<29) 416#define NV_TX2_RETRYERROR (1<<18) 417#define NV_TX2_FORCED_INTERRUPT (1<<30) 418#define NV_TX2_DEFERRED (1<<25) 419#define NV_TX2_CARRIERLOST (1<<26) 420#define NV_TX2_LATECOLLISION (1<<27) 421#define NV_TX2_UNDERFLOW (1<<28) 422/* error and valid are the same for both */ 423#define NV_TX2_ERROR (1<<30) 424#define NV_TX2_VALID (1<<31) 425#define NV_TX2_TSO (1<<28) 426#define NV_TX2_TSO_SHIFT 14 427#define NV_TX2_TSO_MAX_SHIFT 14 428#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT) 429#define NV_TX2_CHECKSUM_L3 (1<<27) 430#define NV_TX2_CHECKSUM_L4 (1<<26) 431 432#define NV_TX3_VLAN_TAG_PRESENT (1<<18) 433 434#define NV_RX_DESCRIPTORVALID (1<<16) 435#define NV_RX_MISSEDFRAME (1<<17) 436#define NV_RX_SUBSTRACT1 (1<<18) 437#define NV_RX_ERROR1 (1<<23) 438#define NV_RX_ERROR2 (1<<24) 439#define NV_RX_ERROR3 (1<<25) 440#define NV_RX_ERROR4 (1<<26) 441#define NV_RX_CRCERR (1<<27) 442#define NV_RX_OVERFLOW (1<<28) 443#define NV_RX_FRAMINGERR (1<<29) 444#define NV_RX_ERROR (1<<30) 445#define NV_RX_AVAIL (1<<31) 446 447#define NV_RX2_CHECKSUMMASK (0x1C000000) 448#define NV_RX2_CHECKSUMOK1 (0x10000000) 449#define NV_RX2_CHECKSUMOK2 (0x14000000) 450#define NV_RX2_CHECKSUMOK3 (0x18000000) 451#define NV_RX2_DESCRIPTORVALID (1<<29) 452#define NV_RX2_SUBSTRACT1 (1<<25) 453#define NV_RX2_ERROR1 (1<<18) 454#define NV_RX2_ERROR2 (1<<19) 455#define NV_RX2_ERROR3 (1<<20) 456#define NV_RX2_ERROR4 (1<<21) 457#define NV_RX2_CRCERR (1<<22) 458#define NV_RX2_OVERFLOW (1<<23) 459#define NV_RX2_FRAMINGERR (1<<24) 460/* error and avail are the same for both */ 461#define NV_RX2_ERROR (1<<30) 462#define NV_RX2_AVAIL (1<<31) 463 464#define NV_RX3_VLAN_TAG_PRESENT (1<<16) 465#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) 466 467/* Miscelaneous hardware related defines: */ 468#define NV_PCI_REGSZ_VER1 0x270 469#define NV_PCI_REGSZ_VER2 0x604 470 471/* various timeout delays: all in usec */ 472#define NV_TXRX_RESET_DELAY 4 473#define NV_TXSTOP_DELAY1 10 474#define NV_TXSTOP_DELAY1MAX 500000 475#define NV_TXSTOP_DELAY2 100 476#define NV_RXSTOP_DELAY1 10 477#define NV_RXSTOP_DELAY1MAX 500000 478#define NV_RXSTOP_DELAY2 100 479#define NV_SETUP5_DELAY 5 480#define NV_SETUP5_DELAYMAX 50000 481#define NV_POWERUP_DELAY 5 482#define NV_POWERUP_DELAYMAX 5000 483#define NV_MIIBUSY_DELAY 50 484#define NV_MIIPHY_DELAY 10 485#define NV_MIIPHY_DELAYMAX 10000 486#define NV_MAC_RESET_DELAY 64 487 488#define NV_WAKEUPPATTERNS 5 489#define NV_WAKEUPMASKENTRIES 4 490 491/* General driver defaults */ 492#define NV_WATCHDOG_TIMEO (5*HZ) 493 494#define RX_RING_DEFAULT 128 495#define TX_RING_DEFAULT 256 496#define RX_RING_MIN 128 497#define TX_RING_MIN 64 498#define RING_MAX_DESC_VER_1 1024 499#define RING_MAX_DESC_VER_2_3 16384 500/* 501 * Difference between the get and put pointers for the tx ring. 502 * This is used to throttle the amount of data outstanding in the 503 * tx ring. 504 */ 505#define TX_LIMIT_DIFFERENCE 1 506 507/* rx/tx mac addr + type + vlan + align + slack*/ 508#define NV_RX_HEADERS (64) 509/* even more slack. */ 510#define NV_RX_ALLOC_PAD (64) 511 512/* maximum mtu size */ 513#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */ 514#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */ 515 516#define OOM_REFILL (1+HZ/20) 517#define POLL_WAIT (1+HZ/100) 518#define LINK_TIMEOUT (3*HZ) 519#define STATS_INTERVAL (10*HZ) 520 521/* 522 * desc_ver values: 523 * The nic supports three different descriptor types: 524 * - DESC_VER_1: Original 525 * - DESC_VER_2: support for jumbo frames. 526 * - DESC_VER_3: 64-bit format. 527 */ 528#define DESC_VER_1 1 529#define DESC_VER_2 2 530#define DESC_VER_3 3 531 532/* PHY defines */ 533#define PHY_OUI_MARVELL 0x5043 534#define PHY_OUI_CICADA 0x03f1 535#define PHYID1_OUI_MASK 0x03ff 536#define PHYID1_OUI_SHFT 6 537#define PHYID2_OUI_MASK 0xfc00 538#define PHYID2_OUI_SHFT 10 539#define PHY_INIT1 0x0f000 540#define PHY_INIT2 0x0e00 541#define PHY_INIT3 0x01000 542#define PHY_INIT4 0x0200 543#define PHY_INIT5 0x0004 544#define PHY_INIT6 0x02000 545#define PHY_GIGABIT 0x0100 546 547#define PHY_TIMEOUT 0x1 548#define PHY_ERROR 0x2 549 550#define PHY_100 0x1 551#define PHY_1000 0x2 552#define PHY_HALF 0x100 553 554#define NV_PAUSEFRAME_RX_CAPABLE 0x0001 555#define NV_PAUSEFRAME_TX_CAPABLE 0x0002 556#define NV_PAUSEFRAME_RX_ENABLE 0x0004 557#define NV_PAUSEFRAME_TX_ENABLE 0x0008 558#define NV_PAUSEFRAME_RX_REQ 0x0010 559#define NV_PAUSEFRAME_TX_REQ 0x0020 560#define NV_PAUSEFRAME_AUTONEG 0x0040 561 562/* MSI/MSI-X defines */ 563#define NV_MSI_X_MAX_VECTORS 8 564#define NV_MSI_X_VECTORS_MASK 0x000f 565#define NV_MSI_CAPABLE 0x0010 566#define NV_MSI_X_CAPABLE 0x0020 567#define NV_MSI_ENABLED 0x0040 568#define NV_MSI_X_ENABLED 0x0080 569 570#define NV_MSI_X_VECTOR_ALL 0x0 571#define NV_MSI_X_VECTOR_RX 0x0 572#define NV_MSI_X_VECTOR_TX 0x1 573#define NV_MSI_X_VECTOR_OTHER 0x2 574 575/* statistics */ 576struct nv_ethtool_str { 577 char name[ETH_GSTRING_LEN]; 578}; 579 580static const struct nv_ethtool_str nv_estats_str[] = { 581 { "tx_bytes" }, 582 { "tx_zero_rexmt" }, 583 { "tx_one_rexmt" }, 584 { "tx_many_rexmt" }, 585 { "tx_late_collision" }, 586 { "tx_fifo_errors" }, 587 { "tx_carrier_errors" }, 588 { "tx_excess_deferral" }, 589 { "tx_retry_error" }, 590 { "tx_deferral" }, 591 { "tx_packets" }, 592 { "tx_pause" }, 593 { "rx_frame_error" }, 594 { "rx_extra_byte" }, 595 { "rx_late_collision" }, 596 { "rx_runt" }, 597 { "rx_frame_too_long" }, 598 { "rx_over_errors" }, 599 { "rx_crc_errors" }, 600 { "rx_frame_align_error" }, 601 { "rx_length_error" }, 602 { "rx_unicast" }, 603 { "rx_multicast" }, 604 { "rx_broadcast" }, 605 { "rx_bytes" }, 606 { "rx_pause" }, 607 { "rx_drop_frame" }, 608 { "rx_packets" }, 609 { "rx_errors_total" } 610}; 611 612struct nv_ethtool_stats { 613 u64 tx_bytes; 614 u64 tx_zero_rexmt; 615 u64 tx_one_rexmt; 616 u64 tx_many_rexmt; 617 u64 tx_late_collision; 618 u64 tx_fifo_errors; 619 u64 tx_carrier_errors; 620 u64 tx_excess_deferral; 621 u64 tx_retry_error; 622 u64 tx_deferral; 623 u64 tx_packets; 624 u64 tx_pause; 625 u64 rx_frame_error; 626 u64 rx_extra_byte; 627 u64 rx_late_collision; 628 u64 rx_runt; 629 u64 rx_frame_too_long; 630 u64 rx_over_errors; 631 u64 rx_crc_errors; 632 u64 rx_frame_align_error; 633 u64 rx_length_error; 634 u64 rx_unicast; 635 u64 rx_multicast; 636 u64 rx_broadcast; 637 u64 rx_bytes; 638 u64 rx_pause; 639 u64 rx_drop_frame; 640 u64 rx_packets; 641 u64 rx_errors_total; 642}; 643 644/* diagnostics */ 645#define NV_TEST_COUNT_BASE 3 646#define NV_TEST_COUNT_EXTENDED 4 647 648static const struct nv_ethtool_str nv_etests_str[] = { 649 { "link (online/offline)" }, 650 { "register (offline) " }, 651 { "interrupt (offline) " }, 652 { "loopback (offline) " } 653}; 654 655struct register_test { 656 u32 reg; 657 u32 mask; 658}; 659 660static const struct register_test nv_registers_test[] = { 661 { NvRegUnknownSetupReg6, 0x01 }, 662 { NvRegMisc1, 0x03c }, 663 { NvRegOffloadConfig, 0x03ff }, 664 { NvRegMulticastAddrA, 0xffffffff }, 665 { NvRegTxWatermark, 0x0ff }, 666 { NvRegWakeUpFlags, 0x07777 }, 667 { 0,0 } 668}; 669 670/* 671 * SMP locking: 672 * All hardware access under dev->priv->lock, except the performance 673 * critical parts: 674 * - rx is (pseudo-) lockless: it relies on the single-threading provided 675 * by the arch code for interrupts. 676 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission 677 * needs dev->priv->lock :-( 678 * - set_multicast_list: preparation lockless, relies on netif_tx_lock. 679 */ 680 681/* in dev: base, irq */ 682struct fe_priv { 683 spinlock_t lock; 684 685 /* General data: 686 * Locking: spin_lock(&np->lock); */ 687 struct net_device_stats stats; 688 struct nv_ethtool_stats estats; 689 int in_shutdown; 690 u32 linkspeed; 691 int duplex; 692 int autoneg; 693 int fixed_mode; 694 int phyaddr; 695 int wolenabled; 696 unsigned int phy_oui; 697 u16 gigabit; 698 int intr_test; 699 700 /* General data: RO fields */ 701 dma_addr_t ring_addr; 702 struct pci_dev *pci_dev; 703 u32 orig_mac[2]; 704 u32 irqmask; 705 u32 desc_ver; 706 u32 txrxctl_bits; 707 u32 vlanctl_bits; 708 u32 driver_data; 709 u32 register_size; 710 711 void __iomem *base; 712 713 /* rx specific fields. 714 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); 715 */ 716 ring_type rx_ring; 717 unsigned int cur_rx, refill_rx; 718 struct sk_buff **rx_skbuff; 719 dma_addr_t *rx_dma; 720 unsigned int rx_buf_sz; 721 unsigned int pkt_limit; 722 struct timer_list oom_kick; 723 struct timer_list nic_poll; 724 struct timer_list stats_poll; 725 u32 nic_poll_irq; 726 int rx_ring_size; 727 728 /* media detection workaround. 729 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); 730 */ 731 int need_linktimer; 732 unsigned long link_timeout; 733 /* 734 * tx specific fields. 735 */ 736 ring_type tx_ring; 737 unsigned int next_tx, nic_tx; 738 struct sk_buff **tx_skbuff; 739 dma_addr_t *tx_dma; 740 unsigned int *tx_dma_len; 741 u32 tx_flags; 742 int tx_ring_size; 743 int tx_limit_start; 744 int tx_limit_stop; 745 746 /* vlan fields */ 747 struct vlan_group *vlangrp; 748 749 /* msi/msi-x fields */ 750 u32 msi_flags; 751 struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; 752 753 /* flow control */ 754 u32 pause_flags; 755}; 756 757/* 758 * Maximum number of loops until we assume that a bit in the irq mask 759 * is stuck. Overridable with module param. 760 */ 761static int max_interrupt_work = 5; 762 763/* 764 * Optimization can be either throuput mode or cpu mode 765 * 766 * Throughput Mode: Every tx and rx packet will generate an interrupt. 767 * CPU Mode: Interrupts are controlled by a timer. 768 */ 769enum { 770 NV_OPTIMIZATION_MODE_THROUGHPUT, 771 NV_OPTIMIZATION_MODE_CPU 772}; 773static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT; 774 775/* 776 * Poll interval for timer irq 777 * 778 * This interval determines how frequent an interrupt is generated. 779 * The is value is determined by [(time_in_micro_secs * 100) / (2^10)] 780 * Min = 0, and Max = 65535 781 */ 782static int poll_interval = -1; 783 784/* 785 * MSI interrupts 786 */ 787enum { 788 NV_MSI_INT_DISABLED, 789 NV_MSI_INT_ENABLED 790}; 791static int msi = NV_MSI_INT_ENABLED; 792 793/* 794 * MSIX interrupts 795 */ 796enum { 797 NV_MSIX_INT_DISABLED, 798 NV_MSIX_INT_ENABLED 799}; 800static int msix = NV_MSIX_INT_ENABLED; 801 802/* 803 * DMA 64bit 804 */ 805enum { 806 NV_DMA_64BIT_DISABLED, 807 NV_DMA_64BIT_ENABLED 808}; 809static int dma_64bit = NV_DMA_64BIT_ENABLED; 810 811static inline struct fe_priv *get_nvpriv(struct net_device *dev) 812{ 813 return netdev_priv(dev); 814} 815 816static inline u8 __iomem *get_hwbase(struct net_device *dev) 817{ 818 return ((struct fe_priv *)netdev_priv(dev))->base; 819} 820 821static inline void pci_push(u8 __iomem *base) 822{ 823 /* force out pending posted writes */ 824 readl(base); 825} 826 827static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v) 828{ 829 return le32_to_cpu(prd->FlagLen) 830 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); 831} 832 833static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v) 834{ 835 return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2; 836} 837 838static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, 839 int delay, int delaymax, const char *msg) 840{ 841 u8 __iomem *base = get_hwbase(dev); 842 843 pci_push(base); 844 do { 845 udelay(delay); 846 delaymax -= delay; 847 if (delaymax < 0) { 848 if (msg) 849 printk(msg); 850 return 1; 851 } 852 } while ((readl(base + offset) & mask) != target); 853 return 0; 854} 855 856#define NV_SETUP_RX_RING 0x01 857#define NV_SETUP_TX_RING 0x02 858 859static void setup_hw_rings(struct net_device *dev, int rxtx_flags) 860{ 861 struct fe_priv *np = get_nvpriv(dev); 862 u8 __iomem *base = get_hwbase(dev); 863 864 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 865 if (rxtx_flags & NV_SETUP_RX_RING) { 866 writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); 867 } 868 if (rxtx_flags & NV_SETUP_TX_RING) { 869 writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); 870 } 871 } else { 872 if (rxtx_flags & NV_SETUP_RX_RING) { 873 writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); 874 writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh); 875 } 876 if (rxtx_flags & NV_SETUP_TX_RING) { 877 writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); 878 writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh); 879 } 880 } 881} 882 883static void free_rings(struct net_device *dev) 884{ 885 struct fe_priv *np = get_nvpriv(dev); 886 887 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 888 if(np->rx_ring.orig) 889 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 890 np->rx_ring.orig, np->ring_addr); 891 } else { 892 if (np->rx_ring.ex) 893 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 894 np->rx_ring.ex, np->ring_addr); 895 } 896 if (np->rx_skbuff) 897 kfree(np->rx_skbuff); 898 if (np->rx_dma) 899 kfree(np->rx_dma); 900 if (np->tx_skbuff) 901 kfree(np->tx_skbuff); 902 if (np->tx_dma) 903 kfree(np->tx_dma); 904 if (np->tx_dma_len) 905 kfree(np->tx_dma_len); 906} 907 908static int using_multi_irqs(struct net_device *dev) 909{ 910 struct fe_priv *np = get_nvpriv(dev); 911 912 if (!(np->msi_flags & NV_MSI_X_ENABLED) || 913 ((np->msi_flags & NV_MSI_X_ENABLED) && 914 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) 915 return 0; 916 else 917 return 1; 918} 919 920static void nv_enable_irq(struct net_device *dev) 921{ 922 struct fe_priv *np = get_nvpriv(dev); 923 924 if (!using_multi_irqs(dev)) { 925 if (np->msi_flags & NV_MSI_X_ENABLED) 926 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 927 else 928 enable_irq(dev->irq); 929 } else { 930 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 931 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 932 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 933 } 934} 935 936static void nv_disable_irq(struct net_device *dev) 937{ 938 struct fe_priv *np = get_nvpriv(dev); 939 940 if (!using_multi_irqs(dev)) { 941 if (np->msi_flags & NV_MSI_X_ENABLED) 942 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 943 else 944 disable_irq(dev->irq); 945 } else { 946 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 947 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 948 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 949 } 950} 951 952/* In MSIX mode, a write to irqmask behaves as XOR */ 953static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask) 954{ 955 u8 __iomem *base = get_hwbase(dev); 956 957 writel(mask, base + NvRegIrqMask); 958} 959 960static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask) 961{ 962 struct fe_priv *np = get_nvpriv(dev); 963 u8 __iomem *base = get_hwbase(dev); 964 965 if (np->msi_flags & NV_MSI_X_ENABLED) { 966 writel(mask, base + NvRegIrqMask); 967 } else { 968 if (np->msi_flags & NV_MSI_ENABLED) 969 writel(0, base + NvRegMSIIrqMask); 970 writel(0, base + NvRegIrqMask); 971 } 972} 973 974#define MII_READ (-1) 975/* mii_rw: read/write a register on the PHY. 976 * 977 * Caller must guarantee serialization 978 */ 979static int mii_rw(struct net_device *dev, int addr, int miireg, int value) 980{ 981 u8 __iomem *base = get_hwbase(dev); 982 u32 reg; 983 int retval; 984 985 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 986 987 reg = readl(base + NvRegMIIControl); 988 if (reg & NVREG_MIICTL_INUSE) { 989 writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl); 990 udelay(NV_MIIBUSY_DELAY); 991 } 992 993 reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg; 994 if (value != MII_READ) { 995 writel(value, base + NvRegMIIData); 996 reg |= NVREG_MIICTL_WRITE; 997 } 998 writel(reg, base + NvRegMIIControl); 999 1000 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0, 1001 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) { 1002 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n", 1003 dev->name, miireg, addr); 1004 retval = -1; 1005 } else if (value != MII_READ) { 1006 /* it was a write operation - fewer failures are detectable */ 1007 dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n", 1008 dev->name, value, miireg, addr); 1009 retval = 0; 1010 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) { 1011 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n", 1012 dev->name, miireg, addr); 1013 retval = -1; 1014 } else { 1015 retval = readl(base + NvRegMIIData); 1016 dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n", 1017 dev->name, miireg, addr, retval); 1018 } 1019 1020 return retval; 1021} 1022 1023static int phy_reset(struct net_device *dev) 1024{ 1025 struct fe_priv *np = netdev_priv(dev); 1026 u32 miicontrol; 1027 unsigned int tries = 0; 1028 1029 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1030 miicontrol |= BMCR_RESET; 1031 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { 1032 return -1; 1033 } 1034 1035 /* wait for 500ms */ 1036 msleep(500); 1037 1038 /* must wait till reset is deasserted */ 1039 while (miicontrol & BMCR_RESET) { 1040 msleep(10); 1041 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1042 /* FIXME: 100 tries seem excessive */ 1043 if (tries++ > 100) 1044 return -1; 1045 } 1046 return 0; 1047} 1048 1049static int phy_init(struct net_device *dev) 1050{ 1051 struct fe_priv *np = get_nvpriv(dev); 1052 u8 __iomem *base = get_hwbase(dev); 1053 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; 1054 1055 /* set advertise register */ 1056 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 1057 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); 1058 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { 1059 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); 1060 return PHY_ERROR; 1061 } 1062 1063 /* get phy interface type */ 1064 phyinterface = readl(base + NvRegPhyInterface); 1065 1066 /* see if gigabit phy */ 1067 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 1068 if (mii_status & PHY_GIGABIT) { 1069 np->gigabit = PHY_GIGABIT; 1070 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 1071 mii_control_1000 &= ~ADVERTISE_1000HALF; 1072 if (phyinterface & PHY_RGMII) 1073 mii_control_1000 |= ADVERTISE_1000FULL; 1074 else 1075 mii_control_1000 &= ~ADVERTISE_1000FULL; 1076 1077 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { 1078 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1079 return PHY_ERROR; 1080 } 1081 } 1082 else 1083 np->gigabit = 0; 1084 1085 /* reset the phy */ 1086 if (phy_reset(dev)) { 1087 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); 1088 return PHY_ERROR; 1089 } 1090 1091 /* phy vendor specific configuration */ 1092 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { 1093 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); 1094 phy_reserved &= ~(PHY_INIT1 | PHY_INIT2); 1095 phy_reserved |= (PHY_INIT3 | PHY_INIT4); 1096 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) { 1097 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1098 return PHY_ERROR; 1099 } 1100 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1101 phy_reserved |= PHY_INIT5; 1102 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) { 1103 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1104 return PHY_ERROR; 1105 } 1106 } 1107 if (np->phy_oui == PHY_OUI_CICADA) { 1108 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); 1109 phy_reserved |= PHY_INIT6; 1110 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) { 1111 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1112 return PHY_ERROR; 1113 } 1114 } 1115 /* some phys clear out pause advertisment on reset, set it back */ 1116 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); 1117 1118 /* restart auto negotiation */ 1119 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1120 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 1121 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1122 return PHY_ERROR; 1123 } 1124 1125 return 0; 1126} 1127 1128static void nv_start_rx(struct net_device *dev) 1129{ 1130 struct fe_priv *np = netdev_priv(dev); 1131 u8 __iomem *base = get_hwbase(dev); 1132 1133 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); 1134 /* Already running? Stop it. */ 1135 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { 1136 writel(0, base + NvRegReceiverControl); 1137 pci_push(base); 1138 } 1139 writel(np->linkspeed, base + NvRegLinkSpeed); 1140 pci_push(base); 1141 writel(NVREG_RCVCTL_START, base + NvRegReceiverControl); 1142 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", 1143 dev->name, np->duplex, np->linkspeed); 1144 pci_push(base); 1145} 1146 1147static void nv_stop_rx(struct net_device *dev) 1148{ 1149 u8 __iomem *base = get_hwbase(dev); 1150 1151 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); 1152 writel(0, base + NvRegReceiverControl); 1153 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, 1154 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, 1155 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); 1156 1157 udelay(NV_RXSTOP_DELAY2); 1158 writel(0, base + NvRegLinkSpeed); 1159} 1160 1161static void nv_start_tx(struct net_device *dev) 1162{ 1163 u8 __iomem *base = get_hwbase(dev); 1164 1165 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name); 1166 writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl); 1167 pci_push(base); 1168} 1169 1170static void nv_stop_tx(struct net_device *dev) 1171{ 1172 u8 __iomem *base = get_hwbase(dev); 1173 1174 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); 1175 writel(0, base + NvRegTransmitterControl); 1176 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, 1177 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, 1178 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); 1179 1180 udelay(NV_TXSTOP_DELAY2); 1181 writel(0, base + NvRegUnknownTransmitterReg); 1182} 1183 1184static void nv_txrx_reset(struct net_device *dev) 1185{ 1186 struct fe_priv *np = netdev_priv(dev); 1187 u8 __iomem *base = get_hwbase(dev); 1188 1189 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name); 1190 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1191 pci_push(base); 1192 udelay(NV_TXRX_RESET_DELAY); 1193 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1194 pci_push(base); 1195} 1196 1197static void nv_mac_reset(struct net_device *dev) 1198{ 1199 struct fe_priv *np = netdev_priv(dev); 1200 u8 __iomem *base = get_hwbase(dev); 1201 1202 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name); 1203 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1204 pci_push(base); 1205 writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset); 1206 pci_push(base); 1207 udelay(NV_MAC_RESET_DELAY); 1208 writel(0, base + NvRegMacReset); 1209 pci_push(base); 1210 udelay(NV_MAC_RESET_DELAY); 1211 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1212 pci_push(base); 1213} 1214 1215/* 1216 * nv_get_stats: dev->get_stats function 1217 * Get latest stats value from the nic. 1218 * Called with read_lock(&dev_base_lock) held for read - 1219 * only synchronized against unregister_netdevice. 1220 */ 1221static struct net_device_stats *nv_get_stats(struct net_device *dev) 1222{ 1223 struct fe_priv *np = netdev_priv(dev); 1224 1225 /* It seems that the nic always generates interrupts and doesn't 1226 * accumulate errors internally. Thus the current values in np->stats 1227 * are already up to date. 1228 */ 1229 return &np->stats; 1230} 1231 1232/* 1233 * nv_alloc_rx: fill rx ring entries. 1234 * Return 1 if the allocations for the skbs failed and the 1235 * rx engine is without Available descriptors 1236 */ 1237static int nv_alloc_rx(struct net_device *dev) 1238{ 1239 struct fe_priv *np = netdev_priv(dev); 1240 unsigned int refill_rx = np->refill_rx; 1241 int nr; 1242 1243 while (np->cur_rx != refill_rx) { 1244 struct sk_buff *skb; 1245 1246 nr = refill_rx % np->rx_ring_size; 1247 if (np->rx_skbuff[nr] == NULL) { 1248 1249 skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1250 if (!skb) 1251 break; 1252 1253 skb->dev = dev; 1254 np->rx_skbuff[nr] = skb; 1255 } else { 1256 skb = np->rx_skbuff[nr]; 1257 } 1258 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, 1259 skb->end-skb->data, PCI_DMA_FROMDEVICE); 1260 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1261 np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]); 1262 wmb(); 1263 np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); 1264 } else { 1265 np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32; 1266 np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF; 1267 wmb(); 1268 np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); 1269 } 1270 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n", 1271 dev->name, refill_rx); 1272 refill_rx++; 1273 } 1274 np->refill_rx = refill_rx; 1275 if (np->cur_rx - refill_rx == np->rx_ring_size) 1276 return 1; 1277 return 0; 1278} 1279 1280static void nv_do_rx_refill(unsigned long data) 1281{ 1282 struct net_device *dev = (struct net_device *) data; 1283 struct fe_priv *np = netdev_priv(dev); 1284 1285 if (!using_multi_irqs(dev)) { 1286 if (np->msi_flags & NV_MSI_X_ENABLED) 1287 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1288 else 1289 disable_irq(dev->irq); 1290 } else { 1291 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1292 } 1293 if (nv_alloc_rx(dev)) { 1294 spin_lock_irq(&np->lock); 1295 if (!np->in_shutdown) 1296 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 1297 spin_unlock_irq(&np->lock); 1298 } 1299 if (!using_multi_irqs(dev)) { 1300 if (np->msi_flags & NV_MSI_X_ENABLED) 1301 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1302 else 1303 enable_irq(dev->irq); 1304 } else { 1305 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1306 } 1307} 1308 1309static void nv_init_rx(struct net_device *dev) 1310{ 1311 struct fe_priv *np = netdev_priv(dev); 1312 int i; 1313 1314 np->cur_rx = np->rx_ring_size; 1315 np->refill_rx = 0; 1316 for (i = 0; i < np->rx_ring_size; i++) 1317 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1318 np->rx_ring.orig[i].FlagLen = 0; 1319 else 1320 np->rx_ring.ex[i].FlagLen = 0; 1321} 1322 1323static void nv_init_tx(struct net_device *dev) 1324{ 1325 struct fe_priv *np = netdev_priv(dev); 1326 int i; 1327 1328 np->next_tx = np->nic_tx = 0; 1329 for (i = 0; i < np->tx_ring_size; i++) { 1330 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1331 np->tx_ring.orig[i].FlagLen = 0; 1332 else 1333 np->tx_ring.ex[i].FlagLen = 0; 1334 np->tx_skbuff[i] = NULL; 1335 np->tx_dma[i] = 0; 1336 } 1337} 1338 1339static int nv_init_ring(struct net_device *dev) 1340{ 1341 nv_init_tx(dev); 1342 nv_init_rx(dev); 1343 return nv_alloc_rx(dev); 1344} 1345 1346static int nv_release_txskb(struct net_device *dev, unsigned int skbnr) 1347{ 1348 struct fe_priv *np = netdev_priv(dev); 1349 1350 dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n", 1351 dev->name, skbnr); 1352 1353 if (np->tx_dma[skbnr]) { 1354 pci_unmap_page(np->pci_dev, np->tx_dma[skbnr], 1355 np->tx_dma_len[skbnr], 1356 PCI_DMA_TODEVICE); 1357 np->tx_dma[skbnr] = 0; 1358 } 1359 1360 if (np->tx_skbuff[skbnr]) { 1361 dev_kfree_skb_any(np->tx_skbuff[skbnr]); 1362 np->tx_skbuff[skbnr] = NULL; 1363 return 1; 1364 } else { 1365 return 0; 1366 } 1367} 1368 1369static void nv_drain_tx(struct net_device *dev) 1370{ 1371 struct fe_priv *np = netdev_priv(dev); 1372 unsigned int i; 1373 1374 for (i = 0; i < np->tx_ring_size; i++) { 1375 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1376 np->tx_ring.orig[i].FlagLen = 0; 1377 else 1378 np->tx_ring.ex[i].FlagLen = 0; 1379 if (nv_release_txskb(dev, i)) 1380 np->stats.tx_dropped++; 1381 } 1382} 1383 1384static void nv_drain_rx(struct net_device *dev) 1385{ 1386 struct fe_priv *np = netdev_priv(dev); 1387 int i; 1388 for (i = 0; i < np->rx_ring_size; i++) { 1389 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1390 np->rx_ring.orig[i].FlagLen = 0; 1391 else 1392 np->rx_ring.ex[i].FlagLen = 0; 1393 wmb(); 1394 if (np->rx_skbuff[i]) { 1395 pci_unmap_single(np->pci_dev, np->rx_dma[i], 1396 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, 1397 PCI_DMA_FROMDEVICE); 1398 dev_kfree_skb(np->rx_skbuff[i]); 1399 np->rx_skbuff[i] = NULL; 1400 } 1401 } 1402} 1403 1404static void drain_ring(struct net_device *dev) 1405{ 1406 nv_drain_tx(dev); 1407 nv_drain_rx(dev); 1408} 1409 1410/* 1411 * nv_start_xmit: dev->hard_start_xmit function 1412 * Called with netif_tx_lock held. 1413 */ 1414static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 1415{ 1416 struct fe_priv *np = netdev_priv(dev); 1417 u32 tx_flags = 0; 1418 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 1419 unsigned int fragments = skb_shinfo(skb)->nr_frags; 1420 unsigned int nr = (np->next_tx - 1) % np->tx_ring_size; 1421 unsigned int start_nr = np->next_tx % np->tx_ring_size; 1422 unsigned int i; 1423 u32 offset = 0; 1424 u32 bcnt; 1425 u32 size = skb->len-skb->data_len; 1426 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1427 u32 tx_flags_vlan = 0; 1428 1429 /* add fragments to entries count */ 1430 for (i = 0; i < fragments; i++) { 1431 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 1432 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1433 } 1434 1435 spin_lock_irq(&np->lock); 1436 1437 if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) { 1438 spin_unlock_irq(&np->lock); 1439 netif_stop_queue(dev); 1440 return NETDEV_TX_BUSY; 1441 } 1442 1443 /* setup the header buffer */ 1444 do { 1445 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1446 nr = (nr + 1) % np->tx_ring_size; 1447 1448 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 1449 PCI_DMA_TODEVICE); 1450 np->tx_dma_len[nr] = bcnt; 1451 1452 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1453 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); 1454 np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); 1455 } else { 1456 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; 1457 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; 1458 np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); 1459 } 1460 tx_flags = np->tx_flags; 1461 offset += bcnt; 1462 size -= bcnt; 1463 } while(size); 1464 1465 /* setup the fragments */ 1466 for (i = 0; i < fragments; i++) { 1467 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1468 u32 size = frag->size; 1469 offset = 0; 1470 1471 do { 1472 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1473 nr = (nr + 1) % np->tx_ring_size; 1474 1475 np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 1476 PCI_DMA_TODEVICE); 1477 np->tx_dma_len[nr] = bcnt; 1478 1479 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1480 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); 1481 np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); 1482 } else { 1483 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; 1484 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; 1485 np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); 1486 } 1487 offset += bcnt; 1488 size -= bcnt; 1489 } while (size); 1490 } 1491 1492 /* set last fragment flag */ 1493 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1494 np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra); 1495 } else { 1496 np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra); 1497 } 1498 1499 np->tx_skbuff[nr] = skb; 1500 1501#ifdef NETIF_F_TSO 1502 if (skb_is_gso(skb)) 1503 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 1504 else 1505#endif 1506 tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); 1507 1508 /* vlan tag */ 1509 if (np->vlangrp && vlan_tx_tag_present(skb)) { 1510 tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb); 1511 } 1512 1513 /* set tx flags */ 1514 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1515 np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); 1516 } else { 1517 np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan); 1518 np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); 1519 } 1520 1521 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", 1522 dev->name, np->next_tx, entries, tx_flags_extra); 1523 { 1524 int j; 1525 for (j=0; j<64; j++) { 1526 if ((j%16) == 0) 1527 dprintk("\n%03x:", j); 1528 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 1529 } 1530 dprintk("\n"); 1531 } 1532 1533 np->next_tx += entries; 1534 1535 dev->trans_start = jiffies; 1536 spin_unlock_irq(&np->lock); 1537 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 1538 pci_push(get_hwbase(dev)); 1539 return NETDEV_TX_OK; 1540} 1541 1542/* 1543 * nv_tx_done: check for completed packets, release the skbs. 1544 * 1545 * Caller must own np->lock. 1546 */ 1547static void nv_tx_done(struct net_device *dev) 1548{ 1549 struct fe_priv *np = netdev_priv(dev); 1550 u32 Flags; 1551 unsigned int i; 1552 struct sk_buff *skb; 1553 1554 while (np->nic_tx != np->next_tx) { 1555 i = np->nic_tx % np->tx_ring_size; 1556 1557 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1558 Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen); 1559 else 1560 Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen); 1561 1562 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n", 1563 dev->name, np->nic_tx, Flags); 1564 if (Flags & NV_TX_VALID) 1565 break; 1566 if (np->desc_ver == DESC_VER_1) { 1567 if (Flags & NV_TX_LASTPACKET) { 1568 skb = np->tx_skbuff[i]; 1569 if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| 1570 NV_TX_UNDERFLOW|NV_TX_ERROR)) { 1571 if (Flags & NV_TX_UNDERFLOW) 1572 np->stats.tx_fifo_errors++; 1573 if (Flags & NV_TX_CARRIERLOST) 1574 np->stats.tx_carrier_errors++; 1575 np->stats.tx_errors++; 1576 } else { 1577 np->stats.tx_packets++; 1578 np->stats.tx_bytes += skb->len; 1579 } 1580 } 1581 } else { 1582 if (Flags & NV_TX2_LASTPACKET) { 1583 skb = np->tx_skbuff[i]; 1584 if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| 1585 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { 1586 if (Flags & NV_TX2_UNDERFLOW) 1587 np->stats.tx_fifo_errors++; 1588 if (Flags & NV_TX2_CARRIERLOST) 1589 np->stats.tx_carrier_errors++; 1590 np->stats.tx_errors++; 1591 } else { 1592 np->stats.tx_packets++; 1593 np->stats.tx_bytes += skb->len; 1594 } 1595 } 1596 } 1597 nv_release_txskb(dev, i); 1598 np->nic_tx++; 1599 } 1600 if (np->next_tx - np->nic_tx < np->tx_limit_start) 1601 netif_wake_queue(dev); 1602} 1603 1604/* 1605 * nv_tx_timeout: dev->tx_timeout function 1606 * Called with netif_tx_lock held. 1607 */ 1608static void nv_tx_timeout(struct net_device *dev) 1609{ 1610 struct fe_priv *np = netdev_priv(dev); 1611 u8 __iomem *base = get_hwbase(dev); 1612 u32 status; 1613 1614 if (np->msi_flags & NV_MSI_X_ENABLED) 1615 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 1616 else 1617 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 1618 1619 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); 1620 1621 { 1622 int i; 1623 1624 printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n", 1625 dev->name, (unsigned long)np->ring_addr, 1626 np->next_tx, np->nic_tx); 1627 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 1628 for (i=0;i<=np->register_size;i+= 32) { 1629 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 1630 i, 1631 readl(base + i + 0), readl(base + i + 4), 1632 readl(base + i + 8), readl(base + i + 12), 1633 readl(base + i + 16), readl(base + i + 20), 1634 readl(base + i + 24), readl(base + i + 28)); 1635 } 1636 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 1637 for (i=0;i<np->tx_ring_size;i+= 4) { 1638 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1639 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 1640 i, 1641 le32_to_cpu(np->tx_ring.orig[i].PacketBuffer), 1642 le32_to_cpu(np->tx_ring.orig[i].FlagLen), 1643 le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer), 1644 le32_to_cpu(np->tx_ring.orig[i+1].FlagLen), 1645 le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer), 1646 le32_to_cpu(np->tx_ring.orig[i+2].FlagLen), 1647 le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer), 1648 le32_to_cpu(np->tx_ring.orig[i+3].FlagLen)); 1649 } else { 1650 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", 1651 i, 1652 le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh), 1653 le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow), 1654 le32_to_cpu(np->tx_ring.ex[i].FlagLen), 1655 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh), 1656 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow), 1657 le32_to_cpu(np->tx_ring.ex[i+1].FlagLen), 1658 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh), 1659 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow), 1660 le32_to_cpu(np->tx_ring.ex[i+2].FlagLen), 1661 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh), 1662 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow), 1663 le32_to_cpu(np->tx_ring.ex[i+3].FlagLen)); 1664 } 1665 } 1666 } 1667 1668 spin_lock_irq(&np->lock); 1669 1670 /* 1) stop tx engine */ 1671 nv_stop_tx(dev); 1672 1673 /* 2) check that the packets were not sent already: */ 1674 nv_tx_done(dev); 1675 1676 /* 3) if there are dead entries: clear everything */ 1677 if (np->next_tx != np->nic_tx) { 1678 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 1679 nv_drain_tx(dev); 1680 np->next_tx = np->nic_tx = 0; 1681 setup_hw_rings(dev, NV_SETUP_TX_RING); 1682 netif_wake_queue(dev); 1683 } 1684 1685 /* 4) restart tx engine */ 1686 nv_start_tx(dev); 1687 spin_unlock_irq(&np->lock); 1688} 1689 1690/* 1691 * Called when the nic notices a mismatch between the actual data len on the 1692 * wire and the len indicated in the 802 header 1693 */ 1694static int nv_getlen(struct net_device *dev, void *packet, int datalen) 1695{ 1696 int hdrlen; /* length of the 802 header */ 1697 int protolen; /* length as stored in the proto field */ 1698 1699 /* 1) calculate len according to header */ 1700 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) { 1701 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); 1702 hdrlen = VLAN_HLEN; 1703 } else { 1704 protolen = ntohs( ((struct ethhdr *)packet)->h_proto); 1705 hdrlen = ETH_HLEN; 1706 } 1707 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n", 1708 dev->name, datalen, protolen, hdrlen); 1709 if (protolen > ETH_DATA_LEN) 1710 return datalen; /* Value in proto field not a len, no checks possible */ 1711 1712 protolen += hdrlen; 1713 /* consistency checks: */ 1714 if (datalen > ETH_ZLEN) { 1715 if (datalen >= protolen) { 1716 /* more data on wire than in 802 header, trim of 1717 * additional data. 1718 */ 1719 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 1720 dev->name, protolen); 1721 return protolen; 1722 } else { 1723 /* less data on wire than mentioned in header. 1724 * Discard the packet. 1725 */ 1726 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n", 1727 dev->name); 1728 return -1; 1729 } 1730 } else { 1731 /* short packet. Accept only if 802 values are also short */ 1732 if (protolen > ETH_ZLEN) { 1733 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n", 1734 dev->name); 1735 return -1; 1736 } 1737 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 1738 dev->name, datalen); 1739 return datalen; 1740 } 1741} 1742 1743static void nv_rx_process(struct net_device *dev) 1744{ 1745 struct fe_priv *np = netdev_priv(dev); 1746 u32 Flags; 1747 u32 vlanflags = 0; 1748 1749 for (;;) { 1750 struct sk_buff *skb; 1751 int len; 1752 int i; 1753 if (np->cur_rx - np->refill_rx >= np->rx_ring_size) 1754 break; /* we scanned the whole ring - do not continue */ 1755 1756 i = np->cur_rx % np->rx_ring_size; 1757 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1758 Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen); 1759 len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver); 1760 } else { 1761 Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen); 1762 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); 1763 vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow); 1764 } 1765 1766 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", 1767 dev->name, np->cur_rx, Flags); 1768 1769 if (Flags & NV_RX_AVAIL) 1770 break; /* still owned by hardware, */ 1771 1772 /* 1773 * the packet is for us - immediately tear down the pci mapping. 1774 * TODO: check if a prefetch of the first cacheline improves 1775 * the performance. 1776 */ 1777 pci_unmap_single(np->pci_dev, np->rx_dma[i], 1778 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, 1779 PCI_DMA_FROMDEVICE); 1780 1781 { 1782 int j; 1783 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags); 1784 for (j=0; j<64; j++) { 1785 if ((j%16) == 0) 1786 dprintk("\n%03x:", j); 1787 dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]); 1788 } 1789 dprintk("\n"); 1790 } 1791 /* look at what we actually got: */ 1792 if (np->desc_ver == DESC_VER_1) { 1793 if (!(Flags & NV_RX_DESCRIPTORVALID)) 1794 goto next_pkt; 1795 1796 if (Flags & NV_RX_ERROR) { 1797 if (Flags & NV_RX_MISSEDFRAME) { 1798 np->stats.rx_missed_errors++; 1799 np->stats.rx_errors++; 1800 goto next_pkt; 1801 } 1802 if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { 1803 np->stats.rx_errors++; 1804 goto next_pkt; 1805 } 1806 if (Flags & NV_RX_CRCERR) { 1807 np->stats.rx_crc_errors++; 1808 np->stats.rx_errors++; 1809 goto next_pkt; 1810 } 1811 if (Flags & NV_RX_OVERFLOW) { 1812 np->stats.rx_over_errors++; 1813 np->stats.rx_errors++; 1814 goto next_pkt; 1815 } 1816 if (Flags & NV_RX_ERROR4) { 1817 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 1818 if (len < 0) { 1819 np->stats.rx_errors++; 1820 goto next_pkt; 1821 } 1822 } 1823 /* framing errors are soft errors. */ 1824 if (Flags & NV_RX_FRAMINGERR) { 1825 if (Flags & NV_RX_SUBSTRACT1) { 1826 len--; 1827 } 1828 } 1829 } 1830 } else { 1831 if (!(Flags & NV_RX2_DESCRIPTORVALID)) 1832 goto next_pkt; 1833 1834 if (Flags & NV_RX2_ERROR) { 1835 if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { 1836 np->stats.rx_errors++; 1837 goto next_pkt; 1838 } 1839 if (Flags & NV_RX2_CRCERR) { 1840 np->stats.rx_crc_errors++; 1841 np->stats.rx_errors++; 1842 goto next_pkt; 1843 } 1844 if (Flags & NV_RX2_OVERFLOW) { 1845 np->stats.rx_over_errors++; 1846 np->stats.rx_errors++; 1847 goto next_pkt; 1848 } 1849 if (Flags & NV_RX2_ERROR4) { 1850 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 1851 if (len < 0) { 1852 np->stats.rx_errors++; 1853 goto next_pkt; 1854 } 1855 } 1856 /* framing errors are soft errors */ 1857 if (Flags & NV_RX2_FRAMINGERR) { 1858 if (Flags & NV_RX2_SUBSTRACT1) { 1859 len--; 1860 } 1861 } 1862 } 1863 if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) { 1864 Flags &= NV_RX2_CHECKSUMMASK; 1865 if (Flags == NV_RX2_CHECKSUMOK1 || 1866 Flags == NV_RX2_CHECKSUMOK2 || 1867 Flags == NV_RX2_CHECKSUMOK3) { 1868 dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name); 1869 np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; 1870 } else { 1871 dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name); 1872 } 1873 } 1874 } 1875 /* got a valid packet - forward it to the network core */ 1876 skb = np->rx_skbuff[i]; 1877 np->rx_skbuff[i] = NULL; 1878 1879 skb_put(skb, len); 1880 skb->protocol = eth_type_trans(skb, dev); 1881 dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", 1882 dev->name, np->cur_rx, len, skb->protocol); 1883 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) { 1884 vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK); 1885 } else { 1886 netif_rx(skb); 1887 } 1888 dev->last_rx = jiffies; 1889 np->stats.rx_packets++; 1890 np->stats.rx_bytes += len; 1891next_pkt: 1892 np->cur_rx++; 1893 } 1894} 1895 1896static void set_bufsize(struct net_device *dev) 1897{ 1898 struct fe_priv *np = netdev_priv(dev); 1899 1900 if (dev->mtu <= ETH_DATA_LEN) 1901 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; 1902 else 1903 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; 1904} 1905 1906/* 1907 * nv_change_mtu: dev->change_mtu function 1908 * Called with dev_base_lock held for read. 1909 */ 1910static int nv_change_mtu(struct net_device *dev, int new_mtu) 1911{ 1912 struct fe_priv *np = netdev_priv(dev); 1913 int old_mtu; 1914 1915 if (new_mtu < 64 || new_mtu > np->pkt_limit) 1916 return -EINVAL; 1917 1918 old_mtu = dev->mtu; 1919 dev->mtu = new_mtu; 1920 1921 /* return early if the buffer sizes will not change */ 1922 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) 1923 return 0; 1924 if (old_mtu == new_mtu) 1925 return 0; 1926 1927 /* synchronized against open : rtnl_lock() held by caller */ 1928 if (netif_running(dev)) { 1929 u8 __iomem *base = get_hwbase(dev); 1930 /* 1931 * It seems that the nic preloads valid ring entries into an 1932 * internal buffer. The procedure for flushing everything is 1933 * guessed, there is probably a simpler approach. 1934 * Changing the MTU is a rare event, it shouldn't matter. 1935 */ 1936 nv_disable_irq(dev); 1937 netif_tx_lock_bh(dev); 1938 spin_lock(&np->lock); 1939 /* stop engines */ 1940 nv_stop_rx(dev); 1941 nv_stop_tx(dev); 1942 nv_txrx_reset(dev); 1943 /* drain rx queue */ 1944 nv_drain_rx(dev); 1945 nv_drain_tx(dev); 1946 /* reinit driver view of the rx queue */ 1947 set_bufsize(dev); 1948 if (nv_init_ring(dev)) { 1949 if (!np->in_shutdown) 1950 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 1951 } 1952 /* reinit nic view of the rx queue */ 1953 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 1954 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 1955 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 1956 base + NvRegRingSizes); 1957 pci_push(base); 1958 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 1959 pci_push(base); 1960 1961 /* restart rx engine */ 1962 nv_start_rx(dev); 1963 nv_start_tx(dev); 1964 spin_unlock(&np->lock); 1965 netif_tx_unlock_bh(dev); 1966 nv_enable_irq(dev); 1967 } 1968 return 0; 1969} 1970 1971static void nv_copy_mac_to_hw(struct net_device *dev) 1972{ 1973 u8 __iomem *base = get_hwbase(dev); 1974 u32 mac[2]; 1975 1976 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 1977 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 1978 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 1979 1980 writel(mac[0], base + NvRegMacAddrA); 1981 writel(mac[1], base + NvRegMacAddrB); 1982} 1983 1984/* 1985 * nv_set_mac_address: dev->set_mac_address function 1986 * Called with rtnl_lock() held. 1987 */ 1988static int nv_set_mac_address(struct net_device *dev, void *addr) 1989{ 1990 struct fe_priv *np = netdev_priv(dev); 1991 struct sockaddr *macaddr = (struct sockaddr*)addr; 1992 1993 if(!is_valid_ether_addr(macaddr->sa_data)) 1994 return -EADDRNOTAVAIL; 1995 1996 /* synchronized against open : rtnl_lock() held by caller */ 1997 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); 1998 1999 if (netif_running(dev)) { 2000 netif_tx_lock_bh(dev); 2001 spin_lock_irq(&np->lock); 2002 2003 /* stop rx engine */ 2004 nv_stop_rx(dev); 2005 2006 /* set mac address */ 2007 nv_copy_mac_to_hw(dev); 2008 2009 /* restart rx engine */ 2010 nv_start_rx(dev); 2011 spin_unlock_irq(&np->lock); 2012 netif_tx_unlock_bh(dev); 2013 } else { 2014 nv_copy_mac_to_hw(dev); 2015 } 2016 return 0; 2017} 2018 2019/* 2020 * nv_set_multicast: dev->set_multicast function 2021 * Called with netif_tx_lock held. 2022 */ 2023static void nv_set_multicast(struct net_device *dev) 2024{ 2025 struct fe_priv *np = netdev_priv(dev); 2026 u8 __iomem *base = get_hwbase(dev); 2027 u32 addr[2]; 2028 u32 mask[2]; 2029 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; 2030 2031 memset(addr, 0, sizeof(addr)); 2032 memset(mask, 0, sizeof(mask)); 2033 2034 if (dev->flags & IFF_PROMISC) { 2035 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); 2036 pff |= NVREG_PFF_PROMISC; 2037 } else { 2038 pff |= NVREG_PFF_MYADDR; 2039 2040 if (dev->flags & IFF_ALLMULTI || dev->mc_list) { 2041 u32 alwaysOff[2]; 2042 u32 alwaysOn[2]; 2043 2044 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; 2045 if (dev->flags & IFF_ALLMULTI) { 2046 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 2047 } else { 2048 struct dev_mc_list *walk; 2049 2050 walk = dev->mc_list; 2051 while (walk != NULL) { 2052 u32 a, b; 2053 a = le32_to_cpu(*(u32 *) walk->dmi_addr); 2054 b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4])); 2055 alwaysOn[0] &= a; 2056 alwaysOff[0] &= ~a; 2057 alwaysOn[1] &= b; 2058 alwaysOff[1] &= ~b; 2059 walk = walk->next; 2060 } 2061 } 2062 addr[0] = alwaysOn[0]; 2063 addr[1] = alwaysOn[1]; 2064 mask[0] = alwaysOn[0] | alwaysOff[0]; 2065 mask[1] = alwaysOn[1] | alwaysOff[1]; 2066 } 2067 } 2068 addr[0] |= NVREG_MCASTADDRA_FORCE; 2069 pff |= NVREG_PFF_ALWAYS; 2070 spin_lock_irq(&np->lock); 2071 nv_stop_rx(dev); 2072 writel(addr[0], base + NvRegMulticastAddrA); 2073 writel(addr[1], base + NvRegMulticastAddrB); 2074 writel(mask[0], base + NvRegMulticastMaskA); 2075 writel(mask[1], base + NvRegMulticastMaskB); 2076 writel(pff, base + NvRegPacketFilterFlags); 2077 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", 2078 dev->name); 2079 nv_start_rx(dev); 2080 spin_unlock_irq(&np->lock); 2081} 2082 2083static void nv_update_pause(struct net_device *dev, u32 pause_flags) 2084{ 2085 struct fe_priv *np = netdev_priv(dev); 2086 u8 __iomem *base = get_hwbase(dev); 2087 2088 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); 2089 2090 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { 2091 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; 2092 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { 2093 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); 2094 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2095 } else { 2096 writel(pff, base + NvRegPacketFilterFlags); 2097 } 2098 } 2099 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 2100 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 2101 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 2102 writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame); 2103 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 2104 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2105 } else { 2106 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 2107 writel(regmisc, base + NvRegMisc1); 2108 } 2109 } 2110} 2111 2112/** 2113 * nv_update_linkspeed: Setup the MAC according to the link partner 2114 * @dev: Network device to be configured 2115 * 2116 * The function queries the PHY and checks if there is a link partner. 2117 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is 2118 * set to 10 MBit HD. 2119 * 2120 * The function returns 0 if there is no link partner and 1 if there is 2121 * a good link partner. 2122 */ 2123static int nv_update_linkspeed(struct net_device *dev) 2124{ 2125 struct fe_priv *np = netdev_priv(dev); 2126 u8 __iomem *base = get_hwbase(dev); 2127 int adv = 0; 2128 int lpa = 0; 2129 int adv_lpa, adv_pause, lpa_pause; 2130 int newls = np->linkspeed; 2131 int newdup = np->duplex; 2132 int mii_status; 2133 int retval = 0; 2134 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 2135 2136 /* BMSR_LSTATUS is latched, read it twice: 2137 * we want the current value. 2138 */ 2139 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 2140 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 2141 2142 if (!(mii_status & BMSR_LSTATUS)) { 2143 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", 2144 dev->name); 2145 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2146 newdup = 0; 2147 retval = 0; 2148 goto set_speed; 2149 } 2150 2151 if (np->autoneg == 0) { 2152 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", 2153 dev->name, np->fixed_mode); 2154 if (np->fixed_mode & LPA_100FULL) { 2155 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2156 newdup = 1; 2157 } else if (np->fixed_mode & LPA_100HALF) { 2158 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2159 newdup = 0; 2160 } else if (np->fixed_mode & LPA_10FULL) { 2161 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2162 newdup = 1; 2163 } else { 2164 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2165 newdup = 0; 2166 } 2167 retval = 1; 2168 goto set_speed; 2169 } 2170 /* check auto negotiation is complete */ 2171 if (!(mii_status & BMSR_ANEGCOMPLETE)) { 2172 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ 2173 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2174 newdup = 0; 2175 retval = 0; 2176 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); 2177 goto set_speed; 2178 } 2179 2180 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 2181 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 2182 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", 2183 dev->name, adv, lpa); 2184 2185 retval = 1; 2186 if (np->gigabit == PHY_GIGABIT) { 2187 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 2188 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); 2189 2190 if ((control_1000 & ADVERTISE_1000FULL) && 2191 (status_1000 & LPA_1000FULL)) { 2192 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", 2193 dev->name); 2194 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 2195 newdup = 1; 2196 goto set_speed; 2197 } 2198 } 2199 2200 /* FIXME: handle parallel detection properly */ 2201 adv_lpa = lpa & adv; 2202 if (adv_lpa & LPA_100FULL) { 2203 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2204 newdup = 1; 2205 } else if (adv_lpa & LPA_100HALF) { 2206 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2207 newdup = 0; 2208 } else if (adv_lpa & LPA_10FULL) { 2209 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2210 newdup = 1; 2211 } else if (adv_lpa & LPA_10HALF) { 2212 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2213 newdup = 0; 2214 } else { 2215 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); 2216 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2217 newdup = 0; 2218 } 2219 2220set_speed: 2221 if (np->duplex == newdup && np->linkspeed == newls) 2222 return retval; 2223 2224 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", 2225 dev->name, np->linkspeed, np->duplex, newls, newdup); 2226 2227 np->duplex = newdup; 2228 np->linkspeed = newls; 2229 2230 if (np->gigabit == PHY_GIGABIT) { 2231 phyreg = readl(base + NvRegRandomSeed); 2232 phyreg &= ~(0x3FF00); 2233 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) 2234 phyreg |= NVREG_RNDSEED_FORCE3; 2235 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) 2236 phyreg |= NVREG_RNDSEED_FORCE2; 2237 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 2238 phyreg |= NVREG_RNDSEED_FORCE; 2239 writel(phyreg, base + NvRegRandomSeed); 2240 } 2241 2242 phyreg = readl(base + NvRegPhyInterface); 2243 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 2244 if (np->duplex == 0) 2245 phyreg |= PHY_HALF; 2246 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 2247 phyreg |= PHY_100; 2248 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2249 phyreg |= PHY_1000; 2250 writel(phyreg, base + NvRegPhyInterface); 2251 2252 if (phyreg & PHY_RGMII) { 2253 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2254 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 2255 else 2256 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 2257 } else { 2258 txreg = NVREG_TX_DEFERRAL_DEFAULT; 2259 } 2260 writel(txreg, base + NvRegTxDeferral); 2261 2262 if (np->desc_ver == DESC_VER_1) { 2263 txreg = NVREG_TX_WM_DESC1_DEFAULT; 2264 } else { 2265 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2266 txreg = NVREG_TX_WM_DESC2_3_1000; 2267 else 2268 txreg = NVREG_TX_WM_DESC2_3_DEFAULT; 2269 } 2270 writel(txreg, base + NvRegTxWatermark); 2271 2272 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 2273 base + NvRegMisc1); 2274 pci_push(base); 2275 writel(np->linkspeed, base + NvRegLinkSpeed); 2276 pci_push(base); 2277 2278 pause_flags = 0; 2279 /* setup pause frame */ 2280 if (np->duplex != 0) { 2281 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 2282 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); 2283 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 2284 2285 switch (adv_pause) { 2286 case (ADVERTISE_PAUSE_CAP): 2287 if (lpa_pause & LPA_PAUSE_CAP) { 2288 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2289 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 2290 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2291 } 2292 break; 2293 case (ADVERTISE_PAUSE_ASYM): 2294 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 2295 { 2296 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2297 } 2298 break; 2299 case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM): 2300 if (lpa_pause & LPA_PAUSE_CAP) 2301 { 2302 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2303 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 2304 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2305 } 2306 if (lpa_pause == LPA_PAUSE_ASYM) 2307 { 2308 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2309 } 2310 break; 2311 } 2312 } else { 2313 pause_flags = np->pause_flags; 2314 } 2315 } 2316 nv_update_pause(dev, pause_flags); 2317 2318 return retval; 2319} 2320 2321static void nv_linkchange(struct net_device *dev) 2322{ 2323 if (nv_update_linkspeed(dev)) { 2324 if (!netif_carrier_ok(dev)) { 2325 netif_carrier_on(dev); 2326 printk(KERN_INFO "%s: link up.\n", dev->name); 2327 nv_start_rx(dev); 2328 } 2329 } else { 2330 if (netif_carrier_ok(dev)) { 2331 netif_carrier_off(dev); 2332 printk(KERN_INFO "%s: link down.\n", dev->name); 2333 nv_stop_rx(dev); 2334 } 2335 } 2336} 2337 2338static void nv_link_irq(struct net_device *dev) 2339{ 2340 u8 __iomem *base = get_hwbase(dev); 2341 u32 miistat; 2342 2343 miistat = readl(base + NvRegMIIStatus); 2344 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 2345 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); 2346 2347 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 2348 nv_linkchange(dev); 2349 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); 2350} 2351 2352static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) 2353{ 2354 struct net_device *dev = (struct net_device *) data; 2355 struct fe_priv *np = netdev_priv(dev); 2356 u8 __iomem *base = get_hwbase(dev); 2357 u32 events; 2358 int i; 2359 2360 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 2361 2362 for (i=0; ; i++) { 2363 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 2364 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2365 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 2366 } else { 2367 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2368 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 2369 } 2370 pci_push(base); 2371 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 2372 if (!(events & np->irqmask)) 2373 break; 2374 2375 spin_lock(&np->lock); 2376 nv_tx_done(dev); 2377 spin_unlock(&np->lock); 2378 2379 nv_rx_process(dev); 2380 if (nv_alloc_rx(dev)) { 2381 spin_lock(&np->lock); 2382 if (!np->in_shutdown) 2383 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2384 spin_unlock(&np->lock); 2385 } 2386 2387 if (events & NVREG_IRQ_LINK) { 2388 spin_lock(&np->lock); 2389 nv_link_irq(dev); 2390 spin_unlock(&np->lock); 2391 } 2392 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 2393 spin_lock(&np->lock); 2394 nv_linkchange(dev); 2395 spin_unlock(&np->lock); 2396 np->link_timeout = jiffies + LINK_TIMEOUT; 2397 } 2398 if (events & (NVREG_IRQ_TX_ERR)) { 2399 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 2400 dev->name, events); 2401 } 2402 if (events & (NVREG_IRQ_UNKNOWN)) { 2403 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 2404 dev->name, events); 2405 } 2406 if (i > max_interrupt_work) { 2407 spin_lock(&np->lock); 2408 /* disable interrupts on the nic */ 2409 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 2410 writel(0, base + NvRegIrqMask); 2411 else 2412 writel(np->irqmask, base + NvRegIrqMask); 2413 pci_push(base); 2414 2415 if (!np->in_shutdown) { 2416 np->nic_poll_irq = np->irqmask; 2417 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2418 } 2419 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); 2420 spin_unlock(&np->lock); 2421 break; 2422 } 2423 2424 } 2425 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 2426 2427 return IRQ_RETVAL(i); 2428} 2429 2430static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) 2431{ 2432 struct net_device *dev = (struct net_device *) data; 2433 struct fe_priv *np = netdev_priv(dev); 2434 u8 __iomem *base = get_hwbase(dev); 2435 u32 events; 2436 int i; 2437 2438 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 2439 2440 for (i=0; ; i++) { 2441 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 2442 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 2443 pci_push(base); 2444 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 2445 if (!(events & np->irqmask)) 2446 break; 2447 2448 spin_lock_irq(&np->lock); 2449 nv_tx_done(dev); 2450 spin_unlock_irq(&np->lock); 2451 2452 if (events & (NVREG_IRQ_TX_ERR)) { 2453 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 2454 dev->name, events); 2455 } 2456 if (i > max_interrupt_work) { 2457 spin_lock_irq(&np->lock); 2458 /* disable interrupts on the nic */ 2459 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 2460 pci_push(base); 2461 2462 if (!np->in_shutdown) { 2463 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 2464 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2465 } 2466 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 2467 spin_unlock_irq(&np->lock); 2468 break; 2469 } 2470 2471 } 2472 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); 2473 2474 return IRQ_RETVAL(i); 2475} 2476 2477static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) 2478{ 2479 struct net_device *dev = (struct net_device *) data; 2480 struct fe_priv *np = netdev_priv(dev); 2481 u8 __iomem *base = get_hwbase(dev); 2482 u32 events; 2483 int i; 2484 2485 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 2486 2487 for (i=0; ; i++) { 2488 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 2489 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 2490 pci_push(base); 2491 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 2492 if (!(events & np->irqmask)) 2493 break; 2494 2495 nv_rx_process(dev); 2496 if (nv_alloc_rx(dev)) { 2497 spin_lock_irq(&np->lock); 2498 if (!np->in_shutdown) 2499 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2500 spin_unlock_irq(&np->lock); 2501 } 2502 2503 if (i > max_interrupt_work) { 2504 spin_lock_irq(&np->lock); 2505 /* disable interrupts on the nic */ 2506 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 2507 pci_push(base); 2508 2509 if (!np->in_shutdown) { 2510 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 2511 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2512 } 2513 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 2514 spin_unlock_irq(&np->lock); 2515 break; 2516 } 2517 2518 } 2519 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); 2520 2521 return IRQ_RETVAL(i); 2522} 2523 2524static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) 2525{ 2526 struct net_device *dev = (struct net_device *) data; 2527 struct fe_priv *np = netdev_priv(dev); 2528 u8 __iomem *base = get_hwbase(dev); 2529 u32 events; 2530 int i; 2531 2532 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 2533 2534 for (i=0; ; i++) { 2535 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 2536 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 2537 pci_push(base); 2538 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 2539 if (!(events & np->irqmask)) 2540 break; 2541 2542 if (events & NVREG_IRQ_LINK) { 2543 spin_lock_irq(&np->lock); 2544 nv_link_irq(dev); 2545 spin_unlock_irq(&np->lock); 2546 } 2547 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 2548 spin_lock_irq(&np->lock); 2549 nv_linkchange(dev); 2550 spin_unlock_irq(&np->lock); 2551 np->link_timeout = jiffies + LINK_TIMEOUT; 2552 } 2553 if (events & (NVREG_IRQ_UNKNOWN)) { 2554 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 2555 dev->name, events); 2556 } 2557 if (i > max_interrupt_work) { 2558 spin_lock_irq(&np->lock); 2559 /* disable interrupts on the nic */ 2560 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 2561 pci_push(base); 2562 2563 if (!np->in_shutdown) { 2564 np->nic_poll_irq |= NVREG_IRQ_OTHER; 2565 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2566 } 2567 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 2568 spin_unlock_irq(&np->lock); 2569 break; 2570 } 2571 2572 } 2573 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); 2574 2575 return IRQ_RETVAL(i); 2576} 2577 2578static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs) 2579{ 2580 struct net_device *dev = (struct net_device *) data; 2581 struct fe_priv *np = netdev_priv(dev); 2582 u8 __iomem *base = get_hwbase(dev); 2583 u32 events; 2584 2585 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name); 2586 2587 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 2588 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2589 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 2590 } else { 2591 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2592 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 2593 } 2594 pci_push(base); 2595 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 2596 if (!(events & NVREG_IRQ_TIMER)) 2597 return IRQ_RETVAL(0); 2598 2599 spin_lock(&np->lock); 2600 np->intr_test = 1; 2601 spin_unlock(&np->lock); 2602 2603 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); 2604 2605 return IRQ_RETVAL(1); 2606} 2607 2608static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) 2609{ 2610 u8 __iomem *base = get_hwbase(dev); 2611 int i; 2612 u32 msixmap = 0; 2613 2614 /* Each interrupt bit can be mapped to a MSIX vector (4 bits). 2615 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents 2616 * the remaining 8 interrupts. 2617 */ 2618 for (i = 0; i < 8; i++) { 2619 if ((irqmask >> i) & 0x1) { 2620 msixmap |= vector << (i << 2); 2621 } 2622 } 2623 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 2624 2625 msixmap = 0; 2626 for (i = 0; i < 8; i++) { 2627 if ((irqmask >> (i + 8)) & 0x1) { 2628 msixmap |= vector << (i << 2); 2629 } 2630 } 2631 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 2632} 2633 2634static int nv_request_irq(struct net_device *dev, int intr_test) 2635{ 2636 struct fe_priv *np = get_nvpriv(dev); 2637 u8 __iomem *base = get_hwbase(dev); 2638 int ret = 1; 2639 int i; 2640 2641 if (np->msi_flags & NV_MSI_X_CAPABLE) { 2642 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 2643 np->msi_x_entry[i].entry = i; 2644 } 2645 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 2646 np->msi_flags |= NV_MSI_X_ENABLED; 2647 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 2648 /* Request irq for rx handling */ 2649 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) { 2650 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 2651 pci_disable_msix(np->pci_dev); 2652 np->msi_flags &= ~NV_MSI_X_ENABLED; 2653 goto out_err; 2654 } 2655 /* Request irq for tx handling */ 2656 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) { 2657 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 2658 pci_disable_msix(np->pci_dev); 2659 np->msi_flags &= ~NV_MSI_X_ENABLED; 2660 goto out_free_rx; 2661 } 2662 /* Request irq for link and timer handling */ 2663 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) { 2664 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 2665 pci_disable_msix(np->pci_dev); 2666 np->msi_flags &= ~NV_MSI_X_ENABLED; 2667 goto out_free_tx; 2668 } 2669 /* map interrupts to their respective vector */ 2670 writel(0, base + NvRegMSIXMap0); 2671 writel(0, base + NvRegMSIXMap1); 2672 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 2673 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 2674 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 2675 } else { 2676 /* Request irq for all interrupts */ 2677 if ((!intr_test && 2678 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || 2679 (intr_test && 2680 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) { 2681 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 2682 pci_disable_msix(np->pci_dev); 2683 np->msi_flags &= ~NV_MSI_X_ENABLED; 2684 goto out_err; 2685 } 2686 2687 /* map interrupts to vector 0 */ 2688 writel(0, base + NvRegMSIXMap0); 2689 writel(0, base + NvRegMSIXMap1); 2690 } 2691 } 2692 } 2693 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 2694 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 2695 np->msi_flags |= NV_MSI_ENABLED; 2696 if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || 2697 (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) { 2698 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 2699 pci_disable_msi(np->pci_dev); 2700 np->msi_flags &= ~NV_MSI_ENABLED; 2701 goto out_err; 2702 } 2703 2704 /* map interrupts to vector 0 */ 2705 writel(0, base + NvRegMSIMap0); 2706 writel(0, base + NvRegMSIMap1); 2707 /* enable msi vector 0 */ 2708 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 2709 } 2710 } 2711 if (ret != 0) { 2712 if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || 2713 (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) 2714 goto out_err; 2715 2716 } 2717 2718 return 0; 2719out_free_tx: 2720 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); 2721out_free_rx: 2722 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); 2723out_err: 2724 return 1; 2725} 2726 2727static void nv_free_irq(struct net_device *dev) 2728{ 2729 struct fe_priv *np = get_nvpriv(dev); 2730 int i; 2731 2732 if (np->msi_flags & NV_MSI_X_ENABLED) { 2733 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 2734 free_irq(np->msi_x_entry[i].vector, dev); 2735 } 2736 pci_disable_msix(np->pci_dev); 2737 np->msi_flags &= ~NV_MSI_X_ENABLED; 2738 } else { 2739 free_irq(np->pci_dev->irq, dev); 2740 if (np->msi_flags & NV_MSI_ENABLED) { 2741 pci_disable_msi(np->pci_dev); 2742 np->msi_flags &= ~NV_MSI_ENABLED; 2743 } 2744 } 2745} 2746 2747static void nv_do_nic_poll(unsigned long data) 2748{ 2749 struct net_device *dev = (struct net_device *) data; 2750 struct fe_priv *np = netdev_priv(dev); 2751 u8 __iomem *base = get_hwbase(dev); 2752 u32 mask = 0; 2753 2754 /* 2755 * First disable irq(s) and then 2756 * reenable interrupts on the nic, we have to do this before calling 2757 * nv_nic_irq because that may decide to do otherwise 2758 */ 2759 2760 if (!using_multi_irqs(dev)) { 2761 if (np->msi_flags & NV_MSI_X_ENABLED) 2762 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 2763 else 2764 disable_irq_lockdep(dev->irq); 2765 mask = np->irqmask; 2766 } else { 2767 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 2768 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 2769 mask |= NVREG_IRQ_RX_ALL; 2770 } 2771 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 2772 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 2773 mask |= NVREG_IRQ_TX_ALL; 2774 } 2775 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 2776 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 2777 mask |= NVREG_IRQ_OTHER; 2778 } 2779 } 2780 np->nic_poll_irq = 0; 2781 2782 /* FIXME: Do we need synchronize_irq(dev->irq) here? */ 2783 2784 writel(mask, base + NvRegIrqMask); 2785 pci_push(base); 2786 2787 if (!using_multi_irqs(dev)) { 2788 nv_nic_irq(0, dev, NULL); 2789 if (np->msi_flags & NV_MSI_X_ENABLED) 2790 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 2791 else 2792 enable_irq_lockdep(dev->irq); 2793 } else { 2794 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 2795 nv_nic_irq_rx(0, dev, NULL); 2796 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 2797 } 2798 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 2799 nv_nic_irq_tx(0, dev, NULL); 2800 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 2801 } 2802 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 2803 nv_nic_irq_other(0, dev, NULL); 2804 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 2805 } 2806 } 2807} 2808 2809#ifdef CONFIG_NET_POLL_CONTROLLER 2810static void nv_poll_controller(struct net_device *dev) 2811{ 2812 nv_do_nic_poll((unsigned long) dev); 2813} 2814#endif 2815 2816static void nv_do_stats_poll(unsigned long data) 2817{ 2818 struct net_device *dev = (struct net_device *) data; 2819 struct fe_priv *np = netdev_priv(dev); 2820 u8 __iomem *base = get_hwbase(dev); 2821 2822 np->estats.tx_bytes += readl(base + NvRegTxCnt); 2823 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); 2824 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); 2825 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); 2826 np->estats.tx_late_collision += readl(base + NvRegTxLateCol); 2827 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); 2828 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); 2829 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); 2830 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); 2831 np->estats.tx_deferral += readl(base + NvRegTxDef); 2832 np->estats.tx_packets += readl(base + NvRegTxFrame); 2833 np->estats.tx_pause += readl(base + NvRegTxPause); 2834 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); 2835 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); 2836 np->estats.rx_late_collision += readl(base + NvRegRxLateCol); 2837 np->estats.rx_runt += readl(base + NvRegRxRunt); 2838 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); 2839 np->estats.rx_over_errors += readl(base + NvRegRxOverflow); 2840 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); 2841 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); 2842 np->estats.rx_length_error += readl(base + NvRegRxLenErr); 2843 np->estats.rx_unicast += readl(base + NvRegRxUnicast); 2844 np->estats.rx_multicast += readl(base + NvRegRxMulticast); 2845 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); 2846 np->estats.rx_bytes += readl(base + NvRegRxCnt); 2847 np->estats.rx_pause += readl(base + NvRegRxPause); 2848 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 2849 np->estats.rx_packets = 2850 np->estats.rx_unicast + 2851 np->estats.rx_multicast + 2852 np->estats.rx_broadcast; 2853 np->estats.rx_errors_total = 2854 np->estats.rx_crc_errors + 2855 np->estats.rx_over_errors + 2856 np->estats.rx_frame_error + 2857 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + 2858 np->estats.rx_late_collision + 2859 np->estats.rx_runt + 2860 np->estats.rx_frame_too_long; 2861 2862 if (!np->in_shutdown) 2863 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 2864} 2865 2866static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2867{ 2868 struct fe_priv *np = netdev_priv(dev); 2869 strcpy(info->driver, "forcedeth"); 2870 strcpy(info->version, FORCEDETH_VERSION); 2871 strcpy(info->bus_info, pci_name(np->pci_dev)); 2872} 2873 2874static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 2875{ 2876 struct fe_priv *np = netdev_priv(dev); 2877 wolinfo->supported = WAKE_MAGIC; 2878 2879 spin_lock_irq(&np->lock); 2880 if (np->wolenabled) 2881 wolinfo->wolopts = WAKE_MAGIC; 2882 spin_unlock_irq(&np->lock); 2883} 2884 2885static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 2886{ 2887 struct fe_priv *np = netdev_priv(dev); 2888 u8 __iomem *base = get_hwbase(dev); 2889 u32 flags = 0; 2890 2891 if (wolinfo->wolopts == 0) { 2892 np->wolenabled = 0; 2893 } else if (wolinfo->wolopts & WAKE_MAGIC) { 2894 np->wolenabled = 1; 2895 flags = NVREG_WAKEUPFLAGS_ENABLE; 2896 } 2897 if (netif_running(dev)) { 2898 spin_lock_irq(&np->lock); 2899 writel(flags, base + NvRegWakeUpFlags); 2900 spin_unlock_irq(&np->lock); 2901 } 2902 return 0; 2903} 2904 2905static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 2906{ 2907 struct fe_priv *np = netdev_priv(dev); 2908 int adv; 2909 2910 spin_lock_irq(&np->lock); 2911 ecmd->port = PORT_MII; 2912 if (!netif_running(dev)) { 2913 /* We do not track link speed / duplex setting if the 2914 * interface is disabled. Force a link check */ 2915 if (nv_update_linkspeed(dev)) { 2916 if (!netif_carrier_ok(dev)) 2917 netif_carrier_on(dev); 2918 } else { 2919 if (netif_carrier_ok(dev)) 2920 netif_carrier_off(dev); 2921 } 2922 } 2923 2924 if (netif_carrier_ok(dev)) { 2925 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 2926 case NVREG_LINKSPEED_10: 2927 ecmd->speed = SPEED_10; 2928 break; 2929 case NVREG_LINKSPEED_100: 2930 ecmd->speed = SPEED_100; 2931 break; 2932 case NVREG_LINKSPEED_1000: 2933 ecmd->speed = SPEED_1000; 2934 break; 2935 } 2936 ecmd->duplex = DUPLEX_HALF; 2937 if (np->duplex) 2938 ecmd->duplex = DUPLEX_FULL; 2939 } else { 2940 ecmd->speed = -1; 2941 ecmd->duplex = -1; 2942 } 2943 2944 ecmd->autoneg = np->autoneg; 2945 2946 ecmd->advertising = ADVERTISED_MII; 2947 if (np->autoneg) { 2948 ecmd->advertising |= ADVERTISED_Autoneg; 2949 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 2950 if (adv & ADVERTISE_10HALF) 2951 ecmd->advertising |= ADVERTISED_10baseT_Half; 2952 if (adv & ADVERTISE_10FULL) 2953 ecmd->advertising |= ADVERTISED_10baseT_Full; 2954 if (adv & ADVERTISE_100HALF) 2955 ecmd->advertising |= ADVERTISED_100baseT_Half; 2956 if (adv & ADVERTISE_100FULL) 2957 ecmd->advertising |= ADVERTISED_100baseT_Full; 2958 if (np->gigabit == PHY_GIGABIT) { 2959 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 2960 if (adv & ADVERTISE_1000FULL) 2961 ecmd->advertising |= ADVERTISED_1000baseT_Full; 2962 } 2963 } 2964 ecmd->supported = (SUPPORTED_Autoneg | 2965 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 2966 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 2967 SUPPORTED_MII); 2968 if (np->gigabit == PHY_GIGABIT) 2969 ecmd->supported |= SUPPORTED_1000baseT_Full; 2970 2971 ecmd->phy_address = np->phyaddr; 2972 ecmd->transceiver = XCVR_EXTERNAL; 2973 2974 /* ignore maxtxpkt, maxrxpkt for now */ 2975 spin_unlock_irq(&np->lock); 2976 return 0; 2977} 2978 2979static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 2980{ 2981 struct fe_priv *np = netdev_priv(dev); 2982 2983 if (ecmd->port != PORT_MII) 2984 return -EINVAL; 2985 if (ecmd->transceiver != XCVR_EXTERNAL) 2986 return -EINVAL; 2987 if (ecmd->phy_address != np->phyaddr) { 2988 /* TODO: support switching between multiple phys. Should be 2989 * trivial, but not enabled due to lack of test hardware. */ 2990 return -EINVAL; 2991 } 2992 if (ecmd->autoneg == AUTONEG_ENABLE) { 2993 u32 mask; 2994 2995 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 2996 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 2997 if (np->gigabit == PHY_GIGABIT) 2998 mask |= ADVERTISED_1000baseT_Full; 2999 3000 if ((ecmd->advertising & mask) == 0) 3001 return -EINVAL; 3002 3003 } else if (ecmd->autoneg == AUTONEG_DISABLE) { 3004 /* Note: autonegotiation disable, speed 1000 intentionally 3005 * forbidden - noone should need that. */ 3006 3007 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) 3008 return -EINVAL; 3009 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 3010 return -EINVAL; 3011 } else { 3012 return -EINVAL; 3013 } 3014 3015 netif_carrier_off(dev); 3016 if (netif_running(dev)) { 3017 nv_disable_irq(dev); 3018 netif_tx_lock_bh(dev); 3019 spin_lock(&np->lock); 3020 /* stop engines */ 3021 nv_stop_rx(dev); 3022 nv_stop_tx(dev); 3023 spin_unlock(&np->lock); 3024 netif_tx_unlock_bh(dev); 3025 } 3026 3027 if (ecmd->autoneg == AUTONEG_ENABLE) { 3028 int adv, bmcr; 3029 3030 np->autoneg = 1; 3031 3032 /* advertise only what has been requested */ 3033 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3034 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3035 if (ecmd->advertising & ADVERTISED_10baseT_Half) 3036 adv |= ADVERTISE_10HALF; 3037 if (ecmd->advertising & ADVERTISED_10baseT_Full) 3038 adv |= ADVERTISE_10FULL; 3039 if (ecmd->advertising & ADVERTISED_100baseT_Half) 3040 adv |= ADVERTISE_100HALF; 3041 if (ecmd->advertising & ADVERTISED_100baseT_Full) 3042 adv |= ADVERTISE_100FULL; 3043 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */ 3044 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 3045 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3046 adv |= ADVERTISE_PAUSE_ASYM; 3047 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 3048 3049 if (np->gigabit == PHY_GIGABIT) { 3050 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3051 adv &= ~ADVERTISE_1000FULL; 3052 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 3053 adv |= ADVERTISE_1000FULL; 3054 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 3055 } 3056 3057 if (netif_running(dev)) 3058 printk(KERN_INFO "%s: link down.\n", dev->name); 3059 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3060 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 3061 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3062 3063 } else { 3064 int adv, bmcr; 3065 3066 np->autoneg = 0; 3067 3068 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3069 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3070 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) 3071 adv |= ADVERTISE_10HALF; 3072 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) 3073 adv |= ADVERTISE_10FULL; 3074 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) 3075 adv |= ADVERTISE_100HALF; 3076 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) 3077 adv |= ADVERTISE_100FULL; 3078 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 3079 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */ 3080 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 3081 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3082 } 3083 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { 3084 adv |= ADVERTISE_PAUSE_ASYM; 3085 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3086 } 3087 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 3088 np->fixed_mode = adv; 3089 3090 if (np->gigabit == PHY_GIGABIT) { 3091 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3092 adv &= ~ADVERTISE_1000FULL; 3093 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 3094 } 3095 3096 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3097 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); 3098 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) 3099 bmcr |= BMCR_FULLDPLX; 3100 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) 3101 bmcr |= BMCR_SPEED100; 3102 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3103 if (np->phy_oui == PHY_OUI_MARVELL) { 3104 /* reset the phy */ 3105 if (phy_reset(dev)) { 3106 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 3107 return -EINVAL; 3108 } 3109 } else if (netif_running(dev)) { 3110 /* Wait a bit and then reconfigure the nic. */ 3111 udelay(10); 3112 nv_linkchange(dev); 3113 } 3114 } 3115 3116 if (netif_running(dev)) { 3117 nv_start_rx(dev); 3118 nv_start_tx(dev); 3119 nv_enable_irq(dev); 3120 } 3121 3122 return 0; 3123} 3124 3125#define FORCEDETH_REGS_VER 1 3126 3127static int nv_get_regs_len(struct net_device *dev) 3128{ 3129 struct fe_priv *np = netdev_priv(dev); 3130 return np->register_size; 3131} 3132 3133static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 3134{ 3135 struct fe_priv *np = netdev_priv(dev); 3136 u8 __iomem *base = get_hwbase(dev); 3137 u32 *rbuf = buf; 3138 int i; 3139 3140 regs->version = FORCEDETH_REGS_VER; 3141 spin_lock_irq(&np->lock); 3142 for (i = 0;i <= np->register_size/sizeof(u32); i++) 3143 rbuf[i] = readl(base + i*sizeof(u32)); 3144 spin_unlock_irq(&np->lock); 3145} 3146 3147static int nv_nway_reset(struct net_device *dev) 3148{ 3149 struct fe_priv *np = netdev_priv(dev); 3150 int ret; 3151 3152 if (np->autoneg) { 3153 int bmcr; 3154 3155 netif_carrier_off(dev); 3156 if (netif_running(dev)) { 3157 nv_disable_irq(dev); 3158 netif_tx_lock_bh(dev); 3159 spin_lock(&np->lock); 3160 /* stop engines */ 3161 nv_stop_rx(dev); 3162 nv_stop_tx(dev); 3163 spin_unlock(&np->lock); 3164 netif_tx_unlock_bh(dev); 3165 printk(KERN_INFO "%s: link down.\n", dev->name); 3166 } 3167 3168 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3169 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 3170 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3171 3172 if (netif_running(dev)) { 3173 nv_start_rx(dev); 3174 nv_start_tx(dev); 3175 nv_enable_irq(dev); 3176 } 3177 ret = 0; 3178 } else { 3179 ret = -EINVAL; 3180 } 3181 3182 return ret; 3183} 3184 3185static int nv_set_tso(struct net_device *dev, u32 value) 3186{ 3187 struct fe_priv *np = netdev_priv(dev); 3188 3189 if ((np->driver_data & DEV_HAS_CHECKSUM)) 3190 return ethtool_op_set_tso(dev, value); 3191 else 3192 return -EOPNOTSUPP; 3193} 3194 3195static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 3196{ 3197 struct fe_priv *np = netdev_priv(dev); 3198 3199 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 3200 ring->rx_mini_max_pending = 0; 3201 ring->rx_jumbo_max_pending = 0; 3202 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 3203 3204 ring->rx_pending = np->rx_ring_size; 3205 ring->rx_mini_pending = 0; 3206 ring->rx_jumbo_pending = 0; 3207 ring->tx_pending = np->tx_ring_size; 3208} 3209 3210static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 3211{ 3212 struct fe_priv *np = netdev_priv(dev); 3213 u8 __iomem *base = get_hwbase(dev); 3214 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len; 3215 dma_addr_t ring_addr; 3216 3217 if (ring->rx_pending < RX_RING_MIN || 3218 ring->tx_pending < TX_RING_MIN || 3219 ring->rx_mini_pending != 0 || 3220 ring->rx_jumbo_pending != 0 || 3221 (np->desc_ver == DESC_VER_1 && 3222 (ring->rx_pending > RING_MAX_DESC_VER_1 || 3223 ring->tx_pending > RING_MAX_DESC_VER_1)) || 3224 (np->desc_ver != DESC_VER_1 && 3225 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 3226 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { 3227 return -EINVAL; 3228 } 3229 3230 /* allocate new rings */ 3231 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3232 rxtx_ring = pci_alloc_consistent(np->pci_dev, 3233 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 3234 &ring_addr); 3235 } else { 3236 rxtx_ring = pci_alloc_consistent(np->pci_dev, 3237 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 3238 &ring_addr); 3239 } 3240 rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL); 3241 rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL); 3242 tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL); 3243 tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL); 3244 tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL); 3245 if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) { 3246 /* fall back to old rings */ 3247 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3248 if(rxtx_ring) 3249 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 3250 rxtx_ring, ring_addr); 3251 } else { 3252 if (rxtx_ring) 3253 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 3254 rxtx_ring, ring_addr); 3255 } 3256 if (rx_skbuff) 3257 kfree(rx_skbuff); 3258 if (rx_dma) 3259 kfree(rx_dma); 3260 if (tx_skbuff) 3261 kfree(tx_skbuff); 3262 if (tx_dma) 3263 kfree(tx_dma); 3264 if (tx_dma_len) 3265 kfree(tx_dma_len); 3266 goto exit; 3267 } 3268 3269 if (netif_running(dev)) { 3270 nv_disable_irq(dev); 3271 netif_tx_lock_bh(dev); 3272 spin_lock(&np->lock); 3273 /* stop engines */ 3274 nv_stop_rx(dev); 3275 nv_stop_tx(dev); 3276 nv_txrx_reset(dev); 3277 /* drain queues */ 3278 nv_drain_rx(dev); 3279 nv_drain_tx(dev); 3280 /* delete queues */ 3281 free_rings(dev); 3282 } 3283 3284 /* set new values */ 3285 np->rx_ring_size = ring->rx_pending; 3286 np->tx_ring_size = ring->tx_pending; 3287 np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE; 3288 np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1; 3289 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3290 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 3291 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 3292 } else { 3293 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 3294 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 3295 } 3296 np->rx_skbuff = (struct sk_buff**)rx_skbuff; 3297 np->rx_dma = (dma_addr_t*)rx_dma; 3298 np->tx_skbuff = (struct sk_buff**)tx_skbuff; 3299 np->tx_dma = (dma_addr_t*)tx_dma; 3300 np->tx_dma_len = (unsigned int*)tx_dma_len; 3301 np->ring_addr = ring_addr; 3302 3303 memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); 3304 memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); 3305 memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size); 3306 memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size); 3307 memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size); 3308 3309 if (netif_running(dev)) { 3310 /* reinit driver view of the queues */ 3311 set_bufsize(dev); 3312 if (nv_init_ring(dev)) { 3313 if (!np->in_shutdown) 3314 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3315 } 3316 3317 /* reinit nic view of the queues */ 3318 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3319 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3320 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3321 base + NvRegRingSizes); 3322 pci_push(base); 3323 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3324 pci_push(base); 3325 3326 /* restart engines */ 3327 nv_start_rx(dev); 3328 nv_start_tx(dev); 3329 spin_unlock(&np->lock); 3330 netif_tx_unlock_bh(dev); 3331 nv_enable_irq(dev); 3332 } 3333 return 0; 3334exit: 3335 return -ENOMEM; 3336} 3337 3338static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 3339{ 3340 struct fe_priv *np = netdev_priv(dev); 3341 3342 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; 3343 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; 3344 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; 3345} 3346 3347static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 3348{ 3349 struct fe_priv *np = netdev_priv(dev); 3350 int adv, bmcr; 3351 3352 if ((!np->autoneg && np->duplex == 0) || 3353 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 3354 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", 3355 dev->name); 3356 return -EINVAL; 3357 } 3358 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 3359 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 3360 return -EINVAL; 3361 } 3362 3363 netif_carrier_off(dev); 3364 if (netif_running(dev)) { 3365 nv_disable_irq(dev); 3366 netif_tx_lock_bh(dev); 3367 spin_lock(&np->lock); 3368 /* stop engines */ 3369 nv_stop_rx(dev); 3370 nv_stop_tx(dev); 3371 spin_unlock(&np->lock); 3372 netif_tx_unlock_bh(dev); 3373 } 3374 3375 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); 3376 if (pause->rx_pause) 3377 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; 3378 if (pause->tx_pause) 3379 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; 3380 3381 if (np->autoneg && pause->autoneg) { 3382 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; 3383 3384 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3385 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3386 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */ 3387 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 3388 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3389 adv |= ADVERTISE_PAUSE_ASYM; 3390 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 3391 3392 if (netif_running(dev)) 3393 printk(KERN_INFO "%s: link down.\n", dev->name); 3394 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3395 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 3396 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3397 } else { 3398 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 3399 if (pause->rx_pause) 3400 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3401 if (pause->tx_pause) 3402 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3403 3404 if (!netif_running(dev)) 3405 nv_update_linkspeed(dev); 3406 else 3407 nv_update_pause(dev, np->pause_flags); 3408 } 3409 3410 if (netif_running(dev)) { 3411 nv_start_rx(dev); 3412 nv_start_tx(dev); 3413 nv_enable_irq(dev); 3414 } 3415 return 0; 3416} 3417 3418static u32 nv_get_rx_csum(struct net_device *dev) 3419{ 3420 struct fe_priv *np = netdev_priv(dev); 3421 return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0; 3422} 3423 3424static int nv_set_rx_csum(struct net_device *dev, u32 data) 3425{ 3426 struct fe_priv *np = netdev_priv(dev); 3427 u8 __iomem *base = get_hwbase(dev); 3428 int retcode = 0; 3429 3430 if (np->driver_data & DEV_HAS_CHECKSUM) { 3431 3432 if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) || 3433 (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) { 3434 /* already set or unset */ 3435 return 0; 3436 } 3437 3438 if (data) { 3439 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 3440 } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) { 3441 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; 3442 } else { 3443 printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n"); 3444 return -EINVAL; 3445 } 3446 3447 if (netif_running(dev)) { 3448 spin_lock_irq(&np->lock); 3449 writel(np->txrxctl_bits, base + NvRegTxRxControl); 3450 spin_unlock_irq(&np->lock); 3451 } 3452 } else { 3453 return -EINVAL; 3454 } 3455 3456 return retcode; 3457} 3458 3459static int nv_set_tx_csum(struct net_device *dev, u32 data) 3460{ 3461 struct fe_priv *np = netdev_priv(dev); 3462 3463 if (np->driver_data & DEV_HAS_CHECKSUM) 3464 return ethtool_op_set_tx_hw_csum(dev, data); 3465 else 3466 return -EOPNOTSUPP; 3467} 3468 3469static int nv_set_sg(struct net_device *dev, u32 data) 3470{ 3471 struct fe_priv *np = netdev_priv(dev); 3472 3473 if (np->driver_data & DEV_HAS_CHECKSUM) 3474 return ethtool_op_set_sg(dev, data); 3475 else 3476 return -EOPNOTSUPP; 3477} 3478 3479static int nv_get_stats_count(struct net_device *dev) 3480{ 3481 struct fe_priv *np = netdev_priv(dev); 3482 3483 if (np->driver_data & DEV_HAS_STATISTICS) 3484 return (sizeof(struct nv_ethtool_stats)/sizeof(u64)); 3485 else 3486 return 0; 3487} 3488 3489static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) 3490{ 3491 struct fe_priv *np = netdev_priv(dev); 3492 3493 /* update stats */ 3494 nv_do_stats_poll((unsigned long)dev); 3495 3496 memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64)); 3497} 3498 3499static int nv_self_test_count(struct net_device *dev) 3500{ 3501 struct fe_priv *np = netdev_priv(dev); 3502 3503 if (np->driver_data & DEV_HAS_TEST_EXTENDED) 3504 return NV_TEST_COUNT_EXTENDED; 3505 else 3506 return NV_TEST_COUNT_BASE; 3507} 3508 3509static int nv_link_test(struct net_device *dev) 3510{ 3511 struct fe_priv *np = netdev_priv(dev); 3512 int mii_status; 3513 3514 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3515 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3516 3517 /* check phy link status */ 3518 if (!(mii_status & BMSR_LSTATUS)) 3519 return 0; 3520 else 3521 return 1; 3522} 3523 3524static int nv_register_test(struct net_device *dev) 3525{ 3526 u8 __iomem *base = get_hwbase(dev); 3527 int i = 0; 3528 u32 orig_read, new_read; 3529 3530 do { 3531 orig_read = readl(base + nv_registers_test[i].reg); 3532 3533 /* xor with mask to toggle bits */ 3534 orig_read ^= nv_registers_test[i].mask; 3535 3536 writel(orig_read, base + nv_registers_test[i].reg); 3537 3538 new_read = readl(base + nv_registers_test[i].reg); 3539 3540 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) 3541 return 0; 3542 3543 /* restore original value */ 3544 orig_read ^= nv_registers_test[i].mask; 3545 writel(orig_read, base + nv_registers_test[i].reg); 3546 3547 } while (nv_registers_test[++i].reg != 0); 3548 3549 return 1; 3550} 3551 3552static int nv_interrupt_test(struct net_device *dev) 3553{ 3554 struct fe_priv *np = netdev_priv(dev); 3555 u8 __iomem *base = get_hwbase(dev); 3556 int ret = 1; 3557 int testcnt; 3558 u32 save_msi_flags, save_poll_interval = 0; 3559 3560 if (netif_running(dev)) { 3561 /* free current irq */ 3562 nv_free_irq(dev); 3563 save_poll_interval = readl(base+NvRegPollingInterval); 3564 } 3565 3566 /* flag to test interrupt handler */ 3567 np->intr_test = 0; 3568 3569 /* setup test irq */ 3570 save_msi_flags = np->msi_flags; 3571 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; 3572 np->msi_flags |= 0x001; /* setup 1 vector */ 3573 if (nv_request_irq(dev, 1)) 3574 return 0; 3575 3576 /* setup timer interrupt */ 3577 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 3578 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 3579 3580 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); 3581 3582 /* wait for at least one interrupt */ 3583 msleep(100); 3584 3585 spin_lock_irq(&np->lock); 3586 3587 /* flag should be set within ISR */ 3588 testcnt = np->intr_test; 3589 if (!testcnt) 3590 ret = 2; 3591 3592 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); 3593 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3594 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3595 else 3596 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 3597 3598 spin_unlock_irq(&np->lock); 3599 3600 nv_free_irq(dev); 3601 3602 np->msi_flags = save_msi_flags; 3603 3604 if (netif_running(dev)) { 3605 writel(save_poll_interval, base + NvRegPollingInterval); 3606 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 3607 /* restore original irq */ 3608 if (nv_request_irq(dev, 0)) 3609 return 0; 3610 } 3611 3612 return ret; 3613} 3614 3615static int nv_loopback_test(struct net_device *dev) 3616{ 3617 struct fe_priv *np = netdev_priv(dev); 3618 u8 __iomem *base = get_hwbase(dev); 3619 struct sk_buff *tx_skb, *rx_skb; 3620 dma_addr_t test_dma_addr; 3621 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 3622 u32 Flags; 3623 int len, i, pkt_len; 3624 u8 *pkt_data; 3625 u32 filter_flags = 0; 3626 u32 misc1_flags = 0; 3627 int ret = 1; 3628 3629 if (netif_running(dev)) { 3630 nv_disable_irq(dev); 3631 filter_flags = readl(base + NvRegPacketFilterFlags); 3632 misc1_flags = readl(base + NvRegMisc1); 3633 } else { 3634 nv_txrx_reset(dev); 3635 } 3636 3637 /* reinit driver view of the rx queue */ 3638 set_bufsize(dev); 3639 nv_init_ring(dev); 3640 3641 /* setup hardware for loopback */ 3642 writel(NVREG_MISC1_FORCE, base + NvRegMisc1); 3643 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); 3644 3645 /* reinit nic view of the rx queue */ 3646 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3647 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3648 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3649 base + NvRegRingSizes); 3650 pci_push(base); 3651 3652 /* restart rx engine */ 3653 nv_start_rx(dev); 3654 nv_start_tx(dev); 3655 3656 /* setup packet for tx */ 3657 pkt_len = ETH_DATA_LEN; 3658 tx_skb = dev_alloc_skb(pkt_len); 3659 pkt_data = skb_put(tx_skb, pkt_len); 3660 for (i = 0; i < pkt_len; i++) 3661 pkt_data[i] = (u8)(i & 0xff); 3662 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, 3663 tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE); 3664 3665 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3666 np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr); 3667 np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 3668 } else { 3669 np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32; 3670 np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF; 3671 np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 3672 } 3673 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3674 pci_push(get_hwbase(dev)); 3675 3676 msleep(500); 3677 3678 /* check for rx of the packet */ 3679 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3680 Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen); 3681 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 3682 3683 } else { 3684 Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen); 3685 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 3686 } 3687 3688 if (Flags & NV_RX_AVAIL) { 3689 ret = 0; 3690 } else if (np->desc_ver == DESC_VER_1) { 3691 if (Flags & NV_RX_ERROR) 3692 ret = 0; 3693 } else { 3694 if (Flags & NV_RX2_ERROR) { 3695 ret = 0; 3696 } 3697 } 3698 3699 if (ret) { 3700 if (len != pkt_len) { 3701 ret = 0; 3702 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 3703 dev->name, len, pkt_len); 3704 } else { 3705 rx_skb = np->rx_skbuff[0]; 3706 for (i = 0; i < pkt_len; i++) { 3707 if (rx_skb->data[i] != (u8)(i & 0xff)) { 3708 ret = 0; 3709 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", 3710 dev->name, i); 3711 break; 3712 } 3713 } 3714 } 3715 } else { 3716 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); 3717 } 3718 3719 pci_unmap_page(np->pci_dev, test_dma_addr, 3720 tx_skb->end-tx_skb->data, 3721 PCI_DMA_TODEVICE); 3722 dev_kfree_skb_any(tx_skb); 3723 3724 /* stop engines */ 3725 nv_stop_rx(dev); 3726 nv_stop_tx(dev); 3727 nv_txrx_reset(dev); 3728 /* drain rx queue */ 3729 nv_drain_rx(dev); 3730 nv_drain_tx(dev); 3731 3732 if (netif_running(dev)) { 3733 writel(misc1_flags, base + NvRegMisc1); 3734 writel(filter_flags, base + NvRegPacketFilterFlags); 3735 nv_enable_irq(dev); 3736 } 3737 3738 return ret; 3739} 3740 3741static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) 3742{ 3743 struct fe_priv *np = netdev_priv(dev); 3744 u8 __iomem *base = get_hwbase(dev); 3745 int result; 3746 memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64)); 3747 3748 if (!nv_link_test(dev)) { 3749 test->flags |= ETH_TEST_FL_FAILED; 3750 buffer[0] = 1; 3751 } 3752 3753 if (test->flags & ETH_TEST_FL_OFFLINE) { 3754 if (netif_running(dev)) { 3755 netif_stop_queue(dev); 3756 netif_tx_lock_bh(dev); 3757 spin_lock_irq(&np->lock); 3758 nv_disable_hw_interrupts(dev, np->irqmask); 3759 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3760 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3761 } else { 3762 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 3763 } 3764 /* stop engines */ 3765 nv_stop_rx(dev); 3766 nv_stop_tx(dev); 3767 nv_txrx_reset(dev); 3768 /* drain rx queue */ 3769 nv_drain_rx(dev); 3770 nv_drain_tx(dev); 3771 spin_unlock_irq(&np->lock); 3772 netif_tx_unlock_bh(dev); 3773 } 3774 3775 if (!nv_register_test(dev)) { 3776 test->flags |= ETH_TEST_FL_FAILED; 3777 buffer[1] = 1; 3778 } 3779 3780 result = nv_interrupt_test(dev); 3781 if (result != 1) { 3782 test->flags |= ETH_TEST_FL_FAILED; 3783 buffer[2] = 1; 3784 } 3785 if (result == 0) { 3786 /* bail out */ 3787 return; 3788 } 3789 3790 if (!nv_loopback_test(dev)) { 3791 test->flags |= ETH_TEST_FL_FAILED; 3792 buffer[3] = 1; 3793 } 3794 3795 if (netif_running(dev)) { 3796 /* reinit driver view of the rx queue */ 3797 set_bufsize(dev); 3798 if (nv_init_ring(dev)) { 3799 if (!np->in_shutdown) 3800 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3801 } 3802 /* reinit nic view of the rx queue */ 3803 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3804 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3805 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3806 base + NvRegRingSizes); 3807 pci_push(base); 3808 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3809 pci_push(base); 3810 /* restart rx engine */ 3811 nv_start_rx(dev); 3812 nv_start_tx(dev); 3813 netif_start_queue(dev); 3814 nv_enable_hw_interrupts(dev, np->irqmask); 3815 } 3816 } 3817} 3818 3819static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) 3820{ 3821 switch (stringset) { 3822 case ETH_SS_STATS: 3823 memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str)); 3824 break; 3825 case ETH_SS_TEST: 3826 memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str)); 3827 break; 3828 } 3829} 3830 3831static struct ethtool_ops ops = { 3832 .get_drvinfo = nv_get_drvinfo, 3833 .get_link = ethtool_op_get_link, 3834 .get_wol = nv_get_wol, 3835 .set_wol = nv_set_wol, 3836 .get_settings = nv_get_settings, 3837 .set_settings = nv_set_settings, 3838 .get_regs_len = nv_get_regs_len, 3839 .get_regs = nv_get_regs, 3840 .nway_reset = nv_nway_reset, 3841 .get_perm_addr = ethtool_op_get_perm_addr, 3842 .get_tso = ethtool_op_get_tso, 3843 .set_tso = nv_set_tso, 3844 .get_ringparam = nv_get_ringparam, 3845 .set_ringparam = nv_set_ringparam, 3846 .get_pauseparam = nv_get_pauseparam, 3847 .set_pauseparam = nv_set_pauseparam, 3848 .get_rx_csum = nv_get_rx_csum, 3849 .set_rx_csum = nv_set_rx_csum, 3850 .get_tx_csum = ethtool_op_get_tx_csum, 3851 .set_tx_csum = nv_set_tx_csum, 3852 .get_sg = ethtool_op_get_sg, 3853 .set_sg = nv_set_sg, 3854 .get_strings = nv_get_strings, 3855 .get_stats_count = nv_get_stats_count, 3856 .get_ethtool_stats = nv_get_ethtool_stats, 3857 .self_test_count = nv_self_test_count, 3858 .self_test = nv_self_test, 3859}; 3860 3861static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 3862{ 3863 struct fe_priv *np = get_nvpriv(dev); 3864 3865 spin_lock_irq(&np->lock); 3866 3867 /* save vlan group */ 3868 np->vlangrp = grp; 3869 3870 if (grp) { 3871 /* enable vlan on MAC */ 3872 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; 3873 } else { 3874 /* disable vlan on MAC */ 3875 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 3876 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 3877 } 3878 3879 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3880 3881 spin_unlock_irq(&np->lock); 3882}; 3883 3884static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 3885{ 3886 /* nothing to do */ 3887}; 3888 3889static int nv_open(struct net_device *dev) 3890{ 3891 struct fe_priv *np = netdev_priv(dev); 3892 u8 __iomem *base = get_hwbase(dev); 3893 int ret = 1; 3894 int oom, i; 3895 3896 dprintk(KERN_DEBUG "nv_open: begin\n"); 3897 3898 /* 1) erase previous misconfiguration */ 3899 if (np->driver_data & DEV_HAS_POWER_CNTRL) 3900 nv_mac_reset(dev); 3901 /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */ 3902 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 3903 writel(0, base + NvRegMulticastAddrB); 3904 writel(0, base + NvRegMulticastMaskA); 3905 writel(0, base + NvRegMulticastMaskB); 3906 writel(0, base + NvRegPacketFilterFlags); 3907 3908 writel(0, base + NvRegTransmitterControl); 3909 writel(0, base + NvRegReceiverControl); 3910 3911 writel(0, base + NvRegAdapterControl); 3912 3913 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) 3914 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 3915 3916 /* 2) initialize descriptor rings */ 3917 set_bufsize(dev); 3918 oom = nv_init_ring(dev); 3919 3920 writel(0, base + NvRegLinkSpeed); 3921 writel(0, base + NvRegUnknownTransmitterReg); 3922 nv_txrx_reset(dev); 3923 writel(0, base + NvRegUnknownSetupReg6); 3924 3925 np->in_shutdown = 0; 3926 3927 /* 3) set mac address */ 3928 nv_copy_mac_to_hw(dev); 3929 3930 /* 4) give hw rings */ 3931 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3932 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3933 base + NvRegRingSizes); 3934 3935 /* 5) continue setup */ 3936 writel(np->linkspeed, base + NvRegLinkSpeed); 3937 if (np->desc_ver == DESC_VER_1) 3938 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); 3939 else 3940 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); 3941 writel(np->txrxctl_bits, base + NvRegTxRxControl); 3942 writel(np->vlanctl_bits, base + NvRegVlanControl); 3943 pci_push(base); 3944 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 3945 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 3946 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 3947 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 3948 3949 writel(0, base + NvRegUnknownSetupReg4); 3950 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3951 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 3952 3953 /* 6) continue setup */ 3954 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 3955 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 3956 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 3957 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3958 3959 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 3960 get_random_bytes(&i, sizeof(i)); 3961 writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); 3962 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); 3963 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); 3964 if (poll_interval == -1) { 3965 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 3966 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 3967 else 3968 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 3969 } 3970 else 3971 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 3972 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 3973 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 3974 base + NvRegAdapterControl); 3975 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); 3976 writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4); 3977 if (np->wolenabled) 3978 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 3979 3980 i = readl(base + NvRegPowerState); 3981 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 3982 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 3983 3984 pci_push(base); 3985 udelay(10); 3986 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); 3987 3988 nv_disable_hw_interrupts(dev, np->irqmask); 3989 pci_push(base); 3990 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 3991 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3992 pci_push(base); 3993 3994 if (nv_request_irq(dev, 0)) { 3995 goto out_drain; 3996 } 3997 3998 /* ask for interrupts */ 3999 nv_enable_hw_interrupts(dev, np->irqmask); 4000 4001 spin_lock_irq(&np->lock); 4002 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 4003 writel(0, base + NvRegMulticastAddrB); 4004 writel(0, base + NvRegMulticastMaskA); 4005 writel(0, base + NvRegMulticastMaskB); 4006 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 4007 /* One manual link speed update: Interrupts are enabled, future link 4008 * speed changes cause interrupts and are handled by nv_link_irq(). 4009 */ 4010 { 4011 u32 miistat; 4012 miistat = readl(base + NvRegMIIStatus); 4013 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 4014 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); 4015 } 4016 /* set linkspeed to invalid value, thus force nv_update_linkspeed 4017 * to init hw */ 4018 np->linkspeed = 0; 4019 ret = nv_update_linkspeed(dev); 4020 nv_start_rx(dev); 4021 nv_start_tx(dev); 4022 netif_start_queue(dev); 4023 if (ret) { 4024 netif_carrier_on(dev); 4025 } else { 4026 printk("%s: no link during initialization.\n", dev->name); 4027 netif_carrier_off(dev); 4028 } 4029 if (oom) 4030 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4031 4032 /* start statistics timer */ 4033 if (np->driver_data & DEV_HAS_STATISTICS) 4034 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 4035 4036 spin_unlock_irq(&np->lock); 4037 4038 return 0; 4039out_drain: 4040 drain_ring(dev); 4041 return ret; 4042} 4043 4044static int nv_close(struct net_device *dev) 4045{ 4046 struct fe_priv *np = netdev_priv(dev); 4047 u8 __iomem *base; 4048 4049 spin_lock_irq(&np->lock); 4050 np->in_shutdown = 1; 4051 spin_unlock_irq(&np->lock); 4052 synchronize_irq(dev->irq); 4053 4054 del_timer_sync(&np->oom_kick); 4055 del_timer_sync(&np->nic_poll); 4056 del_timer_sync(&np->stats_poll); 4057 4058 netif_stop_queue(dev); 4059 spin_lock_irq(&np->lock); 4060 nv_stop_tx(dev); 4061 nv_stop_rx(dev); 4062 nv_txrx_reset(dev); 4063 4064 /* disable interrupts on the nic or we will lock up */ 4065 base = get_hwbase(dev); 4066 nv_disable_hw_interrupts(dev, np->irqmask); 4067 pci_push(base); 4068 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); 4069 4070 spin_unlock_irq(&np->lock); 4071 4072 nv_free_irq(dev); 4073 4074 drain_ring(dev); 4075 4076 if (np->wolenabled) 4077 nv_start_rx(dev); 4078 4079 /* special op: write back the misordered MAC address - otherwise 4080 * the next nv_probe would see a wrong address. 4081 */ 4082 writel(np->orig_mac[0], base + NvRegMacAddrA); 4083 writel(np->orig_mac[1], base + NvRegMacAddrB); 4084 4085 /* FIXME: power down nic */ 4086 4087 return 0; 4088} 4089 4090static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 4091{ 4092 struct net_device *dev; 4093 struct fe_priv *np; 4094 unsigned long addr; 4095 u8 __iomem *base; 4096 int err, i; 4097 u32 powerstate; 4098 4099 dev = alloc_etherdev(sizeof(struct fe_priv)); 4100 err = -ENOMEM; 4101 if (!dev) 4102 goto out; 4103 4104 np = netdev_priv(dev); 4105 np->pci_dev = pci_dev; 4106 spin_lock_init(&np->lock); 4107 SET_MODULE_OWNER(dev); 4108 SET_NETDEV_DEV(dev, &pci_dev->dev); 4109 4110 init_timer(&np->oom_kick); 4111 np->oom_kick.data = (unsigned long) dev; 4112 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ 4113 init_timer(&np->nic_poll); 4114 np->nic_poll.data = (unsigned long) dev; 4115 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ 4116 init_timer(&np->stats_poll); 4117 np->stats_poll.data = (unsigned long) dev; 4118 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ 4119 4120 err = pci_enable_device(pci_dev); 4121 if (err) { 4122 printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n", 4123 err, pci_name(pci_dev)); 4124 goto out_free; 4125 } 4126 4127 pci_set_master(pci_dev); 4128 4129 err = pci_request_regions(pci_dev, DRV_NAME); 4130 if (err < 0) 4131 goto out_disable; 4132 4133 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS)) 4134 np->register_size = NV_PCI_REGSZ_VER2; 4135 else 4136 np->register_size = NV_PCI_REGSZ_VER1; 4137 4138 err = -EINVAL; 4139 addr = 0; 4140 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 4141 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", 4142 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), 4143 pci_resource_len(pci_dev, i), 4144 pci_resource_flags(pci_dev, i)); 4145 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 4146 pci_resource_len(pci_dev, i) >= np->register_size) { 4147 addr = pci_resource_start(pci_dev, i); 4148 break; 4149 } 4150 } 4151 if (i == DEVICE_COUNT_RESOURCE) { 4152 printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n", 4153 pci_name(pci_dev)); 4154 goto out_relreg; 4155 } 4156 4157 /* copy of driver data */ 4158 np->driver_data = id->driver_data; 4159 4160 /* handle different descriptor versions */ 4161 if (id->driver_data & DEV_HAS_HIGH_DMA) { 4162 /* packet format 3: supports 40-bit addressing */ 4163 np->desc_ver = DESC_VER_3; 4164 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 4165 if (dma_64bit) { 4166 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { 4167 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", 4168 pci_name(pci_dev)); 4169 } else { 4170 dev->features |= NETIF_F_HIGHDMA; 4171 printk(KERN_INFO "forcedeth: using HIGHDMA\n"); 4172 } 4173 if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) { 4174 printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n", 4175 pci_name(pci_dev)); 4176 } 4177 } 4178 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 4179 /* packet format 2: supports jumbo frames */ 4180 np->desc_ver = DESC_VER_2; 4181 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; 4182 } else { 4183 /* original packet format */ 4184 np->desc_ver = DESC_VER_1; 4185 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; 4186 } 4187 4188 np->pkt_limit = NV_PKTLIMIT_1; 4189 if (id->driver_data & DEV_HAS_LARGEDESC) 4190 np->pkt_limit = NV_PKTLIMIT_2; 4191 4192 if (id->driver_data & DEV_HAS_CHECKSUM) { 4193 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4194 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 4195#ifdef NETIF_F_TSO 4196 dev->features |= NETIF_F_TSO; 4197#endif 4198 } 4199 4200 np->vlanctl_bits = 0; 4201 if (id->driver_data & DEV_HAS_VLAN) { 4202 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; 4203 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; 4204 dev->vlan_rx_register = nv_vlan_rx_register; 4205 dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid; 4206 } 4207 4208 np->msi_flags = 0; 4209 if ((id->driver_data & DEV_HAS_MSI) && msi) { 4210 np->msi_flags |= NV_MSI_CAPABLE; 4211 } 4212 if ((id->driver_data & DEV_HAS_MSI_X) && msix) { 4213 np->msi_flags |= NV_MSI_X_CAPABLE; 4214 } 4215 4216 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; 4217 if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) { 4218 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; 4219 } 4220 4221 4222 err = -ENOMEM; 4223 np->base = ioremap(addr, np->register_size); 4224 if (!np->base) 4225 goto out_relreg; 4226 dev->base_addr = (unsigned long)np->base; 4227 4228 dev->irq = pci_dev->irq; 4229 4230 np->rx_ring_size = RX_RING_DEFAULT; 4231 np->tx_ring_size = TX_RING_DEFAULT; 4232 np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE; 4233 np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1; 4234 4235 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4236 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 4237 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 4238 &np->ring_addr); 4239 if (!np->rx_ring.orig) 4240 goto out_unmap; 4241 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4242 } else { 4243 np->rx_ring.ex = pci_alloc_consistent(pci_dev, 4244 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 4245 &np->ring_addr); 4246 if (!np->rx_ring.ex) 4247 goto out_unmap; 4248 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4249 } 4250 np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL); 4251 np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL); 4252 np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL); 4253 np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL); 4254 np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL); 4255 if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len) 4256 goto out_freering; 4257 memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); 4258 memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); 4259 memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size); 4260 memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size); 4261 memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size); 4262 4263 dev->open = nv_open; 4264 dev->stop = nv_close; 4265 dev->hard_start_xmit = nv_start_xmit; 4266 dev->get_stats = nv_get_stats; 4267 dev->change_mtu = nv_change_mtu; 4268 dev->set_mac_address = nv_set_mac_address; 4269 dev->set_multicast_list = nv_set_multicast; 4270#ifdef CONFIG_NET_POLL_CONTROLLER 4271 dev->poll_controller = nv_poll_controller; 4272#endif 4273 SET_ETHTOOL_OPS(dev, &ops); 4274 dev->tx_timeout = nv_tx_timeout; 4275 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 4276 4277 pci_set_drvdata(pci_dev, dev); 4278 4279 /* read the mac address */ 4280 base = get_hwbase(dev); 4281 np->orig_mac[0] = readl(base + NvRegMacAddrA); 4282 np->orig_mac[1] = readl(base + NvRegMacAddrB); 4283 4284 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; 4285 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; 4286 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; 4287 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; 4288 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 4289 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 4290 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 4291 4292 if (!is_valid_ether_addr(dev->perm_addr)) { 4293 /* 4294 * Bad mac address. At least one bios sets the mac address 4295 * to 01:23:45:67:89:ab 4296 */ 4297 printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n", 4298 pci_name(pci_dev), 4299 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 4300 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 4301 printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n"); 4302 dev->dev_addr[0] = 0x00; 4303 dev->dev_addr[1] = 0x00; 4304 dev->dev_addr[2] = 0x6c; 4305 get_random_bytes(&dev->dev_addr[3], 3); 4306 } 4307 4308 dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev), 4309 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 4310 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 4311 4312 /* disable WOL */ 4313 writel(0, base + NvRegWakeUpFlags); 4314 np->wolenabled = 0; 4315 4316 if (id->driver_data & DEV_HAS_POWER_CNTRL) { 4317 u8 revision_id; 4318 pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id); 4319 4320 /* take phy and nic out of low power mode */ 4321 powerstate = readl(base + NvRegPowerState2); 4322 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; 4323 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || 4324 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) && 4325 revision_id >= 0xA3) 4326 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; 4327 writel(powerstate, base + NvRegPowerState2); 4328 } 4329 4330 if (np->desc_ver == DESC_VER_1) { 4331 np->tx_flags = NV_TX_VALID; 4332 } else { 4333 np->tx_flags = NV_TX2_VALID; 4334 } 4335 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { 4336 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 4337 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 4338 np->msi_flags |= 0x0003; 4339 } else { 4340 np->irqmask = NVREG_IRQMASK_CPU; 4341 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 4342 np->msi_flags |= 0x0001; 4343 } 4344 4345 if (id->driver_data & DEV_NEED_TIMERIRQ) 4346 np->irqmask |= NVREG_IRQ_TIMER; 4347 if (id->driver_data & DEV_NEED_LINKTIMER) { 4348 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev)); 4349 np->need_linktimer = 1; 4350 np->link_timeout = jiffies + LINK_TIMEOUT; 4351 } else { 4352 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev)); 4353 np->need_linktimer = 0; 4354 } 4355 4356 /* find a suitable phy */ 4357 for (i = 1; i <= 32; i++) { 4358 int id1, id2; 4359 int phyaddr = i & 0x1F; 4360 4361 spin_lock_irq(&np->lock); 4362 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ); 4363 spin_unlock_irq(&np->lock); 4364 if (id1 < 0 || id1 == 0xffff) 4365 continue; 4366 spin_lock_irq(&np->lock); 4367 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ); 4368 spin_unlock_irq(&np->lock); 4369 if (id2 < 0 || id2 == 0xffff) 4370 continue; 4371 4372 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; 4373 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; 4374 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", 4375 pci_name(pci_dev), id1, id2, phyaddr); 4376 np->phyaddr = phyaddr; 4377 np->phy_oui = id1 | id2; 4378 break; 4379 } 4380 if (i == 33) { 4381 printk(KERN_INFO "%s: open: Could not find a valid PHY.\n", 4382 pci_name(pci_dev)); 4383 goto out_error; 4384 } 4385 4386 /* reset it */ 4387 phy_init(dev); 4388 4389 /* set default link speed settings */ 4390 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 4391 np->duplex = 0; 4392 np->autoneg = 1; 4393 4394 err = register_netdev(dev); 4395 if (err) { 4396 printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err); 4397 goto out_error; 4398 } 4399 printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n", 4400 dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device, 4401 pci_name(pci_dev)); 4402 4403 return 0; 4404 4405out_error: 4406 pci_set_drvdata(pci_dev, NULL); 4407out_freering: 4408 free_rings(dev); 4409out_unmap: 4410 iounmap(get_hwbase(dev)); 4411out_relreg: 4412 pci_release_regions(pci_dev); 4413out_disable: 4414 pci_disable_device(pci_dev); 4415out_free: 4416 free_netdev(dev); 4417out: 4418 return err; 4419} 4420 4421static void __devexit nv_remove(struct pci_dev *pci_dev) 4422{ 4423 struct net_device *dev = pci_get_drvdata(pci_dev); 4424 4425 unregister_netdev(dev); 4426 4427 /* free all structures */ 4428 free_rings(dev); 4429 iounmap(get_hwbase(dev)); 4430 pci_release_regions(pci_dev); 4431 pci_disable_device(pci_dev); 4432 free_netdev(dev); 4433 pci_set_drvdata(pci_dev, NULL); 4434} 4435 4436static struct pci_device_id pci_tbl[] = { 4437 { /* nForce Ethernet Controller */ 4438 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1), 4439 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 4440 }, 4441 { /* nForce2 Ethernet Controller */ 4442 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2), 4443 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 4444 }, 4445 { /* nForce3 Ethernet Controller */ 4446 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3), 4447 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 4448 }, 4449 { /* nForce3 Ethernet Controller */ 4450 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4), 4451 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 4452 }, 4453 { /* nForce3 Ethernet Controller */ 4454 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5), 4455 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 4456 }, 4457 { /* nForce3 Ethernet Controller */ 4458 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6), 4459 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 4460 }, 4461 { /* nForce3 Ethernet Controller */ 4462 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7), 4463 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 4464 }, 4465 { /* CK804 Ethernet Controller */ 4466 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), 4467 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 4468 }, 4469 { /* CK804 Ethernet Controller */ 4470 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), 4471 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 4472 }, 4473 { /* MCP04 Ethernet Controller */ 4474 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), 4475 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 4476 }, 4477 { /* MCP04 Ethernet Controller */ 4478 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), 4479 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 4480 }, 4481 { /* MCP51 Ethernet Controller */ 4482 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), 4483 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, 4484 }, 4485 { /* MCP51 Ethernet Controller */ 4486 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), 4487 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, 4488 }, 4489 { /* MCP55 Ethernet Controller */ 4490 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 4491 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, 4492 }, 4493 { /* MCP55 Ethernet Controller */ 4494 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 4495 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, 4496 }, 4497 { /* MCP61 Ethernet Controller */ 4498 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), 4499 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, 4500 }, 4501 { /* MCP61 Ethernet Controller */ 4502 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), 4503 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, 4504 }, 4505 { /* MCP61 Ethernet Controller */ 4506 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), 4507 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, 4508 }, 4509 { /* MCP61 Ethernet Controller */ 4510 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), 4511 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, 4512 }, 4513 { /* MCP65 Ethernet Controller */ 4514 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), 4515 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, 4516 }, 4517 { /* MCP65 Ethernet Controller */ 4518 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), 4519 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, 4520 }, 4521 { /* MCP65 Ethernet Controller */ 4522 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), 4523 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, 4524 }, 4525 { /* MCP65 Ethernet Controller */ 4526 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), 4527 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, 4528 }, 4529 {0,}, 4530}; 4531 4532static struct pci_driver driver = { 4533 .name = "forcedeth", 4534 .id_table = pci_tbl, 4535 .probe = nv_probe, 4536 .remove = __devexit_p(nv_remove), 4537}; 4538 4539 4540static int __init init_nic(void) 4541{ 4542 printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); 4543 return pci_module_init(&driver); 4544} 4545 4546static void __exit exit_nic(void) 4547{ 4548 pci_unregister_driver(&driver); 4549} 4550 4551module_param(max_interrupt_work, int, 0); 4552MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt"); 4553module_param(optimization_mode, int, 0); 4554MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer."); 4555module_param(poll_interval, int, 0); 4556MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535."); 4557module_param(msi, int, 0); 4558MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0."); 4559module_param(msix, int, 0); 4560MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0."); 4561module_param(dma_64bit, int, 0); 4562MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); 4563 4564MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); 4565MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); 4566MODULE_LICENSE("GPL"); 4567 4568MODULE_DEVICE_TABLE(pci, pci_tbl); 4569 4570module_init(init_nic); 4571module_exit(exit_nic);