/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,5,6 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Changelog:
 *	0.01: 05 Oct 2003: First release that compiles without warnings.
 *	0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *			   Check all PCI BARs for the register window.
 *			   udelay added to mii_rw.
 *	0.03: 06 Oct 2003: Initialize dev->irq.
 *	0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 *	0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 *	0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 *			   irq mask updated
 *	0.07: 14 Oct 2003: Further irq mask updates.
 *	0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *			   added into irq handler, NULL check for drain_ring.
 *	0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *			   requested interrupt sources.
 *	0.10: 20 Oct 2003: First cleanup for release.
 *	0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *			   MAC Address init fix, set_multicast cleanup.
 *	0.12: 23 Oct 2003: Cleanups for release.
 *	0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *			   Set link speed correctly. start rx before starting
 *			   tx (nv_start_rx sets the link speed).
 *	0.14: 25 Oct 2003: Nic dependent irq mask.
 *	0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 *			   open.
 *	0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *			   increased to 1628 bytes.
 *	0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 *			   the tx length.
 *	0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 *	0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *			   addresses, really stop rx if already running
 *			   in nv_start_rx, clean up a bit.
 *	0.20: 07 Dec 2003: alloc fixes
 *	0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 *	0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 *			   on close.
 *	0.23: 26 Jan 2004: various small cleanups
 *	0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 *	0.25: 09 Mar 2004: wol support
 *	0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 *	0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *			   added CK804/MCP04 device IDs, code fixes
 *			   for registers, link status and other minor fixes.
 *	0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 *	0.29: 31 Aug 2004: Add backup timer for link change notification.
 *	0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *			   into nv_close, otherwise reenabling for wol can
 *			   cause DMA to kfree'd memory.
 *	0.31: 14 Nov 2004: ethtool support for getting/setting link
 *			   capabilities.
 *	0.32: 16 Apr 2005: RX_ERROR4 handling added.
 *	0.33: 16 May 2005: Support for MCP51 added.
 *	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 *	0.35: 26 Jun 2005: Support for MCP55 added.
 *	0.36: 28 Jun 2005: Add jumbo frame support.
 *	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 *	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 *			   per-packet flags.
 *	0.39: 18 Jul 2005: Add 64bit descriptor support.
 *	0.40: 19 Jul 2005: Add support for mac address change.
 *	0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 *			   of nv_remove
 *	0.42: 06 Aug 2005: Fix lack of link speed initialization
 *			   in the second (and later) nv_open call
 *	0.43: 10 Aug 2005: Add support for tx checksum.
 *	0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 *	0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 *	0.46: 20 Oct 2005: Add irq optimization modes.
 *	0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 *	0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 *	0.49: 10 Dec 2005: Fix tso for large buffers.
 *	0.50: 20 Jan 2006: Add 8021pq tagging support.
 *	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 *	0.52: 20 Jan 2006: Add MSI/MSIX support.
 *	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 *	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 *	0.55: 22 Mar 2006: Add flow control (pause frame).
 *	0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
 *	0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
 *	0.58: 30 Oct 2006: Added support for sideband management unit.
 *	0.59: 30 Oct 2006: Added support for recoverable error.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#ifdef CONFIG_FORCEDETH_NAPI
#define DRIVERNAPI "-NAPI"
#else
#define DRIVERNAPI
#endif
#define FORCEDETH_VERSION	"0.59"
#define DRV_NAME	"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk	printk
#else
#define dprintk(x...)	do { } while (0)
#endif

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and stripping */
#define DEV_HAS_MSI		0x0040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX	0x0200	/* device supports tx pause frames */
#define DEV_HAS_STATISTICS	0x0400	/* device supports hw statistics */
#define DEV_HAS_TEST_EXTENDED	0x0800	/* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x1000	/* device supports management unit */
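
/*
 * Note (editorial): these capability bits are OR-ed together into the
 * per-chipset driver_data and tested through np->driver_data; the
 * "Known bugs" comment above refers to dropping DEV_NEED_TIMERIRQ from
 * exactly these flags.
 */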

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x81ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR	0x0001
#define NVREG_IRQ_RX		0x0002
#define NVREG_IRQ_RX_NOBUF	0x0004
#define NVREG_IRQ_TX_ERR	0x0008
#define NVREG_IRQ_TX_OK		0x0010
#define NVREG_IRQ_TIMER		0x0020
#define NVREG_IRQ_LINK		0x0040
#define NVREG_IRQ_RX_FORCED	0x0080
#define NVREG_IRQ_TX_FORCED	0x0100
#define NVREG_IRQ_RECOVER_ERROR	0x8000
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU	0x0040
#define NVREG_IRQ_TX_ALL	(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL	(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER		(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED	0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x3c,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT	0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100	0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000	0x14050f
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII	0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT	0
#define NVREG_RINGSZ_RXSHIFT	16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x02100
#define NVREG_TXRXCTL_DESC_3	0x02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x1ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE	0x0c00030
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE	0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};
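
/*
 * Which union member is valid follows np->desc_ver: DESC_VER_1/2 use
 * struct ring_desc, DESC_VER_3 uses struct ring_desc_ex with the 64 bit
 * buffer address split across bufhigh/buflow (see nv_alloc_rx below).
 */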

#define FLAG_MASK_V1	0xffff0000
#define FLAG_MASK_V2	0xffffc000
#define LEN_MASK_V1	(0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2	(0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT	(1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUMOK1	(0x10000000)
#define NV_RX2_CHECKSUMOK2	(0x14000000)
#define NV_RX2_CHECKSUMOK3	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384
/*
 * Difference between the get and put pointers for the tx ring.
 * This is used to throttle the amount of data outstanding in the
 * tx ring.
 */
#define TX_LIMIT_DIFFERENCE	1

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100		/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3

/* PHY defines */
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_MARVELL_E3016		0x220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_INIT1	0x0f000
#define PHY_INIT2	0x0e00
#define PHY_INIT3	0x01000
#define PHY_INIT4	0x0200
#define PHY_INIT5	0x0004
#define PHY_INIT6	0x02000
#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE	0x0001
#define NV_PAUSEFRAME_TX_CAPABLE	0x0002
#define NV_PAUSEFRAME_RX_ENABLE		0x0004
#define NV_PAUSEFRAME_TX_ENABLE		0x0008
#define NV_PAUSEFRAME_RX_REQ		0x0010
#define NV_PAUSEFRAME_TX_REQ		0x0020
#define NV_PAUSEFRAME_AUTONEG		0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "tx_pause" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_bytes" },
	{ "rx_pause" },
	{ "rx_drop_frame" },
	{ "rx_packets" },
	{ "rx_errors_total" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 tx_deferral;
	u64 tx_packets;
	u64 tx_pause;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_bytes;
	u64 rx_pause;
	u64 rx_drop_frame;
	u64 rx_packets;
	u64 rx_errors_total;
};

/* diagnostics */
#define NV_TEST_COUNT_BASE	3
#define NV_TEST_COUNT_EXTENDED	4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	__le32 reg;
	__le32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0,0 }
};
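
/*
 * Presumably consumed by the "register (offline)" ethtool self-test
 * named above: each { reg, mask } entry names a register and the bits
 * that must read back as written; the terminating { 0,0 } ends the list.
 */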

/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	u16 gigabit;
	int intr_test;
	int recover_error;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type rx_ring;
	unsigned int cur_rx, refill_rx;
	struct sk_buff **rx_skbuff;
	dma_addr_t *rx_dma;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type tx_ring;
	unsigned int next_tx, nic_tx;
	struct sk_buff **tx_skbuff;
	dma_addr_t *tx_dma;
	unsigned int *tx_dma_len;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit_start;
	int tx_limit_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
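
/*
 * Worked example (editorial): inverting the formula above, a programmed
 * value of 970 (NVREG_POLL_DEFAULT_THROUGHPUT) corresponds to
 * 970 * 2^10 / 100 ~= 9933 usec, i.e. the ~100 timer irqs/second
 * mentioned in the "Known bugs" comment; 97 would give ~1 ms.
 */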

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
				int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING	0x01
#define NV_SETUP_TX_RING	0x02

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skbuff)
		kfree(np->rx_skbuff);
	if (np->rx_dma)
		kfree(np->rx_dma);
	if (np->tx_skbuff)
		kfree(np->tx_skbuff);
	if (np->tx_dma)
		kfree(np->tx_dma);
	if (np->tx_dma_len)
		kfree(np->tx_dma_len);
}
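
/*
 * Note: the rx and tx rings live in one contiguous DMA allocation at
 * np->ring_addr (rx first, tx at ring_addr + rx_ring_size * descriptor
 * size, as programmed in setup_hw_rings above), and the low bits of
 * np->msi_flags (NV_MSI_X_VECTORS_MASK) hold the MSI-X vector count;
 * using_multi_irqs() below treats a single vector as the shared case.
 */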
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
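
/*
 * Usage sketch (editorial, mirroring the calls later in this file): a
 * read passes MII_READ as the value and returns the register contents
 * (or -1 on timeout/error), e.g.
 *	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 * while a write passes the value itself and returns 0 on success.
 */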

static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	/* reset the phy
	 * (certain phys need bmcr to be setup with reset)
	 */
	if (phy_reset(dev, mii_control)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
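
/*
 * Note: when the sideband management unit owns the MAC (np->mac_in_use),
 * the four helpers above only gate the driver's own rx/tx path via
 * NVREG_RCVCTL_RX_PATH_EN / NVREG_XMITCTL_TX_PATH_EN instead of starting
 * or stopping the engines outright.
 */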

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* It seems that the nic always generates interrupts and doesn't
	 * accumulate errors internally. Thus the current values in np->stats
	 * are already up to date.
	 */
	return &np->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int refill_rx = np->refill_rx;
	int nr;

	while (np->cur_rx != refill_rx) {
		struct sk_buff *skb;

		nr = refill_rx % np->rx_ring_size;
		if (np->rx_skbuff[nr] == NULL) {

			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
			if (!skb)
				break;

			skb->dev = dev;
			np->rx_skbuff[nr] = skb;
		} else {
			skb = np->rx_skbuff[nr];
		}
		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
					skb->end-skb->data, PCI_DMA_FROMDEVICE);
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]);
			wmb();
			np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
		} else {
			np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
			np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
			wmb();
			np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
		}
		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
					dev->name, refill_rx);
		refill_rx++;
	}
	np->refill_rx = refill_rx;
	if (np->cur_rx - refill_rx == np->rx_ring_size)
		return 1;
	return 0;
}

/* If rx bufs are exhausted called after 50ms to attempt to refresh */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;

	/* Just reschedule NAPI rx processing */
	netif_rx_schedule(dev);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (nv_alloc_rx(dev)) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif
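
/*
 * Note: cur_rx and refill_rx (initialised below) are free-running
 * counters; the ring slot is always "counter % np->rx_ring_size".
 * cur_rx - refill_rx counts slots consumed but not yet handed back to
 * the hardware; when it reaches rx_ring_size the nic has no Available
 * descriptors left, which is nv_alloc_rx()'s failure return.
 */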
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->rx_ring_size;
	np->refill_rx = 0;
	for (i = 0; i < np->rx_ring_size; i++)
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].flaglen = 0;
		else
			np->rx_ring.ex[i].flaglen = 0;
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->next_tx = np->nic_tx = 0;
	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].flaglen = 0;
		else
			np->tx_ring.ex[i].flaglen = 0;
		np->tx_skbuff[i] = NULL;
		np->tx_dma[i] = 0;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	nv_init_tx(dev);
	nv_init_rx(dev);
	return nv_alloc_rx(dev);
}

static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
{
	struct fe_priv *np = netdev_priv(dev);

	dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
		dev->name, skbnr);

	if (np->tx_dma[skbnr]) {
		pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
			       np->tx_dma_len[skbnr],
			       PCI_DMA_TODEVICE);
		np->tx_dma[skbnr] = 0;
	}

	if (np->tx_skbuff[skbnr]) {
		dev_kfree_skb_any(np->tx_skbuff[skbnr]);
		np->tx_skbuff[skbnr] = NULL;
		return 1;
	} else {
		return 0;
	}
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].flaglen = 0;
		else
			np->tx_ring.ex[i].flaglen = 0;
		if (nv_release_txskb(dev, i))
			np->stats.tx_dropped++;
	}
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;
	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].flaglen = 0;
		else
			np->rx_ring.ex[i].flaglen = 0;
		wmb();
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->rx_dma[i],
					 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
			np->rx_skbuff[i] = NULL;
		}
	}
}

static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}

/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
	unsigned int start_nr = np->next_tx % np->tx_ring_size;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 tx_flags_vlan = 0;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}
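
	/*
	 * Illustrative: each descriptor covers at most NV_TX2_TSO_MAX_SIZE
	 * (16 KB) of a buffer, so e.g. a 20000 byte linear area costs
	 * (20000 >> 14) + 1 = 2 entries.
	 */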
	spin_lock_irq(&np->lock);

	if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
		spin_unlock_irq(&np->lock);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	/* setup the header buffer */
	do {
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		nr = (nr + 1) % np->tx_ring_size;

		np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						PCI_DMA_TODEVICE);
		np->tx_dma_len[nr] = bcnt;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
			np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
		} else {
			np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
			np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
			np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
		}
		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			nr = (nr + 1) % np->tx_ring_size;

			np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
						      PCI_DMA_TODEVICE);
			np->tx_dma_len[nr] = bcnt;

			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
				np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
			} else {
				np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
				np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
				np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
			}
			offset += bcnt;
			size -= bcnt;
		} while (size);
	}

	/* set last fragment flag */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra);
	} else {
		np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra);
	}

	np->tx_skbuff[nr] = skb;

#ifdef NETIF_F_TSO
	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
#endif
	tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (np->vlangrp && vlan_tx_tag_present(skb)) {
		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
	}

	/* set tx flags */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan);
		np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	}

	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
		dev->name, np->next_tx, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	np->next_tx += entries;

	dev->trans_start = jiffies;
	spin_unlock_irq(&np->lock);
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));
	return NETDEV_TX_OK;
}

/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	unsigned int i;
	struct sk_buff *skb;

	while (np->nic_tx != np->next_tx) {
		i = np->nic_tx % np->tx_ring_size;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			flags = le32_to_cpu(np->tx_ring.orig[i].flaglen);
		else
			flags = le32_to_cpu(np->tx_ring.ex[i].flaglen);

		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
					dev->name, np->nic_tx, flags);
		if (flags & NV_TX_VALID)
			break;
		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
					if (flags & NV_TX_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
					if (flags & NV_TX2_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		}
		nv_release_txskb(dev, i);
		np->nic_tx++;
	}
	if (np->next_tx - np->nic_tx < np->tx_limit_start)
		netif_wake_queue(dev);
}

/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
		       dev->name, (unsigned long)np->ring_addr,
		       np->next_tx, np->nic_tx);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i=0;i<=np->register_size;i+= 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
					i,
					readl(base + i + 0), readl(base + i + 4),
					readl(base + i + 8), readl(base + i + 12),
					readl(base + i + 16), readl(base + i + 20),
					readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i=0;i<np->tx_ring_size;i+= 4) {
			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	nv_tx_done(dev);

	/* 3) if there are dead entries: clear everything */
	if (np->next_tx != np->nic_tx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		np->next_tx = np->nic_tx = 0;
		setup_hw_rings(dev, NV_SETUP_TX_RING);
		netif_wake_queue(dev);
	}

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}
protolen) { 1795 /* more data on wire than in 802 header, trim off 1796 * additional data. 1797 */ 1798 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 1799 dev->name, protolen); 1800 return protolen; 1801 } else { 1802 /* less data on wire than mentioned in header. 1803 * Discard the packet. 1804 */ 1805 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n", 1806 dev->name); 1807 return -1; 1808 } 1809 } else { 1810 /* short packet. Accept only if 802 values are also short */ 1811 if (protolen > ETH_ZLEN) { 1812 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n", 1813 dev->name); 1814 return -1; 1815 } 1816 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 1817 dev->name, datalen); 1818 return datalen; 1819 } 1820} 1821 1822static int nv_rx_process(struct net_device *dev, int limit) 1823{ 1824 struct fe_priv *np = netdev_priv(dev); 1825 u32 flags; 1826 u32 vlanflags = 0; 1827 int count; 1828 1829 for (count = 0; count < limit; ++count) { 1830 struct sk_buff *skb; 1831 int len; 1832 int i; 1833 if (np->cur_rx - np->refill_rx >= np->rx_ring_size) 1834 break; /* we scanned the whole ring - do not continue */ 1835 1836 i = np->cur_rx % np->rx_ring_size; 1837 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1838 flags = le32_to_cpu(np->rx_ring.orig[i].flaglen); 1839 len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver); 1840 } else { 1841 flags = le32_to_cpu(np->rx_ring.ex[i].flaglen); 1842 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); 1843 vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow); 1844 } 1845 1846 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n", 1847 dev->name, np->cur_rx, flags); 1848 1849 if (flags & NV_RX_AVAIL) 1850 break; /* still owned by hardware. */ 1851 1852 /* 1853 * the packet is for us - immediately tear down the pci mapping. 1854 * TODO: check if a prefetch of the first cacheline improves 1855 * the performance. 1856 */ 1857 pci_unmap_single(np->pci_dev, np->rx_dma[i], 1858 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, 1859 PCI_DMA_FROMDEVICE); 1860 1861 { 1862 int j; 1863 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 1864 for (j=0; j<64; j++) { 1865 if ((j%16) == 0) 1866 dprintk("\n%03x:", j); 1867 dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]); 1868 } 1869 dprintk("\n"); 1870 } 1871 /* look at what we actually got: */ 1872 if (np->desc_ver == DESC_VER_1) { 1873 if (!(flags & NV_RX_DESCRIPTORVALID)) 1874 goto next_pkt; 1875 1876 if (flags & NV_RX_ERROR) { 1877 if (flags & NV_RX_MISSEDFRAME) { 1878 np->stats.rx_missed_errors++; 1879 np->stats.rx_errors++; 1880 goto next_pkt; 1881 } 1882 if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { 1883 np->stats.rx_errors++; 1884 goto next_pkt; 1885 } 1886 if (flags & NV_RX_CRCERR) { 1887 np->stats.rx_crc_errors++; 1888 np->stats.rx_errors++; 1889 goto next_pkt; 1890 } 1891 if (flags & NV_RX_OVERFLOW) { 1892 np->stats.rx_over_errors++; 1893 np->stats.rx_errors++; 1894 goto next_pkt; 1895 } 1896 if (flags & NV_RX_ERROR4) { 1897 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 1898 if (len < 0) { 1899 np->stats.rx_errors++; 1900 goto next_pkt; 1901 } 1902 } 1903 /* framing errors are soft errors.
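The frame is still delivered; when the nic additionally sets the (reverse engineered) SUBSTRACT1 flag the reported length apparently includes one extra byte, which is trimmed below instead of dropping the packet.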
*/ 1904 if (flags & NV_RX_FRAMINGERR) { 1905 if (flags & NV_RX_SUBSTRACT1) { 1906 len--; 1907 } 1908 } 1909 } 1910 } else { 1911 if (!(flags & NV_RX2_DESCRIPTORVALID)) 1912 goto next_pkt; 1913 1914 if (flags & NV_RX2_ERROR) { 1915 if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { 1916 np->stats.rx_errors++; 1917 goto next_pkt; 1918 } 1919 if (flags & NV_RX2_CRCERR) { 1920 np->stats.rx_crc_errors++; 1921 np->stats.rx_errors++; 1922 goto next_pkt; 1923 } 1924 if (flags & NV_RX2_OVERFLOW) { 1925 np->stats.rx_over_errors++; 1926 np->stats.rx_errors++; 1927 goto next_pkt; 1928 } 1929 if (flags & NV_RX2_ERROR4) { 1930 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 1931 if (len < 0) { 1932 np->stats.rx_errors++; 1933 goto next_pkt; 1934 } 1935 } 1936 /* framing errors are soft errors */ 1937 if (flags & NV_RX2_FRAMINGERR) { 1938 if (flags & NV_RX2_SUBSTRACT1) { 1939 len--; 1940 } 1941 } 1942 } 1943 if (np->rx_csum) { 1944 flags &= NV_RX2_CHECKSUMMASK; 1945 if (flags == NV_RX2_CHECKSUMOK1 || 1946 flags == NV_RX2_CHECKSUMOK2 || 1947 flags == NV_RX2_CHECKSUMOK3) { 1948 dprintk(KERN_DEBUG "%s: hw checksum hit.\n", dev->name); 1949 np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; 1950 } else { 1951 dprintk(KERN_DEBUG "%s: hw checksum miss.\n", dev->name); 1952 } 1953 } 1954 } 1955 /* got a valid packet - forward it to the network core */ 1956 skb = np->rx_skbuff[i]; 1957 np->rx_skbuff[i] = NULL; 1958 1959 skb_put(skb, len); 1960 skb->protocol = eth_type_trans(skb, dev); 1961 dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", 1962 dev->name, np->cur_rx, len, skb->protocol); 1963#ifdef CONFIG_FORCEDETH_NAPI 1964 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) 1965 vlan_hwaccel_receive_skb(skb, np->vlangrp, 1966 vlanflags & NV_RX3_VLAN_TAG_MASK); 1967 else 1968 netif_receive_skb(skb); 1969#else 1970 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) 1971 vlan_hwaccel_rx(skb, np->vlangrp, 1972 vlanflags & NV_RX3_VLAN_TAG_MASK); 1973 else 1974 netif_rx(skb); 1975#endif 1976 dev->last_rx = jiffies; 1977 np->stats.rx_packets++; 1978 np->stats.rx_bytes += len; 1979next_pkt: 1980 np->cur_rx++; 1981 } 1982 1983 return count; 1984} 1985 1986static void set_bufsize(struct net_device *dev) 1987{ 1988 struct fe_priv *np = netdev_priv(dev); 1989 1990 if (dev->mtu <= ETH_DATA_LEN) 1991 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; 1992 else 1993 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; 1994} 1995 1996/* 1997 * nv_change_mtu: dev->change_mtu function 1998 * Called with dev_base_lock held for read. 1999 */ 2000static int nv_change_mtu(struct net_device *dev, int new_mtu) 2001{ 2002 struct fe_priv *np = netdev_priv(dev); 2003 int old_mtu; 2004 2005 if (new_mtu < 64 || new_mtu > np->pkt_limit) 2006 return -EINVAL; 2007 2008 old_mtu = dev->mtu; 2009 dev->mtu = new_mtu; 2010 2011 /* return early if the buffer sizes will not change */ 2012 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) 2013 return 0; 2014 if (old_mtu == new_mtu) 2015 return 0; 2016 2017 /* synchronized against open : rtnl_lock() held by caller */ 2018 if (netif_running(dev)) { 2019 u8 __iomem *base = get_hwbase(dev); 2020 /* 2021 * It seems that the nic preloads valid ring entries into an 2022 * internal buffer. The procedure for flushing everything is 2023 * guessed; there is probably a simpler approach. 2024 * Changing the MTU is a rare event, so it shouldn't matter.
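* The guessed sequence used below - disable irqs, stop both engines,
 * drain the rings, resize the buffers, reprogram the nic's ring
 * addresses and sizes, kick, restart - deliberately errs on the side
 * of doing too much.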
2025 */ 2026 nv_disable_irq(dev); 2027 netif_tx_lock_bh(dev); 2028 spin_lock(&np->lock); 2029 /* stop engines */ 2030 nv_stop_rx(dev); 2031 nv_stop_tx(dev); 2032 nv_txrx_reset(dev); 2033 /* drain rx queue */ 2034 nv_drain_rx(dev); 2035 nv_drain_tx(dev); 2036 /* reinit driver view of the rx queue */ 2037 set_bufsize(dev); 2038 if (nv_init_ring(dev)) { 2039 if (!np->in_shutdown) 2040 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2041 } 2042 /* reinit nic view of the rx queue */ 2043 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2044 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2045 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 2046 base + NvRegRingSizes); 2047 pci_push(base); 2048 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2049 pci_push(base); 2050 2051 /* restart rx engine */ 2052 nv_start_rx(dev); 2053 nv_start_tx(dev); 2054 spin_unlock(&np->lock); 2055 netif_tx_unlock_bh(dev); 2056 nv_enable_irq(dev); 2057 } 2058 return 0; 2059} 2060 2061static void nv_copy_mac_to_hw(struct net_device *dev) 2062{ 2063 u8 __iomem *base = get_hwbase(dev); 2064 u32 mac[2]; 2065 2066 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 2067 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 2068 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 2069 2070 writel(mac[0], base + NvRegMacAddrA); 2071 writel(mac[1], base + NvRegMacAddrB); 2072} 2073 2074/* 2075 * nv_set_mac_address: dev->set_mac_address function 2076 * Called with rtnl_lock() held. 2077 */ 2078static int nv_set_mac_address(struct net_device *dev, void *addr) 2079{ 2080 struct fe_priv *np = netdev_priv(dev); 2081 struct sockaddr *macaddr = (struct sockaddr*)addr; 2082 2083 if (!is_valid_ether_addr(macaddr->sa_data)) 2084 return -EADDRNOTAVAIL; 2085 2086 /* synchronized against open : rtnl_lock() held by caller */ 2087 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); 2088 2089 if (netif_running(dev)) { 2090 netif_tx_lock_bh(dev); 2091 spin_lock_irq(&np->lock); 2092 2093 /* stop rx engine */ 2094 nv_stop_rx(dev); 2095 2096 /* set mac address */ 2097 nv_copy_mac_to_hw(dev); 2098 2099 /* restart rx engine */ 2100 nv_start_rx(dev); 2101 spin_unlock_irq(&np->lock); 2102 netif_tx_unlock_bh(dev); 2103 } else { 2104 nv_copy_mac_to_hw(dev); 2105 } 2106 return 0; 2107} 2108 2109/* 2110 * nv_set_multicast: dev->set_multicast function 2111 * Called with netif_tx_lock held. 
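*
 * The nic appears to implement a single match/mask address filter:
 * mask bits that are set must match addr exactly, clear mask bits are
 * don't-care. alwaysOn/alwaysOff below collect the bits that are 1
 * (resp. 0) in every multicast list entry; their union forms the mask.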
2112 */ 2113static void nv_set_multicast(struct net_device *dev) 2114{ 2115 struct fe_priv *np = netdev_priv(dev); 2116 u8 __iomem *base = get_hwbase(dev); 2117 u32 addr[2]; 2118 u32 mask[2]; 2119 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; 2120 2121 memset(addr, 0, sizeof(addr)); 2122 memset(mask, 0, sizeof(mask)); 2123 2124 if (dev->flags & IFF_PROMISC) { 2125 pff |= NVREG_PFF_PROMISC; 2126 } else { 2127 pff |= NVREG_PFF_MYADDR; 2128 2129 if (dev->flags & IFF_ALLMULTI || dev->mc_list) { 2130 u32 alwaysOff[2]; 2131 u32 alwaysOn[2]; 2132 2133 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; 2134 if (dev->flags & IFF_ALLMULTI) { 2135 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 2136 } else { 2137 struct dev_mc_list *walk; 2138 2139 walk = dev->mc_list; 2140 while (walk != NULL) { 2141 u32 a, b; 2142 a = le32_to_cpu(*(u32 *) walk->dmi_addr); 2143 b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4])); 2144 alwaysOn[0] &= a; 2145 alwaysOff[0] &= ~a; 2146 alwaysOn[1] &= b; 2147 alwaysOff[1] &= ~b; 2148 walk = walk->next; 2149 } 2150 } 2151 addr[0] = alwaysOn[0]; 2152 addr[1] = alwaysOn[1]; 2153 mask[0] = alwaysOn[0] | alwaysOff[0]; 2154 mask[1] = alwaysOn[1] | alwaysOff[1]; 2155 } 2156 } 2157 addr[0] |= NVREG_MCASTADDRA_FORCE; 2158 pff |= NVREG_PFF_ALWAYS; 2159 spin_lock_irq(&np->lock); 2160 nv_stop_rx(dev); 2161 writel(addr[0], base + NvRegMulticastAddrA); 2162 writel(addr[1], base + NvRegMulticastAddrB); 2163 writel(mask[0], base + NvRegMulticastMaskA); 2164 writel(mask[1], base + NvRegMulticastMaskB); 2165 writel(pff, base + NvRegPacketFilterFlags); 2166 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", 2167 dev->name); 2168 nv_start_rx(dev); 2169 spin_unlock_irq(&np->lock); 2170} 2171 2172static void nv_update_pause(struct net_device *dev, u32 pause_flags) 2173{ 2174 struct fe_priv *np = netdev_priv(dev); 2175 u8 __iomem *base = get_hwbase(dev); 2176 2177 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); 2178 2179 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { 2180 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; 2181 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { 2182 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); 2183 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2184 } else { 2185 writel(pff, base + NvRegPacketFilterFlags); 2186 } 2187 } 2188 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 2189 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 2190 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 2191 writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame); 2192 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 2193 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2194 } else { 2195 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 2196 writel(regmisc, base + NvRegMisc1); 2197 } 2198 } 2199} 2200 2201/** 2202 * nv_update_linkspeed: Setup the MAC according to the link partner 2203 * @dev: Network device to be configured 2204 * 2205 * The function queries the PHY and checks if there is a link partner. 2206 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is 2207 * set to 10 MBit HD. 2208 * 2209 * The function returns 0 if there is no link partner and 1 if there is 2210 * a good link partner. 
2211 */ 2212static int nv_update_linkspeed(struct net_device *dev) 2213{ 2214 struct fe_priv *np = netdev_priv(dev); 2215 u8 __iomem *base = get_hwbase(dev); 2216 int adv = 0; 2217 int lpa = 0; 2218 int adv_lpa, adv_pause, lpa_pause; 2219 int newls = np->linkspeed; 2220 int newdup = np->duplex; 2221 int mii_status; 2222 int retval = 0; 2223 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 2224 2225 /* BMSR_LSTATUS is latched, read it twice: 2226 * we want the current value. 2227 */ 2228 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 2229 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 2230 2231 if (!(mii_status & BMSR_LSTATUS)) { 2232 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", 2233 dev->name); 2234 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2235 newdup = 0; 2236 retval = 0; 2237 goto set_speed; 2238 } 2239 2240 if (np->autoneg == 0) { 2241 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", 2242 dev->name, np->fixed_mode); 2243 if (np->fixed_mode & LPA_100FULL) { 2244 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2245 newdup = 1; 2246 } else if (np->fixed_mode & LPA_100HALF) { 2247 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2248 newdup = 0; 2249 } else if (np->fixed_mode & LPA_10FULL) { 2250 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2251 newdup = 1; 2252 } else { 2253 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2254 newdup = 0; 2255 } 2256 retval = 1; 2257 goto set_speed; 2258 } 2259 /* check auto negotiation is complete */ 2260 if (!(mii_status & BMSR_ANEGCOMPLETE)) { 2261 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ 2262 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2263 newdup = 0; 2264 retval = 0; 2265 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); 2266 goto set_speed; 2267 } 2268 2269 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 2270 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 2271 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", 2272 dev->name, adv, lpa); 2273 2274 retval = 1; 2275 if (np->gigabit == PHY_GIGABIT) { 2276 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 2277 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); 2278 2279 if ((control_1000 & ADVERTISE_1000FULL) && 2280 (status_1000 & LPA_1000FULL)) { 2281 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", 2282 dev->name); 2283 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 2284 newdup = 1; 2285 goto set_speed; 2286 } 2287 } 2288 2289 /* FIXME: handle parallel detection properly */ 2290 adv_lpa = lpa & adv; 2291 if (adv_lpa & LPA_100FULL) { 2292 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2293 newdup = 1; 2294 } else if (adv_lpa & LPA_100HALF) { 2295 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 2296 newdup = 0; 2297 } else if (adv_lpa & LPA_10FULL) { 2298 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2299 newdup = 1; 2300 } else if (adv_lpa & LPA_10HALF) { 2301 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2302 newdup = 0; 2303 } else { 2304 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); 2305 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 2306 newdup = 0; 2307 } 2308 2309set_speed: 2310 if (np->duplex == newdup && np->linkspeed == newls) 2311 return retval; 2312 2313 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", 2314 dev->name, np->linkspeed, 
np->duplex, newls, newdup); 2315 2316 np->duplex = newdup; 2317 np->linkspeed = newls; 2318 2319 if (np->gigabit == PHY_GIGABIT) { 2320 phyreg = readl(base + NvRegRandomSeed); 2321 phyreg &= ~(0x3FF00); 2322 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) 2323 phyreg |= NVREG_RNDSEED_FORCE3; 2324 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) 2325 phyreg |= NVREG_RNDSEED_FORCE2; 2326 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 2327 phyreg |= NVREG_RNDSEED_FORCE; 2328 writel(phyreg, base + NvRegRandomSeed); 2329 } 2330 2331 phyreg = readl(base + NvRegPhyInterface); 2332 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 2333 if (np->duplex == 0) 2334 phyreg |= PHY_HALF; 2335 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 2336 phyreg |= PHY_100; 2337 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2338 phyreg |= PHY_1000; 2339 writel(phyreg, base + NvRegPhyInterface); 2340 2341 if (phyreg & PHY_RGMII) { 2342 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2343 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 2344 else 2345 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 2346 } else { 2347 txreg = NVREG_TX_DEFERRAL_DEFAULT; 2348 } 2349 writel(txreg, base + NvRegTxDeferral); 2350 2351 if (np->desc_ver == DESC_VER_1) { 2352 txreg = NVREG_TX_WM_DESC1_DEFAULT; 2353 } else { 2354 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2355 txreg = NVREG_TX_WM_DESC2_3_1000; 2356 else 2357 txreg = NVREG_TX_WM_DESC2_3_DEFAULT; 2358 } 2359 writel(txreg, base + NvRegTxWatermark); 2360 2361 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 2362 base + NvRegMisc1); 2363 pci_push(base); 2364 writel(np->linkspeed, base + NvRegLinkSpeed); 2365 pci_push(base); 2366 2367 pause_flags = 0; 2368 /* setup pause frame */ 2369 if (np->duplex != 0) { 2370 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 2371 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); 2372 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 2373 2374 switch (adv_pause) { 2375 case ADVERTISE_PAUSE_CAP: 2376 if (lpa_pause & LPA_PAUSE_CAP) { 2377 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2378 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 2379 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2380 } 2381 break; 2382 case ADVERTISE_PAUSE_ASYM: 2383 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 2384 { 2385 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2386 } 2387 break; 2388 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: 2389 if (lpa_pause & LPA_PAUSE_CAP) 2390 { 2391 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2392 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 2393 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2394 } 2395 if (lpa_pause == LPA_PAUSE_ASYM) 2396 { 2397 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2398 } 2399 break; 2400 } 2401 } else { 2402 pause_flags = np->pause_flags; 2403 } 2404 } 2405 nv_update_pause(dev, pause_flags); 2406 2407 return retval; 2408} 2409 2410static void nv_linkchange(struct net_device *dev) 2411{ 2412 if (nv_update_linkspeed(dev)) { 2413 if (!netif_carrier_ok(dev)) { 2414 netif_carrier_on(dev); 2415 printk(KERN_INFO "%s: link up.\n", dev->name); 2416 nv_start_rx(dev); 2417 } 2418 } else { 2419 if (netif_carrier_ok(dev)) { 2420 netif_carrier_off(dev); 2421 printk(KERN_INFO "%s: link down.\n", dev->name); 2422 nv_stop_rx(dev); 2423 } 2424 } 2425} 2426 2427static void nv_link_irq(struct net_device *dev) 2428{ 2429 u8 __iomem *base = get_hwbase(dev); 2430 u32 miistat; 2431 2432 miistat = readl(base + NvRegMIIStatus); 2433 
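/* NvRegMIIStatus is apparently write-to-clear: writing the mask back acks all latched MII events */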
writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 2434 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); 2435 2436 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 2437 nv_linkchange(dev); 2438 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); 2439} 2440 2441static irqreturn_t nv_nic_irq(int foo, void *data) 2442{ 2443 struct net_device *dev = (struct net_device *) data; 2444 struct fe_priv *np = netdev_priv(dev); 2445 u8 __iomem *base = get_hwbase(dev); 2446 u32 events; 2447 int i; 2448 2449 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 2450 2451 for (i=0; ; i++) { 2452 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 2453 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2454 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 2455 } else { 2456 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2457 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 2458 } 2459 pci_push(base); 2460 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 2461 if (!(events & np->irqmask)) 2462 break; 2463 2464 spin_lock(&np->lock); 2465 nv_tx_done(dev); 2466 spin_unlock(&np->lock); 2467 2468 if (events & NVREG_IRQ_LINK) { 2469 spin_lock(&np->lock); 2470 nv_link_irq(dev); 2471 spin_unlock(&np->lock); 2472 } 2473 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 2474 spin_lock(&np->lock); 2475 nv_linkchange(dev); 2476 spin_unlock(&np->lock); 2477 np->link_timeout = jiffies + LINK_TIMEOUT; 2478 } 2479 if (events & (NVREG_IRQ_TX_ERR)) { 2480 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 2481 dev->name, events); 2482 } 2483 if (events & (NVREG_IRQ_UNKNOWN)) { 2484 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 2485 dev->name, events); 2486 } 2487 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { 2488 spin_lock(&np->lock); 2489 /* disable interrupts on the nic */ 2490 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 2491 writel(0, base + NvRegIrqMask); 2492 else 2493 writel(np->irqmask, base + NvRegIrqMask); 2494 pci_push(base); 2495 2496 if (!np->in_shutdown) { 2497 np->nic_poll_irq = np->irqmask; 2498 np->recover_error = 1; 2499 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2500 } 2501 spin_unlock(&np->lock); 2502 break; 2503 } 2504#ifdef CONFIG_FORCEDETH_NAPI 2505 if (events & NVREG_IRQ_RX_ALL) { 2506 netif_rx_schedule(dev); 2507 2508 /* Disable further receive irqs */ 2509 spin_lock(&np->lock); 2510 np->irqmask &= ~NVREG_IRQ_RX_ALL; 2511 2512 if (np->msi_flags & NV_MSI_X_ENABLED) 2513 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 2514 else 2515 writel(np->irqmask, base + NvRegIrqMask); 2516 spin_unlock(&np->lock); 2517 } 2518#else 2519 nv_rx_process(dev, dev->weight); 2520 if (nv_alloc_rx(dev)) { 2521 spin_lock(&np->lock); 2522 if (!np->in_shutdown) 2523 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2524 spin_unlock(&np->lock); 2525 } 2526#endif 2527 if (i > max_interrupt_work) { 2528 spin_lock(&np->lock); 2529 /* disable interrupts on the nic */ 2530 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 2531 writel(0, base + NvRegIrqMask); 2532 else 2533 writel(np->irqmask, base + NvRegIrqMask); 2534 pci_push(base); 2535 2536 if (!np->in_shutdown) { 2537 np->nic_poll_irq = np->irqmask; 2538 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2539 } 2540 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); 2541 spin_unlock(&np->lock); 2542 break; 2543 } 2544 2545 } 2546 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 2547 2548 return
IRQ_RETVAL(i); 2549} 2550 2551static irqreturn_t nv_nic_irq_tx(int foo, void *data) 2552{ 2553 struct net_device *dev = (struct net_device *) data; 2554 struct fe_priv *np = netdev_priv(dev); 2555 u8 __iomem *base = get_hwbase(dev); 2556 u32 events; 2557 int i; 2558 unsigned long flags; 2559 2560 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 2561 2562 for (i=0; ; i++) { 2563 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 2564 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 2565 pci_push(base); 2566 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 2567 if (!(events & np->irqmask)) 2568 break; 2569 2570 spin_lock_irqsave(&np->lock, flags); 2571 nv_tx_done(dev); 2572 spin_unlock_irqrestore(&np->lock, flags); 2573 2574 if (events & (NVREG_IRQ_TX_ERR)) { 2575 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 2576 dev->name, events); 2577 } 2578 if (i > max_interrupt_work) { 2579 spin_lock_irqsave(&np->lock, flags); 2580 /* disable interrupts on the nic */ 2581 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 2582 pci_push(base); 2583 2584 if (!np->in_shutdown) { 2585 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 2586 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2587 } 2588 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 2589 spin_unlock_irqrestore(&np->lock, flags); 2590 break; 2591 } 2592 2593 } 2594 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); 2595 2596 return IRQ_RETVAL(i); 2597} 2598 2599#ifdef CONFIG_FORCEDETH_NAPI 2600static int nv_napi_poll(struct net_device *dev, int *budget) 2601{ 2602 int pkts, limit = min(*budget, dev->quota); 2603 struct fe_priv *np = netdev_priv(dev); 2604 u8 __iomem *base = get_hwbase(dev); 2605 unsigned long flags; 2606 2607 pkts = nv_rx_process(dev, limit); 2608 2609 if (nv_alloc_rx(dev)) { 2610 spin_lock_irqsave(&np->lock, flags); 2611 if (!np->in_shutdown) 2612 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2613 spin_unlock_irqrestore(&np->lock, flags); 2614 } 2615 2616 if (pkts < limit) { 2617 /* all done, no more packets present */ 2618 netif_rx_complete(dev); 2619 2620 /* re-enable receive interrupts */ 2621 spin_lock_irqsave(&np->lock, flags); 2622 2623 np->irqmask |= NVREG_IRQ_RX_ALL; 2624 if (np->msi_flags & NV_MSI_X_ENABLED) 2625 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 2626 else 2627 writel(np->irqmask, base + NvRegIrqMask); 2628 2629 spin_unlock_irqrestore(&np->lock, flags); 2630 return 0; 2631 } else { 2632 /* used up our quantum, so reschedule */ 2633 dev->quota -= pkts; 2634 *budget -= pkts; 2635 return 1; 2636 } 2637} 2638#endif 2639 2640#ifdef CONFIG_FORCEDETH_NAPI 2641static irqreturn_t nv_nic_irq_rx(int foo, void *data) 2642{ 2643 struct net_device *dev = (struct net_device *) data; 2644 u8 __iomem *base = get_hwbase(dev); 2645 u32 events; 2646 2647 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 2648 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 2649 2650 if (events) { 2651 netif_rx_schedule(dev); 2652 /* disable receive interrupts on the nic */ 2653 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 2654 pci_push(base); 2655 } 2656 return IRQ_HANDLED; 2657} 2658#else 2659static irqreturn_t nv_nic_irq_rx(int foo, void *data) 2660{ 2661 struct net_device *dev = (struct net_device *) data; 2662 struct fe_priv *np = netdev_priv(dev); 2663 u8 __iomem *base = get_hwbase(dev); 2664 u32 events; 2665 int i; 2666 unsigned long flags; 2667 2668 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 2669 2670 for (i=0; 
; i++) { 2671 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 2672 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 2673 pci_push(base); 2674 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 2675 if (!(events & np->irqmask)) 2676 break; 2677 2678 nv_rx_process(dev, dev->weight); 2679 if (nv_alloc_rx(dev)) { 2680 spin_lock_irqsave(&np->lock, flags); 2681 if (!np->in_shutdown) 2682 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2683 spin_unlock_irqrestore(&np->lock, flags); 2684 } 2685 2686 if (i > max_interrupt_work) { 2687 spin_lock_irqsave(&np->lock, flags); 2688 /* disable interrupts on the nic */ 2689 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 2690 pci_push(base); 2691 2692 if (!np->in_shutdown) { 2693 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 2694 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2695 } 2696 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 2697 spin_unlock_irqrestore(&np->lock, flags); 2698 break; 2699 } 2700 } 2701 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); 2702 2703 return IRQ_RETVAL(i); 2704} 2705#endif 2706 2707static irqreturn_t nv_nic_irq_other(int foo, void *data) 2708{ 2709 struct net_device *dev = (struct net_device *) data; 2710 struct fe_priv *np = netdev_priv(dev); 2711 u8 __iomem *base = get_hwbase(dev); 2712 u32 events; 2713 int i; 2714 unsigned long flags; 2715 2716 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 2717 2718 for (i=0; ; i++) { 2719 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 2720 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 2721 pci_push(base); 2722 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 2723 if (!(events & np->irqmask)) 2724 break; 2725 2726 if (events & NVREG_IRQ_LINK) { 2727 spin_lock_irqsave(&np->lock, flags); 2728 nv_link_irq(dev); 2729 spin_unlock_irqrestore(&np->lock, flags); 2730 } 2731 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 2732 spin_lock_irqsave(&np->lock, flags); 2733 nv_linkchange(dev); 2734 spin_unlock_irqrestore(&np->lock, flags); 2735 np->link_timeout = jiffies + LINK_TIMEOUT; 2736 } 2737 if (events & NVREG_IRQ_RECOVER_ERROR) { 2738 spin_lock_irq(&np->lock); 2739 /* disable interrupts on the nic */ 2740 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 2741 pci_push(base); 2742 2743 if (!np->in_shutdown) { 2744 np->nic_poll_irq |= NVREG_IRQ_OTHER; 2745 np->recover_error = 1; 2746 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2747 } 2748 spin_unlock_irq(&np->lock); 2749 break; 2750 } 2751 if (events & (NVREG_IRQ_UNKNOWN)) { 2752 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. 
Please report\n", 2753 dev->name, events); 2754 } 2755 if (i > max_interrupt_work) { 2756 spin_lock_irqsave(&np->lock, flags); 2757 /* disable interrupts on the nic */ 2758 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 2759 pci_push(base); 2760 2761 if (!np->in_shutdown) { 2762 np->nic_poll_irq |= NVREG_IRQ_OTHER; 2763 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2764 } 2765 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 2766 spin_unlock_irqrestore(&np->lock, flags); 2767 break; 2768 } 2769 2770 } 2771 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); 2772 2773 return IRQ_RETVAL(i); 2774} 2775 2776static irqreturn_t nv_nic_irq_test(int foo, void *data) 2777{ 2778 struct net_device *dev = (struct net_device *) data; 2779 struct fe_priv *np = netdev_priv(dev); 2780 u8 __iomem *base = get_hwbase(dev); 2781 u32 events; 2782 2783 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name); 2784 2785 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 2786 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2787 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 2788 } else { 2789 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2790 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 2791 } 2792 pci_push(base); 2793 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 2794 if (!(events & NVREG_IRQ_TIMER)) 2795 return IRQ_RETVAL(0); 2796 2797 spin_lock(&np->lock); 2798 np->intr_test = 1; 2799 spin_unlock(&np->lock); 2800 2801 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); 2802 2803 return IRQ_RETVAL(1); 2804} 2805 2806static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) 2807{ 2808 u8 __iomem *base = get_hwbase(dev); 2809 int i; 2810 u32 msixmap = 0; 2811 2812 /* Each interrupt bit can be mapped to an MSIX vector (4 bits). 2813 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents 2814 * the remaining 8 interrupts.
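*
 * Worked example (illustrative): routing interrupt bit 4 to vector 1
 * sets nibble 4 of MSIXMap0, i.e. msixmap |= 1 << (4 << 2) == 0x00010000.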
2815 */ 2816 for (i = 0; i < 8; i++) { 2817 if ((irqmask >> i) & 0x1) { 2818 msixmap |= vector << (i << 2); 2819 } 2820 } 2821 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 2822 2823 msixmap = 0; 2824 for (i = 0; i < 8; i++) { 2825 if ((irqmask >> (i + 8)) & 0x1) { 2826 msixmap |= vector << (i << 2); 2827 } 2828 } 2829 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 2830} 2831 2832static int nv_request_irq(struct net_device *dev, int intr_test) 2833{ 2834 struct fe_priv *np = get_nvpriv(dev); 2835 u8 __iomem *base = get_hwbase(dev); 2836 int ret = 1; 2837 int i; 2838 2839 if (np->msi_flags & NV_MSI_X_CAPABLE) { 2840 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 2841 np->msi_x_entry[i].entry = i; 2842 } 2843 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 2844 np->msi_flags |= NV_MSI_X_ENABLED; 2845 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 2846 /* Request irq for rx handling */ 2847 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) { 2848 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 2849 pci_disable_msix(np->pci_dev); 2850 np->msi_flags &= ~NV_MSI_X_ENABLED; 2851 goto out_err; 2852 } 2853 /* Request irq for tx handling */ 2854 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) { 2855 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 2856 pci_disable_msix(np->pci_dev); 2857 np->msi_flags &= ~NV_MSI_X_ENABLED; 2858 goto out_free_rx; 2859 } 2860 /* Request irq for link and timer handling */ 2861 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) { 2862 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 2863 pci_disable_msix(np->pci_dev); 2864 np->msi_flags &= ~NV_MSI_X_ENABLED; 2865 goto out_free_tx; 2866 } 2867 /* map interrupts to their respective vector */ 2868 writel(0, base + NvRegMSIXMap0); 2869 writel(0, base + NvRegMSIXMap1); 2870 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 2871 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 2872 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 2873 } else { 2874 /* Request irq for all interrupts */ 2875 if ((!intr_test && 2876 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || 2877 (intr_test && 2878 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) { 2879 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 2880 pci_disable_msix(np->pci_dev); 2881 np->msi_flags &= ~NV_MSI_X_ENABLED; 2882 goto out_err; 2883 } 2884 2885 /* map interrupts to vector 0 */ 2886 writel(0, base + NvRegMSIXMap0); 2887 writel(0, base + NvRegMSIXMap1); 2888 } 2889 } 2890 } 2891 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 2892 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 2893 np->msi_flags |= NV_MSI_ENABLED; 2894 if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || 2895 (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) { 2896 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 2897 pci_disable_msi(np->pci_dev); 2898 np->msi_flags &= ~NV_MSI_ENABLED; 2899 goto out_err; 2900 } 2901 2902 /* map 
interrupts to vector 0 */ 2903 writel(0, base + NvRegMSIMap0); 2904 writel(0, base + NvRegMSIMap1); 2905 /* enable msi vector 0 */ 2906 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 2907 } 2908 } 2909 if (ret != 0) { 2910 if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || 2911 (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) 2912 goto out_err; 2913 2914 } 2915 2916 return 0; 2917out_free_tx: 2918 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); 2919out_free_rx: 2920 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); 2921out_err: 2922 return 1; 2923} 2924 2925static void nv_free_irq(struct net_device *dev) 2926{ 2927 struct fe_priv *np = get_nvpriv(dev); 2928 int i; 2929 2930 if (np->msi_flags & NV_MSI_X_ENABLED) { 2931 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 2932 free_irq(np->msi_x_entry[i].vector, dev); 2933 } 2934 pci_disable_msix(np->pci_dev); 2935 np->msi_flags &= ~NV_MSI_X_ENABLED; 2936 } else { 2937 free_irq(np->pci_dev->irq, dev); 2938 if (np->msi_flags & NV_MSI_ENABLED) { 2939 pci_disable_msi(np->pci_dev); 2940 np->msi_flags &= ~NV_MSI_ENABLED; 2941 } 2942 } 2943} 2944 2945static void nv_do_nic_poll(unsigned long data) 2946{ 2947 struct net_device *dev = (struct net_device *) data; 2948 struct fe_priv *np = netdev_priv(dev); 2949 u8 __iomem *base = get_hwbase(dev); 2950 u32 mask = 0; 2951 2952 /* 2953 * First disable irq(s) and then 2954 * reenable interrupts on the nic; we have to do this before calling 2955 * nv_nic_irq because that may decide to do otherwise 2956 */ 2957 2958 if (!using_multi_irqs(dev)) { 2959 if (np->msi_flags & NV_MSI_X_ENABLED) 2960 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 2961 else 2962 disable_irq_lockdep(dev->irq); 2963 mask = np->irqmask; 2964 } else { 2965 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 2966 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 2967 mask |= NVREG_IRQ_RX_ALL; 2968 } 2969 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 2970 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 2971 mask |= NVREG_IRQ_TX_ALL; 2972 } 2973 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 2974 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 2975 mask |= NVREG_IRQ_OTHER; 2976 } 2977 } 2978 np->nic_poll_irq = 0; 2979 2980 if (np->recover_error) { 2981 np->recover_error = 0; 2982 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n"); 2983 if (netif_running(dev)) { 2984 netif_tx_lock_bh(dev); 2985 spin_lock(&np->lock); 2986 /* stop engines */ 2987 nv_stop_rx(dev); 2988 nv_stop_tx(dev); 2989 nv_txrx_reset(dev); 2990 /* drain rx queue */ 2991 nv_drain_rx(dev); 2992 nv_drain_tx(dev); 2993 /* reinit driver view of the rx queue */ 2994 set_bufsize(dev); 2995 if (nv_init_ring(dev)) { 2996 if (!np->in_shutdown) 2997 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2998 } 2999 /* reinit nic view of the rx queue */ 3000 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3001 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3002 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3003 base + NvRegRingSizes); 3004 pci_push(base); 3005 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3006 pci_push(base); 3007 3008 /* restart rx engine */ 3009 nv_start_rx(dev); 3010 nv_start_tx(dev); 3011 spin_unlock(&np->lock); 3012 netif_tx_unlock_bh(dev); 3013 } 3014 } 3015
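/* At this point the irq line(s) are still disabled at the cpu level;
 * re-arm the nic's irq mask below and invoke the handler(s) by hand
 * for whatever was pending. */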
3016 /* FIXME: Do we need synchronize_irq(dev->irq) here? */ 3017 3018 writel(mask, base + NvRegIrqMask); 3019 pci_push(base); 3020 3021 if (!using_multi_irqs(dev)) { 3022 nv_nic_irq(0, dev); 3023 if (np->msi_flags & NV_MSI_X_ENABLED) 3024 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 3025 else 3026 enable_irq_lockdep(dev->irq); 3027 } else { 3028 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 3029 nv_nic_irq_rx(0, dev); 3030 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 3031 } 3032 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 3033 nv_nic_irq_tx(0, dev); 3034 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 3035 } 3036 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 3037 nv_nic_irq_other(0, dev); 3038 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 3039 } 3040 } 3041} 3042 3043#ifdef CONFIG_NET_POLL_CONTROLLER 3044static void nv_poll_controller(struct net_device *dev) 3045{ 3046 nv_do_nic_poll((unsigned long) dev); 3047} 3048#endif 3049 3050static void nv_do_stats_poll(unsigned long data) 3051{ 3052 struct net_device *dev = (struct net_device *) data; 3053 struct fe_priv *np = netdev_priv(dev); 3054 u8 __iomem *base = get_hwbase(dev); 3055 3056 np->estats.tx_bytes += readl(base + NvRegTxCnt); 3057 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); 3058 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); 3059 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); 3060 np->estats.tx_late_collision += readl(base + NvRegTxLateCol); 3061 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); 3062 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); 3063 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); 3064 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); 3065 np->estats.tx_deferral += readl(base + NvRegTxDef); 3066 np->estats.tx_packets += readl(base + NvRegTxFrame); 3067 np->estats.tx_pause += readl(base + NvRegTxPause); 3068 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); 3069 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); 3070 np->estats.rx_late_collision += readl(base + NvRegRxLateCol); 3071 np->estats.rx_runt += readl(base + NvRegRxRunt); 3072 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); 3073 np->estats.rx_over_errors += readl(base + NvRegRxOverflow); 3074 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); 3075 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); 3076 np->estats.rx_length_error += readl(base + NvRegRxLenErr); 3077 np->estats.rx_unicast += readl(base + NvRegRxUnicast); 3078 np->estats.rx_multicast += readl(base + NvRegRxMulticast); 3079 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); 3080 np->estats.rx_bytes += readl(base + NvRegRxCnt); 3081 np->estats.rx_pause += readl(base + NvRegRxPause); 3082 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 3083 np->estats.rx_packets = 3084 np->estats.rx_unicast + 3085 np->estats.rx_multicast + 3086 np->estats.rx_broadcast; 3087 np->estats.rx_errors_total = 3088 np->estats.rx_crc_errors + 3089 np->estats.rx_over_errors + 3090 np->estats.rx_frame_error + 3091 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + 3092 np->estats.rx_late_collision + 3093 np->estats.rx_runt + 3094 np->estats.rx_frame_too_long; 3095 3096 if (!np->in_shutdown) 3097 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 3098} 3099 3100static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 3101{ 3102 struct 
fe_priv *np = netdev_priv(dev); 3103 strcpy(info->driver, "forcedeth"); 3104 strcpy(info->version, FORCEDETH_VERSION); 3105 strcpy(info->bus_info, pci_name(np->pci_dev)); 3106} 3107 3108static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 3109{ 3110 struct fe_priv *np = netdev_priv(dev); 3111 wolinfo->supported = WAKE_MAGIC; 3112 3113 spin_lock_irq(&np->lock); 3114 if (np->wolenabled) 3115 wolinfo->wolopts = WAKE_MAGIC; 3116 spin_unlock_irq(&np->lock); 3117} 3118 3119static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 3120{ 3121 struct fe_priv *np = netdev_priv(dev); 3122 u8 __iomem *base = get_hwbase(dev); 3123 u32 flags = 0; 3124 3125 if (wolinfo->wolopts == 0) { 3126 np->wolenabled = 0; 3127 } else if (wolinfo->wolopts & WAKE_MAGIC) { 3128 np->wolenabled = 1; 3129 flags = NVREG_WAKEUPFLAGS_ENABLE; 3130 } 3131 if (netif_running(dev)) { 3132 spin_lock_irq(&np->lock); 3133 writel(flags, base + NvRegWakeUpFlags); 3134 spin_unlock_irq(&np->lock); 3135 } 3136 return 0; 3137} 3138 3139static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 3140{ 3141 struct fe_priv *np = netdev_priv(dev); 3142 int adv; 3143 3144 spin_lock_irq(&np->lock); 3145 ecmd->port = PORT_MII; 3146 if (!netif_running(dev)) { 3147 /* We do not track link speed / duplex setting if the 3148 * interface is disabled. Force a link check */ 3149 if (nv_update_linkspeed(dev)) { 3150 if (!netif_carrier_ok(dev)) 3151 netif_carrier_on(dev); 3152 } else { 3153 if (netif_carrier_ok(dev)) 3154 netif_carrier_off(dev); 3155 } 3156 } 3157 3158 if (netif_carrier_ok(dev)) { 3159 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 3160 case NVREG_LINKSPEED_10: 3161 ecmd->speed = SPEED_10; 3162 break; 3163 case NVREG_LINKSPEED_100: 3164 ecmd->speed = SPEED_100; 3165 break; 3166 case NVREG_LINKSPEED_1000: 3167 ecmd->speed = SPEED_1000; 3168 break; 3169 } 3170 ecmd->duplex = DUPLEX_HALF; 3171 if (np->duplex) 3172 ecmd->duplex = DUPLEX_FULL; 3173 } else { 3174 ecmd->speed = -1; 3175 ecmd->duplex = -1; 3176 } 3177 3178 ecmd->autoneg = np->autoneg; 3179 3180 ecmd->advertising = ADVERTISED_MII; 3181 if (np->autoneg) { 3182 ecmd->advertising |= ADVERTISED_Autoneg; 3183 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3184 if (adv & ADVERTISE_10HALF) 3185 ecmd->advertising |= ADVERTISED_10baseT_Half; 3186 if (adv & ADVERTISE_10FULL) 3187 ecmd->advertising |= ADVERTISED_10baseT_Full; 3188 if (adv & ADVERTISE_100HALF) 3189 ecmd->advertising |= ADVERTISED_100baseT_Half; 3190 if (adv & ADVERTISE_100FULL) 3191 ecmd->advertising |= ADVERTISED_100baseT_Full; 3192 if (np->gigabit == PHY_GIGABIT) { 3193 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3194 if (adv & ADVERTISE_1000FULL) 3195 ecmd->advertising |= ADVERTISED_1000baseT_Full; 3196 } 3197 } 3198 ecmd->supported = (SUPPORTED_Autoneg | 3199 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 3200 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 3201 SUPPORTED_MII); 3202 if (np->gigabit == PHY_GIGABIT) 3203 ecmd->supported |= SUPPORTED_1000baseT_Full; 3204 3205 ecmd->phy_address = np->phyaddr; 3206 ecmd->transceiver = XCVR_EXTERNAL; 3207 3208 /* ignore maxtxpkt, maxrxpkt for now */ 3209 spin_unlock_irq(&np->lock); 3210 return 0; 3211} 3212 3213static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 3214{ 3215 struct fe_priv *np = netdev_priv(dev); 3216 3217 if (ecmd->port != PORT_MII) 3218 return -EINVAL; 3219 if (ecmd->transceiver != XCVR_EXTERNAL) 3220 return -EINVAL; 3221 if 
(ecmd->phy_address != np->phyaddr) { 3222 /* TODO: support switching between multiple phys. Should be 3223 * trivial, but not enabled due to lack of test hardware. */ 3224 return -EINVAL; 3225 } 3226 if (ecmd->autoneg == AUTONEG_ENABLE) { 3227 u32 mask; 3228 3229 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 3230 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 3231 if (np->gigabit == PHY_GIGABIT) 3232 mask |= ADVERTISED_1000baseT_Full; 3233 3234 if ((ecmd->advertising & mask) == 0) 3235 return -EINVAL; 3236 3237 } else if (ecmd->autoneg == AUTONEG_DISABLE) { 3238 /* Note: autonegotiation disabled, speed 1000 intentionally 3239 * forbidden - no one should need that. */ 3240 3241 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) 3242 return -EINVAL; 3243 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 3244 return -EINVAL; 3245 } else { 3246 return -EINVAL; 3247 } 3248 3249 netif_carrier_off(dev); 3250 if (netif_running(dev)) { 3251 nv_disable_irq(dev); 3252 netif_tx_lock_bh(dev); 3253 spin_lock(&np->lock); 3254 /* stop engines */ 3255 nv_stop_rx(dev); 3256 nv_stop_tx(dev); 3257 spin_unlock(&np->lock); 3258 netif_tx_unlock_bh(dev); 3259 } 3260 3261 if (ecmd->autoneg == AUTONEG_ENABLE) { 3262 int adv, bmcr; 3263 3264 np->autoneg = 1; 3265 3266 /* advertise only what has been requested */ 3267 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3268 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3269 if (ecmd->advertising & ADVERTISED_10baseT_Half) 3270 adv |= ADVERTISE_10HALF; 3271 if (ecmd->advertising & ADVERTISED_10baseT_Full) 3272 adv |= ADVERTISE_10FULL; 3273 if (ecmd->advertising & ADVERTISED_100baseT_Half) 3274 adv |= ADVERTISE_100HALF; 3275 if (ecmd->advertising & ADVERTISED_100baseT_Full) 3276 adv |= ADVERTISE_100FULL; 3277 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 3278 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 3279 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3280 adv |= ADVERTISE_PAUSE_ASYM; 3281 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 3282 3283 if (np->gigabit == PHY_GIGABIT) { 3284 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3285 adv &= ~ADVERTISE_1000FULL; 3286 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 3287 adv |= ADVERTISE_1000FULL; 3288 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 3289 } 3290 3291 if (netif_running(dev)) 3292 printk(KERN_INFO "%s: link down.\n", dev->name); 3293 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3294 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 3295 bmcr |= BMCR_ANENABLE; 3296 /* reset the phy in order for settings to stick, 3297 * and cause autoneg to start */ 3298 if (phy_reset(dev, bmcr)) { 3299 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 3300 return -EINVAL; 3301 } 3302 } else { 3303 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 3304 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3305 } 3306 } else { 3307 int adv, bmcr; 3308 3309 np->autoneg = 0; 3310 3311 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3312 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3313 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) 3314 adv |= ADVERTISE_10HALF; 3315 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) 3316 adv |= ADVERTISE_10FULL; 3317 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) 3318 adv |= ADVERTISE_100HALF; 3319 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
3320 adv |= ADVERTISE_100FULL; 3321 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 3322 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ 3323 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 3324 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3325 } 3326 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { 3327 adv |= ADVERTISE_PAUSE_ASYM; 3328 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3329 } 3330 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 3331 np->fixed_mode = adv; 3332 3333 if (np->gigabit == PHY_GIGABIT) { 3334 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3335 adv &= ~ADVERTISE_1000FULL; 3336 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 3337 } 3338 3339 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3340 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); 3341 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) 3342 bmcr |= BMCR_FULLDPLX; 3343 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) 3344 bmcr |= BMCR_SPEED100; 3345 if (np->phy_oui == PHY_OUI_MARVELL) { 3346 /* reset the phy in order for forced mode settings to stick */ 3347 if (phy_reset(dev, bmcr)) { 3348 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 3349 return -EINVAL; 3350 } 3351 } else { 3352 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3353 if (netif_running(dev)) { 3354 /* Wait a bit and then reconfigure the nic. */ 3355 udelay(10); 3356 nv_linkchange(dev); 3357 } 3358 } 3359 } 3360 3361 if (netif_running(dev)) { 3362 nv_start_rx(dev); 3363 nv_start_tx(dev); 3364 nv_enable_irq(dev); 3365 } 3366 3367 return 0; 3368} 3369 3370#define FORCEDETH_REGS_VER 1 3371 3372static int nv_get_regs_len(struct net_device *dev) 3373{ 3374 struct fe_priv *np = netdev_priv(dev); 3375 return np->register_size; 3376} 3377 3378static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 3379{ 3380 struct fe_priv *np = netdev_priv(dev); 3381 u8 __iomem *base = get_hwbase(dev); 3382 u32 *rbuf = buf; 3383 int i; 3384 3385 regs->version = FORCEDETH_REGS_VER; 3386 spin_lock_irq(&np->lock); 3387 for (i = 0;i < np->register_size/sizeof(u32); i++) 3388 rbuf[i] = readl(base + i*sizeof(u32)); 3389 spin_unlock_irq(&np->lock); 3390} 3391 3392static int nv_nway_reset(struct net_device *dev) 3393{ 3394 struct fe_priv *np = netdev_priv(dev); 3395 int ret; 3396 3397 if (np->autoneg) { 3398 int bmcr; 3399 3400 netif_carrier_off(dev); 3401 if (netif_running(dev)) { 3402 nv_disable_irq(dev); 3403 netif_tx_lock_bh(dev); 3404 spin_lock(&np->lock); 3405 /* stop engines */ 3406 nv_stop_rx(dev); 3407 nv_stop_tx(dev); 3408 spin_unlock(&np->lock); 3409 netif_tx_unlock_bh(dev); 3410 printk(KERN_INFO "%s: link down.\n", dev->name); 3411 } 3412 3413 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3414 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 3415 bmcr |= BMCR_ANENABLE; 3416 /* reset the phy in order for settings to stick */ 3417 if (phy_reset(dev, bmcr)) { 3418 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 3419 return -EINVAL; 3420 } 3421 } else { 3422 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 3423 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3424 } 3425 3426 if (netif_running(dev)) { 3427 nv_start_rx(dev); 3428 nv_start_tx(dev); 3429 nv_enable_irq(dev); 3430 } 3431 ret = 0; 3432 } else { 3433 ret = -EINVAL; 3434 } 3435 3436 return ret; 3437} 3438 3439static int nv_set_tso(struct net_device *dev, u32 value) 3440{ 3441 struct fe_priv *np = netdev_priv(dev); 3442 3443 if
((np->driver_data & DEV_HAS_CHECKSUM)) 3444 return ethtool_op_set_tso(dev, value); 3445 else 3446 return -EOPNOTSUPP; 3447} 3448 3449static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 3450{ 3451 struct fe_priv *np = netdev_priv(dev); 3452 3453 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 3454 ring->rx_mini_max_pending = 0; 3455 ring->rx_jumbo_max_pending = 0; 3456 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 3457 3458 ring->rx_pending = np->rx_ring_size; 3459 ring->rx_mini_pending = 0; 3460 ring->rx_jumbo_pending = 0; 3461 ring->tx_pending = np->tx_ring_size; 3462} 3463 3464static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 3465{ 3466 struct fe_priv *np = netdev_priv(dev); 3467 u8 __iomem *base = get_hwbase(dev); 3468 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len; 3469 dma_addr_t ring_addr; 3470 3471 if (ring->rx_pending < RX_RING_MIN || 3472 ring->tx_pending < TX_RING_MIN || 3473 ring->rx_mini_pending != 0 || 3474 ring->rx_jumbo_pending != 0 || 3475 (np->desc_ver == DESC_VER_1 && 3476 (ring->rx_pending > RING_MAX_DESC_VER_1 || 3477 ring->tx_pending > RING_MAX_DESC_VER_1)) || 3478 (np->desc_ver != DESC_VER_1 && 3479 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 3480 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { 3481 return -EINVAL; 3482 } 3483 3484 /* allocate new rings */ 3485 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3486 rxtx_ring = pci_alloc_consistent(np->pci_dev, 3487 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 3488 &ring_addr); 3489 } else { 3490 rxtx_ring = pci_alloc_consistent(np->pci_dev, 3491 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 3492 &ring_addr); 3493 } 3494 rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL); 3495 rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL); 3496 tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL); 3497 tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL); 3498 tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL); 3499 if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) { 3500 /* fall back to old rings */ 3501 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3502 if (rxtx_ring) 3503 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 3504 rxtx_ring, ring_addr); 3505 } else { 3506 if (rxtx_ring) 3507 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 3508 rxtx_ring, ring_addr); 3509 } 3510 if (rx_skbuff) 3511 kfree(rx_skbuff); 3512 if (rx_dma) 3513 kfree(rx_dma); 3514 if (tx_skbuff) 3515 kfree(tx_skbuff); 3516 if (tx_dma) 3517 kfree(tx_dma); 3518 if (tx_dma_len) 3519 kfree(tx_dma_len); 3520 goto exit; 3521 } 3522 3523 if (netif_running(dev)) { 3524 nv_disable_irq(dev); 3525 netif_tx_lock_bh(dev); 3526 spin_lock(&np->lock); 3527 /* stop engines */ 3528 nv_stop_rx(dev); 3529 nv_stop_tx(dev); 3530 nv_txrx_reset(dev); 3531 /* drain queues */ 3532 nv_drain_rx(dev); 3533 nv_drain_tx(dev); 3534 /* delete queues */ 3535 free_rings(dev); 3536 } 3537 3538 /* set new values */ 3539 np->rx_ring_size = ring->rx_pending; 3540 np->tx_ring_size = ring->tx_pending; 3541 np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE; 3542 np->tx_limit_start = 
ring->tx_pending - TX_LIMIT_DIFFERENCE - 1; 3543 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3544 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 3545 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 3546 } else { 3547 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 3548 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 3549 } 3550 np->rx_skbuff = (struct sk_buff**)rx_skbuff; 3551 np->rx_dma = (dma_addr_t*)rx_dma; 3552 np->tx_skbuff = (struct sk_buff**)tx_skbuff; 3553 np->tx_dma = (dma_addr_t*)tx_dma; 3554 np->tx_dma_len = (unsigned int*)tx_dma_len; 3555 np->ring_addr = ring_addr; 3556 3557 memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); 3558 memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); 3559 memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size); 3560 memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size); 3561 memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size); 3562 3563 if (netif_running(dev)) { 3564 /* reinit driver view of the queues */ 3565 set_bufsize(dev); 3566 if (nv_init_ring(dev)) { 3567 if (!np->in_shutdown) 3568 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3569 } 3570 3571 /* reinit nic view of the queues */ 3572 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3573 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3574 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3575 base + NvRegRingSizes); 3576 pci_push(base); 3577 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3578 pci_push(base); 3579 3580 /* restart engines */ 3581 nv_start_rx(dev); 3582 nv_start_tx(dev); 3583 spin_unlock(&np->lock); 3584 netif_tx_unlock_bh(dev); 3585 nv_enable_irq(dev); 3586 } 3587 return 0; 3588exit: 3589 return -ENOMEM; 3590} 3591 3592static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 3593{ 3594 struct fe_priv *np = netdev_priv(dev); 3595 3596 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; 3597 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; 3598 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; 3599} 3600 3601static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 3602{ 3603 struct fe_priv *np = netdev_priv(dev); 3604 int adv, bmcr; 3605 3606 if ((!np->autoneg && np->duplex == 0) || 3607 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 3608 printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n", 3609 dev->name); 3610 return -EINVAL; 3611 } 3612 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 3613 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 3614 return -EINVAL; 3615 } 3616 3617 netif_carrier_off(dev); 3618 if (netif_running(dev)) { 3619 nv_disable_irq(dev); 3620 netif_tx_lock_bh(dev); 3621 spin_lock(&np->lock); 3622 /* stop engines */ 3623 nv_stop_rx(dev); 3624 nv_stop_tx(dev); 3625 spin_unlock(&np->lock); 3626 netif_tx_unlock_bh(dev); 3627 } 3628 3629 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); 3630 if (pause->rx_pause) 3631 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; 3632 if (pause->tx_pause) 3633 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; 3634 3635 if (np->autoneg && pause->autoneg) { 3636 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; 3637 3638 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3639 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
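/* 802.3x: ADVERTISE_PAUSE_CAP requests symmetric pause, while
 * ADVERTISE_PAUSE_ASYM on its own requests tx-only (asymmetric)
 * pause - hence rx pause sets both bits and tx-only pause sets just
 * ASYM, the same encoding used in nv_set_settings(). */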
3640 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 3641 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 3642 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3643 adv |= ADVERTISE_PAUSE_ASYM; 3644 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 3645 3646 if (netif_running(dev)) 3647 printk(KERN_INFO "%s: link down.\n", dev->name); 3648 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3649 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 3650 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3651 } else { 3652 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 3653 if (pause->rx_pause) 3654 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3655 if (pause->tx_pause) 3656 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3657 3658 if (!netif_running(dev)) 3659 nv_update_linkspeed(dev); 3660 else 3661 nv_update_pause(dev, np->pause_flags); 3662 } 3663 3664 if (netif_running(dev)) { 3665 nv_start_rx(dev); 3666 nv_start_tx(dev); 3667 nv_enable_irq(dev); 3668 } 3669 return 0; 3670} 3671 3672static u32 nv_get_rx_csum(struct net_device *dev) 3673{ 3674 struct fe_priv *np = netdev_priv(dev); 3675 return (np->rx_csum) != 0; 3676} 3677 3678static int nv_set_rx_csum(struct net_device *dev, u32 data) 3679{ 3680 struct fe_priv *np = netdev_priv(dev); 3681 u8 __iomem *base = get_hwbase(dev); 3682 int retcode = 0; 3683 3684 if (np->driver_data & DEV_HAS_CHECKSUM) { 3685 if (data) { 3686 np->rx_csum = 1; 3687 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 3688 } else { 3689 np->rx_csum = 0; 3690 /* vlan is dependent on rx checksum offload */ 3691 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) 3692 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; 3693 } 3694 if (netif_running(dev)) { 3695 spin_lock_irq(&np->lock); 3696 writel(np->txrxctl_bits, base + NvRegTxRxControl); 3697 spin_unlock_irq(&np->lock); 3698 } 3699 } else { 3700 return -EINVAL; 3701 } 3702 3703 return retcode; 3704} 3705 3706static int nv_set_tx_csum(struct net_device *dev, u32 data) 3707{ 3708 struct fe_priv *np = netdev_priv(dev); 3709 3710 if (np->driver_data & DEV_HAS_CHECKSUM) 3711 return ethtool_op_set_tx_hw_csum(dev, data); 3712 else 3713 return -EOPNOTSUPP; 3714} 3715 3716static int nv_set_sg(struct net_device *dev, u32 data) 3717{ 3718 struct fe_priv *np = netdev_priv(dev); 3719 3720 if (np->driver_data & DEV_HAS_CHECKSUM) 3721 return ethtool_op_set_sg(dev, data); 3722 else 3723 return -EOPNOTSUPP; 3724} 3725 3726static int nv_get_stats_count(struct net_device *dev) 3727{ 3728 struct fe_priv *np = netdev_priv(dev); 3729 3730 if (np->driver_data & DEV_HAS_STATISTICS) 3731 return sizeof(struct nv_ethtool_stats)/sizeof(u64); 3732 else 3733 return 0; 3734} 3735 3736static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) 3737{ 3738 struct fe_priv *np = netdev_priv(dev); 3739 3740 /* update stats */ 3741 nv_do_stats_poll((unsigned long)dev); 3742 3743 memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64)); 3744} 3745 3746static int nv_self_test_count(struct net_device *dev) 3747{ 3748 struct fe_priv *np = netdev_priv(dev); 3749 3750 if (np->driver_data & DEV_HAS_TEST_EXTENDED) 3751 return NV_TEST_COUNT_EXTENDED; 3752 else 3753 return NV_TEST_COUNT_BASE; 3754} 3755 3756static int nv_link_test(struct net_device *dev) 3757{ 3758 struct fe_priv *np = netdev_priv(dev); 3759 int mii_status; 3760 3761 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); /* BMSR latches link-down events; read it twice so the second read reflects the current state */ 3762 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3763 3764 /* 
check phy link status */ 3765 if (!(mii_status & BMSR_LSTATUS)) 3766 return 0; 3767 else 3768 return 1; 3769} 3770 3771static int nv_register_test(struct net_device *dev) 3772{ 3773 u8 __iomem *base = get_hwbase(dev); 3774 int i = 0; 3775 u32 orig_read, new_read; 3776 3777 do { 3778 orig_read = readl(base + nv_registers_test[i].reg); 3779 3780 /* xor with mask to toggle bits */ 3781 orig_read ^= nv_registers_test[i].mask; 3782 3783 writel(orig_read, base + nv_registers_test[i].reg); 3784 3785 new_read = readl(base + nv_registers_test[i].reg); 3786 3787 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) 3788 return 0; 3789 3790 /* restore original value */ 3791 orig_read ^= nv_registers_test[i].mask; 3792 writel(orig_read, base + nv_registers_test[i].reg); 3793 3794 } while (nv_registers_test[++i].reg != 0); 3795 3796 return 1; 3797} 3798 3799static int nv_interrupt_test(struct net_device *dev) 3800{ 3801 struct fe_priv *np = netdev_priv(dev); 3802 u8 __iomem *base = get_hwbase(dev); 3803 int ret = 1; 3804 int testcnt; 3805 u32 save_msi_flags, save_poll_interval = 0; 3806 3807 if (netif_running(dev)) { 3808 /* free current irq */ 3809 nv_free_irq(dev); 3810 save_poll_interval = readl(base+NvRegPollingInterval); 3811 } 3812 3813 /* flag to test interrupt handler */ 3814 np->intr_test = 0; 3815 3816 /* setup test irq */ 3817 save_msi_flags = np->msi_flags; 3818 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; 3819 np->msi_flags |= 0x001; /* setup 1 vector */ 3820 if (nv_request_irq(dev, 1)) 3821 return 0; 3822 3823 /* setup timer interrupt */ 3824 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 3825 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 3826 3827 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); 3828 3829 /* wait for at least one interrupt */ 3830 msleep(100); 3831 3832 spin_lock_irq(&np->lock); 3833 3834 /* flag should be set within ISR */ 3835 testcnt = np->intr_test; 3836 if (!testcnt) 3837 ret = 2; 3838 3839 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); 3840 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3841 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3842 else 3843 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 3844 3845 spin_unlock_irq(&np->lock); 3846 3847 nv_free_irq(dev); 3848 3849 np->msi_flags = save_msi_flags; 3850 3851 if (netif_running(dev)) { 3852 writel(save_poll_interval, base + NvRegPollingInterval); 3853 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 3854 /* restore original irq */ 3855 if (nv_request_irq(dev, 0)) 3856 return 0; 3857 } 3858 3859 return ret; 3860} 3861 3862static int nv_loopback_test(struct net_device *dev) 3863{ 3864 struct fe_priv *np = netdev_priv(dev); 3865 u8 __iomem *base = get_hwbase(dev); 3866 struct sk_buff *tx_skb, *rx_skb; 3867 dma_addr_t test_dma_addr; 3868 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? 
NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 3869 u32 flags; 3870 int len, i, pkt_len; 3871 u8 *pkt_data; 3872 u32 filter_flags = 0; 3873 u32 misc1_flags = 0; 3874 int ret = 1; 3875 3876 if (netif_running(dev)) { 3877 nv_disable_irq(dev); 3878 filter_flags = readl(base + NvRegPacketFilterFlags); 3879 misc1_flags = readl(base + NvRegMisc1); 3880 } else { 3881 nv_txrx_reset(dev); 3882 } 3883 3884 /* reinit driver view of the queues */ 3885 set_bufsize(dev); 3886 nv_init_ring(dev); 3887 3888 /* setup hardware for loopback */ 3889 writel(NVREG_MISC1_FORCE, base + NvRegMisc1); 3890 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); 3891 3892 /* reinit nic view of the queues */ 3893 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3894 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3895 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3896 base + NvRegRingSizes); 3897 pci_push(base); 3898 3899 /* restart engines */ 3900 nv_start_rx(dev); 3901 nv_start_tx(dev); 3902 3903 /* setup packet for tx */ 3904 pkt_len = ETH_DATA_LEN; 3905 tx_skb = dev_alloc_skb(pkt_len); 3906 if (!tx_skb) { 3907 printk(KERN_ERR "dev_alloc_skb() failed during loopback test" 3908 " of %s\n", dev->name); 3909 ret = 0; 3910 goto out; 3911 } 3912 pkt_data = skb_put(tx_skb, pkt_len); 3913 for (i = 0; i < pkt_len; i++) 3914 pkt_data[i] = (u8)(i & 0xff); 3915 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, 3916 tx_skb->end-tx_skb->data, PCI_DMA_TODEVICE); /* tx data is read by the device */ 3917 3918 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3919 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 3920 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 3921 } else { 3922 np->tx_ring.ex[0].bufhigh = cpu_to_le32((u64)test_dma_addr >> 32); 3923 np->tx_ring.ex[0].buflow = cpu_to_le32(test_dma_addr & 0xFFFFFFFF); 3924 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 3925 } 3926 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3927 pci_push(get_hwbase(dev)); 3928 3929 msleep(500); 3930 3931 /* check for rx of the packet */ 3932 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3933 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 3934 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 3935 3936 } else { 3937 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); 3938 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 3939 } 3940 3941 if (flags & NV_RX_AVAIL) { 3942 ret = 0; 3943 } else if (np->desc_ver == DESC_VER_1) { 3944 if (flags & NV_RX_ERROR) 3945 ret = 0; 3946 } else { 3947 if (flags & NV_RX2_ERROR) { 3948 ret = 0; 3949 } 3950 } 3951 3952 if (ret) { 3953 if (len != pkt_len) { 3954 ret = 0; 3955 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 3956 dev->name, len, pkt_len); 3957 } else { 3958 rx_skb = np->rx_skbuff[0]; 3959 for (i = 0; i < pkt_len; i++) { 3960 if (rx_skb->data[i] != (u8)(i & 0xff)) { 3961 ret = 0; 3962 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", 3963 dev->name, i); 3964 break; 3965 } 3966 } 3967 } 3968 } else { 3969 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); 3970 } 3971 3972 pci_unmap_single(np->pci_dev, test_dma_addr, 3973 tx_skb->end-tx_skb->data, 3974 PCI_DMA_TODEVICE); 3975 dev_kfree_skb_any(tx_skb); 3976 out: 3977 /* stop engines */ 3978 nv_stop_rx(dev); 3979 nv_stop_tx(dev); 3980 nv_txrx_reset(dev); 3981 /* drain rx 
and tx queues */ 3982 nv_drain_rx(dev); 3983 nv_drain_tx(dev); 3984 3985 if (netif_running(dev)) { 3986 writel(misc1_flags, base + NvRegMisc1); 3987 writel(filter_flags, base + NvRegPacketFilterFlags); 3988 nv_enable_irq(dev); 3989 } 3990 3991 return ret; 3992} 3993 3994static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) 3995{ 3996 struct fe_priv *np = netdev_priv(dev); 3997 u8 __iomem *base = get_hwbase(dev); 3998 int result; 3999 memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64)); 4000 4001 if (!nv_link_test(dev)) { 4002 test->flags |= ETH_TEST_FL_FAILED; 4003 buffer[0] = 1; 4004 } 4005 4006 if (test->flags & ETH_TEST_FL_OFFLINE) { 4007 if (netif_running(dev)) { 4008 netif_stop_queue(dev); 4009 netif_poll_disable(dev); 4010 netif_tx_lock_bh(dev); 4011 spin_lock_irq(&np->lock); 4012 nv_disable_hw_interrupts(dev, np->irqmask); 4013 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 4014 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4015 } else { 4016 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4017 } 4018 /* stop engines */ 4019 nv_stop_rx(dev); 4020 nv_stop_tx(dev); 4021 nv_txrx_reset(dev); 4022 /* drain rx and tx queues */ 4023 nv_drain_rx(dev); 4024 nv_drain_tx(dev); 4025 spin_unlock_irq(&np->lock); 4026 netif_tx_unlock_bh(dev); 4027 } 4028 4029 if (!nv_register_test(dev)) { 4030 test->flags |= ETH_TEST_FL_FAILED; 4031 buffer[1] = 1; 4032 } 4033 4034 result = nv_interrupt_test(dev); 4035 if (result != 1) { 4036 test->flags |= ETH_TEST_FL_FAILED; 4037 buffer[2] = 1; 4038 } 4039 if (result == 0) { 4040 /* bail out */ 4041 return; 4042 } 4043 4044 if (!nv_loopback_test(dev)) { 4045 test->flags |= ETH_TEST_FL_FAILED; 4046 buffer[3] = 1; 4047 } 4048 4049 if (netif_running(dev)) { 4050 /* reinit driver view of the queues */ 4051 set_bufsize(dev); 4052 if (nv_init_ring(dev)) { 4053 if (!np->in_shutdown) 4054 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4055 } 4056 /* reinit nic view of the queues */ 4057 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4058 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4059 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4060 base + NvRegRingSizes); 4061 pci_push(base); 4062 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4063 pci_push(base); 4064 /* restart engines */ 4065 nv_start_rx(dev); 4066 nv_start_tx(dev); 4067 netif_start_queue(dev); 4068 netif_poll_enable(dev); 4069 nv_enable_hw_interrupts(dev, np->irqmask); 4070 } 4071 } 4072} 4073 4074static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) 4075{ 4076 switch (stringset) { 4077 case ETH_SS_STATS: 4078 memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str)); 4079 break; 4080 case ETH_SS_TEST: 4081 memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str)); 4082 break; 4083 } 4084} 4085 4086static const struct ethtool_ops ops = { 4087 .get_drvinfo = nv_get_drvinfo, 4088 .get_link = ethtool_op_get_link, 4089 .get_wol = nv_get_wol, 4090 .set_wol = nv_set_wol, 4091 .get_settings = nv_get_settings, 4092 .set_settings = nv_set_settings, 4093 .get_regs_len = nv_get_regs_len, 4094 .get_regs = nv_get_regs, 4095 .nway_reset = nv_nway_reset, 4096 .get_perm_addr = ethtool_op_get_perm_addr, 4097 .get_tso = ethtool_op_get_tso, 4098 .set_tso = nv_set_tso, 4099 .get_ringparam = nv_get_ringparam, 4100 .set_ringparam = nv_set_ringparam, 4101 .get_pauseparam = nv_get_pauseparam, 4102 .set_pauseparam 
= nv_set_pauseparam, 4103 .get_rx_csum = nv_get_rx_csum, 4104 .set_rx_csum = nv_set_rx_csum, 4105 .get_tx_csum = ethtool_op_get_tx_csum, 4106 .set_tx_csum = nv_set_tx_csum, 4107 .get_sg = ethtool_op_get_sg, 4108 .set_sg = nv_set_sg, 4109 .get_strings = nv_get_strings, 4110 .get_stats_count = nv_get_stats_count, 4111 .get_ethtool_stats = nv_get_ethtool_stats, 4112 .self_test_count = nv_self_test_count, 4113 .self_test = nv_self_test, 4114}; 4115 4116static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 4117{ 4118 struct fe_priv *np = get_nvpriv(dev); 4119 4120 spin_lock_irq(&np->lock); 4121 4122 /* save vlan group */ 4123 np->vlangrp = grp; 4124 4125 if (grp) { 4126 /* enable vlan on MAC */ 4127 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; 4128 } else { 4129 /* disable vlan on MAC */ 4130 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 4131 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 4132 } 4133 4134 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4135 4136 spin_unlock_irq(&np->lock); 4137} 4138 4139static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 4140{ 4141 /* nothing to do */ 4142} 4143 4144/* The mgmt unit and driver use a semaphore to access the phy during init */ 4145static int nv_mgmt_acquire_sema(struct net_device *dev) 4146{ 4147 u8 __iomem *base = get_hwbase(dev); 4148 int i; 4149 u32 tx_ctrl, mgmt_sema; 4150 4151 for (i = 0; i < 10; i++) { 4152 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; 4153 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) 4154 break; 4155 msleep(500); 4156 } 4157 4158 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) 4159 return 0; 4160 4161 for (i = 0; i < 2; i++) { 4162 tx_ctrl = readl(base + NvRegTransmitterControl); 4163 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; 4164 writel(tx_ctrl, base + NvRegTransmitterControl); 4165 4166 /* verify that semaphore was acquired */ 4167 tx_ctrl = readl(base + NvRegTransmitterControl); 4168 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && 4169 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) 4170 return 1; 4171 else 4172 udelay(50); 4173 } 4174 4175 return 0; 4176} 4177 4178static int nv_open(struct net_device *dev) 4179{ 4180 struct fe_priv *np = netdev_priv(dev); 4181 u8 __iomem *base = get_hwbase(dev); 4182 int ret = 1; 4183 int oom, i; 4184 4185 dprintk(KERN_DEBUG "nv_open: begin\n"); 4186 4187 /* erase previous misconfiguration */ 4188 if (np->driver_data & DEV_HAS_POWER_CNTRL) 4189 nv_mac_reset(dev); 4190 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 4191 writel(0, base + NvRegMulticastAddrB); 4192 writel(0, base + NvRegMulticastMaskA); 4193 writel(0, base + NvRegMulticastMaskB); 4194 writel(0, base + NvRegPacketFilterFlags); 4195 4196 writel(0, base + NvRegTransmitterControl); 4197 writel(0, base + NvRegReceiverControl); 4198 4199 writel(0, base + NvRegAdapterControl); 4200 4201 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) 4202 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 4203 4204 /* initialize descriptor rings */ 4205 set_bufsize(dev); 4206 oom = nv_init_ring(dev); 4207 4208 writel(0, base + NvRegLinkSpeed); 4209 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 4210 nv_txrx_reset(dev); 4211 writel(0, base + NvRegUnknownSetupReg6); 4212 4213 np->in_shutdown = 0; 4214 4215 /* give hw rings */ 4216 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
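/*
 * Both ring sizes are programmed through a single register write: the
 * hardware expects (entries - 1) for each ring, placed at
 * NVREG_RINGSZ_RXSHIFT and NVREG_RINGSZ_TXSHIFT respectively. The same
 * pattern is repeated wherever the rings are handed back to the nic
 * (ringparam changes and the loopback self test).
 */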
4217 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4218 base + NvRegRingSizes); 4219 4220 writel(np->linkspeed, base + NvRegLinkSpeed); 4221 if (np->desc_ver == DESC_VER_1) 4222 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); 4223 else 4224 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); 4225 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4226 writel(np->vlanctl_bits, base + NvRegVlanControl); 4227 pci_push(base); 4228 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 4229 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 4230 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 4231 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 4232 4233 writel(0, base + NvRegMIIMask); 4234 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4235 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 4236 4237 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 4238 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 4239 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 4240 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4241 4242 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 4243 get_random_bytes(&i, sizeof(i)); 4244 writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); 4245 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); 4246 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); 4247 if (poll_interval == -1) { 4248 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 4249 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 4250 else 4251 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 4252 } 4253 else 4254 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 4255 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4256 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 4257 base + NvRegAdapterControl); 4258 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); 4259 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); 4260 if (np->wolenabled) 4261 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 4262 4263 i = readl(base + NvRegPowerState); 4264 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 4265 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 4266 4267 pci_push(base); 4268 udelay(10); 4269 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); 4270 4271 nv_disable_hw_interrupts(dev, np->irqmask); 4272 pci_push(base); 4273 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 4274 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4275 pci_push(base); 4276 4277 if (nv_request_irq(dev, 0)) { 4278 goto out_drain; 4279 } 4280 4281 /* ask for interrupts */ 4282 nv_enable_hw_interrupts(dev, np->irqmask); 4283 4284 spin_lock_irq(&np->lock); 4285 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 4286 writel(0, base + NvRegMulticastAddrB); 4287 writel(0, base + NvRegMulticastMaskA); 4288 writel(0, base + NvRegMulticastMaskB); 4289 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 4290 /* One manual link speed update: Interrupts are enabled, future link 4291 * speed changes cause interrupts and are handled by nv_link_irq(). 
4292 */ 4293 { 4294 u32 miistat; 4295 miistat = readl(base + NvRegMIIStatus); 4296 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 4297 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); 4298 } 4299 /* set linkspeed to invalid value, thus force nv_update_linkspeed 4300 * to init hw */ 4301 np->linkspeed = 0; 4302 ret = nv_update_linkspeed(dev); 4303 nv_start_rx(dev); 4304 nv_start_tx(dev); 4305 netif_start_queue(dev); 4306 netif_poll_enable(dev); 4307 4308 if (ret) { 4309 netif_carrier_on(dev); 4310 } else { 4311 printk(KERN_INFO "%s: no link during initialization.\n", dev->name); 4312 netif_carrier_off(dev); 4313 } 4314 if (oom) 4315 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4316 4317 /* start statistics timer */ 4318 if (np->driver_data & DEV_HAS_STATISTICS) 4319 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 4320 4321 spin_unlock_irq(&np->lock); 4322 4323 return 0; 4324out_drain: 4325 drain_ring(dev); 4326 return ret; 4327} 4328 4329static int nv_close(struct net_device *dev) 4330{ 4331 struct fe_priv *np = netdev_priv(dev); 4332 u8 __iomem *base; 4333 4334 spin_lock_irq(&np->lock); 4335 np->in_shutdown = 1; 4336 spin_unlock_irq(&np->lock); 4337 netif_poll_disable(dev); 4338 synchronize_irq(dev->irq); 4339 4340 del_timer_sync(&np->oom_kick); 4341 del_timer_sync(&np->nic_poll); 4342 del_timer_sync(&np->stats_poll); 4343 4344 netif_stop_queue(dev); 4345 spin_lock_irq(&np->lock); 4346 nv_stop_tx(dev); 4347 nv_stop_rx(dev); 4348 nv_txrx_reset(dev); 4349 4350 /* disable interrupts on the nic or we will lock up */ 4351 base = get_hwbase(dev); 4352 nv_disable_hw_interrupts(dev, np->irqmask); 4353 pci_push(base); 4354 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); 4355 4356 spin_unlock_irq(&np->lock); 4357 4358 nv_free_irq(dev); 4359 4360 drain_ring(dev); 4361 4362 if (np->wolenabled) 4363 nv_start_rx(dev); 4364 4365 /* FIXME: power down nic */ 4366 4367 return 0; 4368} 4369 4370static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 4371{ 4372 struct net_device *dev; 4373 struct fe_priv *np; 4374 unsigned long addr; 4375 u8 __iomem *base; 4376 int err, i; 4377 u32 powerstate, txreg; 4378 u32 phystate_orig = 0, phystate; 4379 int phyinitialized = 0; 4380 4381 dev = alloc_etherdev(sizeof(struct fe_priv)); 4382 err = -ENOMEM; 4383 if (!dev) 4384 goto out; 4385 4386 np = netdev_priv(dev); 4387 np->pci_dev = pci_dev; 4388 spin_lock_init(&np->lock); 4389 SET_MODULE_OWNER(dev); 4390 SET_NETDEV_DEV(dev, &pci_dev->dev); 4391 4392 init_timer(&np->oom_kick); 4393 np->oom_kick.data = (unsigned long) dev; 4394 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ 4395 init_timer(&np->nic_poll); 4396 np->nic_poll.data = (unsigned long) dev; 4397 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ 4398 init_timer(&np->stats_poll); 4399 np->stats_poll.data = (unsigned long) dev; 4400 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ 4401 4402 err = pci_enable_device(pci_dev); 4403 if (err) { 4404 printk(KERN_INFO "forcedeth: pci_enable_device failed (%d) for device %s\n", 4405 err, pci_name(pci_dev)); 4406 goto out_free; 4407 } 4408 4409 pci_set_master(pci_dev); 4410 4411 err = pci_request_regions(pci_dev, DRV_NAME); 4412 if (err < 0) 4413 goto out_disable; 4414 4415 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS)) 4416 np->register_size = NV_PCI_REGSZ_VER2; 4417 else 4418 np->register_size = NV_PCI_REGSZ_VER1; 4419 4420 err = -EINVAL; 4421 addr = 0; 4422 for (i = 0; i < 
DEVICE_COUNT_RESOURCE; i++) { 4423 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", 4424 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), 4425 pci_resource_len(pci_dev, i), 4426 pci_resource_flags(pci_dev, i)); 4427 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 4428 pci_resource_len(pci_dev, i) >= np->register_size) { 4429 addr = pci_resource_start(pci_dev, i); 4430 break; 4431 } 4432 } 4433 if (i == DEVICE_COUNT_RESOURCE) { 4434 printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n", 4435 pci_name(pci_dev)); 4436 goto out_relreg; 4437 } 4438 4439 /* copy of driver data */ 4440 np->driver_data = id->driver_data; 4441 4442 /* handle different descriptor versions */ 4443 if (id->driver_data & DEV_HAS_HIGH_DMA) { 4444 /* packet format 3: supports 40-bit addressing */ 4445 np->desc_ver = DESC_VER_3; 4446 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 4447 if (dma_64bit) { 4448 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { 4449 printk(KERN_INFO "forcedeth: 39-bit DMA mask failed, using 32-bit addressing for device %s.\n", 4450 pci_name(pci_dev)); 4451 } else { 4452 dev->features |= NETIF_F_HIGHDMA; 4453 printk(KERN_INFO "forcedeth: using HIGHDMA\n"); 4454 } 4455 if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) { 4456 printk(KERN_INFO "forcedeth: 39-bit consistent DMA mask failed, using 32-bit ring buffers for device %s.\n", 4457 pci_name(pci_dev)); 4458 } 4459 } 4460 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 4461 /* packet format 2: supports jumbo frames */ 4462 np->desc_ver = DESC_VER_2; 4463 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; 4464 } else { 4465 /* original packet format */ 4466 np->desc_ver = DESC_VER_1; 4467 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; 4468 } 4469 4470 np->pkt_limit = NV_PKTLIMIT_1; 4471 if (id->driver_data & DEV_HAS_LARGEDESC) 4472 np->pkt_limit = NV_PKTLIMIT_2; 4473 4474 if (id->driver_data & DEV_HAS_CHECKSUM) { 4475 np->rx_csum = 1; 4476 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4477 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 4478#ifdef NETIF_F_TSO 4479 dev->features |= NETIF_F_TSO; 4480#endif 4481 } 4482 4483 np->vlanctl_bits = 0; 4484 if (id->driver_data & DEV_HAS_VLAN) { 4485 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; 4486 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; 4487 dev->vlan_rx_register = nv_vlan_rx_register; 4488 dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid; 4489 } 4490 4491 np->msi_flags = 0; 4492 if ((id->driver_data & DEV_HAS_MSI) && msi) { 4493 np->msi_flags |= NV_MSI_CAPABLE; 4494 } 4495 if ((id->driver_data & DEV_HAS_MSI_X) && msix) { 4496 np->msi_flags |= NV_MSI_X_CAPABLE; 4497 } 4498 4499 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; 4500 if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) { 4501 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; 4502 } 4503 4504 4505 err = -ENOMEM; 4506 np->base = ioremap(addr, np->register_size); 4507 if (!np->base) 4508 goto out_relreg; 4509 dev->base_addr = (unsigned long)np->base; 4510 4511 dev->irq = pci_dev->irq; 4512 4513 np->rx_ring_size = RX_RING_DEFAULT; 4514 np->tx_ring_size = TX_RING_DEFAULT; 4515 np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE; 4516 np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1; 4517 4518 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4519 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 4520 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 4521 &np->ring_addr); 4522 if 
(!np->rx_ring.orig) 4523 goto out_unmap; 4524 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4525 } else { 4526 np->rx_ring.ex = pci_alloc_consistent(pci_dev, 4527 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 4528 &np->ring_addr); 4529 if (!np->rx_ring.ex) 4530 goto out_unmap; 4531 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4532 } 4533 np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL); 4534 np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL); 4535 np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL); 4536 np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL); 4537 np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL); 4538 if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len) 4539 goto out_freering; 4540 memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); 4541 memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); 4542 memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size); 4543 memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size); 4544 memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size); 4545 4546 dev->open = nv_open; 4547 dev->stop = nv_close; 4548 dev->hard_start_xmit = nv_start_xmit; 4549 dev->get_stats = nv_get_stats; 4550 dev->change_mtu = nv_change_mtu; 4551 dev->set_mac_address = nv_set_mac_address; 4552 dev->set_multicast_list = nv_set_multicast; 4553#ifdef CONFIG_NET_POLL_CONTROLLER 4554 dev->poll_controller = nv_poll_controller; 4555#endif 4556 dev->weight = 64; 4557#ifdef CONFIG_FORCEDETH_NAPI 4558 dev->poll = nv_napi_poll; 4559#endif 4560 SET_ETHTOOL_OPS(dev, &ops); 4561 dev->tx_timeout = nv_tx_timeout; 4562 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 4563 4564 pci_set_drvdata(pci_dev, dev); 4565 4566 /* read the mac address */ 4567 base = get_hwbase(dev); 4568 np->orig_mac[0] = readl(base + NvRegMacAddrA); 4569 np->orig_mac[1] = readl(base + NvRegMacAddrB); 4570 4571 /* check the workaround bit for correct mac address order */ 4572 txreg = readl(base + NvRegTransmitPoll); 4573 if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { 4574 /* mac address is already in correct order */ 4575 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; 4576 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; 4577 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; 4578 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; 4579 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; 4580 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; 4581 } else { 4582 /* need to reverse mac address to correct order */ 4583 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; 4584 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; 4585 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; 4586 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; 4587 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 4588 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 4589 /* set permanent address to be correct as well */ 4590 np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 4591 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 4592 np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 4593 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 4594 } 4595 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 4596 4597 if (!is_valid_ether_addr(dev->perm_addr)) { 4598 /* 4599 * Bad MAC address. 
At least one BIOS sets the MAC address 4600 * to 01:23:45:67:89:ab 4601 */ 4602 printk(KERN_ERR "%s: Invalid MAC address detected: %02x:%02x:%02x:%02x:%02x:%02x\n", 4603 pci_name(pci_dev), 4604 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 4605 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 4606 printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n"); 4607 dev->dev_addr[0] = 0x00; 4608 dev->dev_addr[1] = 0x00; 4609 dev->dev_addr[2] = 0x6c; 4610 get_random_bytes(&dev->dev_addr[3], 3); 4611 } 4612 4613 dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev), 4614 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 4615 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 4616 4617 /* set mac address */ 4618 nv_copy_mac_to_hw(dev); 4619 4620 /* disable WOL */ 4621 writel(0, base + NvRegWakeUpFlags); 4622 np->wolenabled = 0; 4623 4624 if (id->driver_data & DEV_HAS_POWER_CNTRL) { 4625 u8 revision_id; 4626 pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id); 4627 4628 /* take phy and nic out of low power mode */ 4629 powerstate = readl(base + NvRegPowerState2); 4630 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; 4631 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || 4632 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) && 4633 revision_id >= 0xA3) 4634 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; 4635 writel(powerstate, base + NvRegPowerState2); 4636 } 4637 4638 if (np->desc_ver == DESC_VER_1) { 4639 np->tx_flags = NV_TX_VALID; 4640 } else { 4641 np->tx_flags = NV_TX2_VALID; 4642 } 4643 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { 4644 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 4645 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 4646 np->msi_flags |= 0x0003; 4647 } else { 4648 np->irqmask = NVREG_IRQMASK_CPU; 4649 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 4650 np->msi_flags |= 0x0001; 4651 } 4652 4653 if (id->driver_data & DEV_NEED_TIMERIRQ) 4654 np->irqmask |= NVREG_IRQ_TIMER; 4655 if (id->driver_data & DEV_NEED_LINKTIMER) { 4656 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev)); 4657 np->need_linktimer = 1; 4658 np->link_timeout = jiffies + LINK_TIMEOUT; 4659 } else { 4660 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev)); 4661 np->need_linktimer = 0; 4662 } 4663 4664 /* clear phy state and temporarily halt phy interrupts */ 4665 writel(0, base + NvRegMIIMask); 4666 phystate = readl(base + NvRegAdapterControl); 4667 if (phystate & NVREG_ADAPTCTL_RUNNING) { 4668 phystate_orig = 1; 4669 phystate &= ~NVREG_ADAPTCTL_RUNNING; 4670 writel(phystate, base + NvRegAdapterControl); 4671 } 4672 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 4673 4674 if (id->driver_data & DEV_HAS_MGMT_UNIT) { 4675 /* management unit running on the mac? */ 4676 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) { 4677 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; 4678 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use); 4679 for (i = 0; i < 5000; i++) { 4680 msleep(1); 4681 if (nv_mgmt_acquire_sema(dev)) { 4682 /* has the management unit set up the phy already? 
*/ 4683 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == 4684 NVREG_XMITCTL_SYNC_PHY_INIT) { 4685 /* phy is inited by mgmt unit */ 4686 phyinitialized = 1; 4687 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); 4688 } else { 4689 /* we need to init the phy */ 4690 } 4691 break; 4692 } 4693 } 4694 } 4695 } 4696 4697 /* find a suitable phy */ 4698 for (i = 1; i <= 32; i++) { 4699 int id1, id2; 4700 int phyaddr = i & 0x1F; 4701 4702 spin_lock_irq(&np->lock); 4703 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ); 4704 spin_unlock_irq(&np->lock); 4705 if (id1 < 0 || id1 == 0xffff) 4706 continue; 4707 spin_lock_irq(&np->lock); 4708 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ); 4709 spin_unlock_irq(&np->lock); 4710 if (id2 < 0 || id2 == 0xffff) 4711 continue; 4712 4713 np->phy_model = id2 & PHYID2_MODEL_MASK; 4714 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; 4715 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; 4716 dprintk(KERN_DEBUG "%s: probe: Found PHY %04x:%04x at address %d.\n", 4717 pci_name(pci_dev), id1, id2, phyaddr); 4718 np->phyaddr = phyaddr; 4719 np->phy_oui = id1 | id2; 4720 break; 4721 } 4722 if (i == 33) { 4723 printk(KERN_INFO "%s: probe: Could not find a valid PHY.\n", 4724 pci_name(pci_dev)); 4725 goto out_error; 4726 } 4727 4728 if (!phyinitialized) { 4729 /* reset it */ 4730 phy_init(dev); 4731 } else { 4732 /* see if it is a gigabit phy */ 4733 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4734 if (mii_status & PHY_GIGABIT) { 4735 np->gigabit = PHY_GIGABIT; 4736 } 4737 } 4738 4739 /* set default link speed settings */ 4740 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 4741 np->duplex = 0; 4742 np->autoneg = 1; 4743 4744 err = register_netdev(dev); 4745 if (err) { 4746 printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err); 4747 goto out_error; 4748 } 4749 printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n", 4750 dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device, 4751 pci_name(pci_dev)); 4752 4753 return 0; 4754 4755out_error: 4756 if (phystate_orig) 4757 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); 4758 pci_set_drvdata(pci_dev, NULL); 4759out_freering: 4760 free_rings(dev); 4761out_unmap: 4762 iounmap(get_hwbase(dev)); 4763out_relreg: 4764 pci_release_regions(pci_dev); 4765out_disable: 4766 pci_disable_device(pci_dev); 4767out_free: 4768 free_netdev(dev); 4769out: 4770 return err; 4771} 4772 4773static void __devexit nv_remove(struct pci_dev *pci_dev) 4774{ 4775 struct net_device *dev = pci_get_drvdata(pci_dev); 4776 struct fe_priv *np = netdev_priv(dev); 4777 u8 __iomem *base = get_hwbase(dev); 4778 4779 unregister_netdev(dev); 4780 4781 /* special op: write back the misordered MAC address - otherwise 4782 * the next nv_probe would see a wrong address. 4783 */ 4784 writel(np->orig_mac[0], base + NvRegMacAddrA); 4785 writel(np->orig_mac[1], base + NvRegMacAddrB); 4786 4787 /* free all structures */ 4788 free_rings(dev); 4789 iounmap(get_hwbase(dev)); 4790 pci_release_regions(pci_dev); 4791 pci_disable_device(pci_dev); 4792 free_netdev(dev); 4793 pci_set_drvdata(pci_dev, NULL); 4794} 4795 4796#ifdef CONFIG_PM 4797static int nv_suspend(struct pci_dev *pdev, pm_message_t state) 4798{ 4799 struct net_device *dev = pci_get_drvdata(pdev); 4800 struct fe_priv *np = netdev_priv(dev); 4801 4802 if (!netif_running(dev)) 4803 goto out; 4804 4805 netif_device_detach(dev); 4806 4807 // Gross. 
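/*
 * Suspend simply tears the interface down: nv_close() stops the
 * engines, frees the irq and drains the rings, and nv_resume() later
 * rebuilds everything from scratch via nv_open(). Crude, but it avoids
 * having to snapshot and restore chip state.
 */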
4808 nv_close(dev); 4809 4810 pci_save_state(pdev); 4811 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled); 4812 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4813out: 4814 return 0; 4815} 4816 4817static int nv_resume(struct pci_dev *pdev) 4818{ 4819 struct net_device *dev = pci_get_drvdata(pdev); 4820 int rc = 0; 4821 4822 if (!netif_running(dev)) 4823 goto out; 4824 4825 pci_set_power_state(pdev, PCI_D0); 4826 pci_restore_state(pdev); 4827 pci_enable_wake(pdev, PCI_D0, 0); 4828 4829 netif_device_attach(dev); 4830 4831 rc = nv_open(dev); 4832out: 4833 return rc; 4834} 4835#else 4836#define nv_suspend NULL 4837#define nv_resume NULL 4838#endif /* CONFIG_PM */ 4839 4840static struct pci_device_id pci_tbl[] = { 4841 { /* nForce Ethernet Controller */ 4842 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1), 4843 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 4844 }, 4845 { /* nForce2 Ethernet Controller */ 4846 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2), 4847 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 4848 }, 4849 { /* nForce3 Ethernet Controller */ 4850 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3), 4851 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 4852 }, 4853 { /* nForce3 Ethernet Controller */ 4854 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4), 4855 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 4856 }, 4857 { /* nForce3 Ethernet Controller */ 4858 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5), 4859 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 4860 }, 4861 { /* nForce3 Ethernet Controller */ 4862 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6), 4863 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 4864 }, 4865 { /* nForce3 Ethernet Controller */ 4866 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7), 4867 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 4868 }, 4869 { /* CK804 Ethernet Controller */ 4870 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), 4871 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 4872 }, 4873 { /* CK804 Ethernet Controller */ 4874 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), 4875 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 4876 }, 4877 { /* MCP04 Ethernet Controller */ 4878 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), 4879 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 4880 }, 4881 { /* MCP04 Ethernet Controller */ 4882 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), 4883 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 4884 }, 4885 { /* MCP51 Ethernet Controller */ 4886 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), 4887 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, 4888 }, 4889 { /* MCP51 Ethernet Controller */ 4890 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), 4891 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, 4892 }, 4893 { /* MCP55 Ethernet Controller */ 4894 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 
PCI_DEVICE_ID_NVIDIA_NVENET_14), 4895 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4896 }, 4897 { /* MCP55 Ethernet Controller */ 4898 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 4899 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4900 }, 4901 { /* MCP61 Ethernet Controller */ 4902 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), 4903 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4904 }, 4905 { /* MCP61 Ethernet Controller */ 4906 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), 4907 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4908 }, 4909 { /* MCP61 Ethernet Controller */ 4910 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), 4911 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4912 }, 4913 { /* MCP61 Ethernet Controller */ 4914 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), 4915 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4916 }, 4917 { /* MCP65 Ethernet Controller */ 4918 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), 4919 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4920 }, 4921 { /* MCP65 Ethernet Controller */ 4922 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), 4923 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4924 }, 4925 { /* MCP65 Ethernet Controller */ 4926 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), 4927 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4928 }, 4929 { /* MCP65 Ethernet Controller */ 4930 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), 4931 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4932 }, 4933 { /* MCP67 Ethernet Controller */ 4934 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), 4935 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4936 }, 
4937 { /* MCP67 Ethernet Controller */ 4938 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), 4939 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4940 }, 4941 { /* MCP67 Ethernet Controller */ 4942 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), 4943 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4944 }, 4945 { /* MCP67 Ethernet Controller */ 4946 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), 4947 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 4948 }, 4949 {0,}, 4950}; 4951 4952static struct pci_driver driver = { 4953 .name = "forcedeth", 4954 .id_table = pci_tbl, 4955 .probe = nv_probe, 4956 .remove = __devexit_p(nv_remove), 4957 .suspend = nv_suspend, 4958 .resume = nv_resume, 4959}; 4960 4961static int __init init_nic(void) 4962{ 4963 printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); 4964 return pci_register_driver(&driver); 4965} 4966 4967static void __exit exit_nic(void) 4968{ 4969 pci_unregister_driver(&driver); 4970} 4971 4972module_param(max_interrupt_work, int, 0); 4973MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt"); 4974module_param(optimization_mode, int, 0); 4975MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer."); 4976module_param(poll_interval, int, 0); 4977MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and max is 65535."); 4978module_param(msi, int, 0); 4979MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0."); 4980module_param(msix, int, 0); 4981MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0."); 4982module_param(dma_64bit, int, 0); 4983MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); 4984 4985MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); 4986MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); 4987MODULE_LICENSE("GPL"); 4988 4989MODULE_DEVICE_TABLE(pci, pci_tbl); 4990 4991module_init(init_nic); 4992module_exit(exit_nic);
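/*
 * Example invocation (hypothetical values), combining the parameters
 * described above: cpu mode with a fixed timer interrupt and MSI enabled:
 *
 *   modprobe forcedeth optimization_mode=1 poll_interval=100 msi=1
 *
 * Following the poll_interval formula, a value v fires the timer roughly
 * every (v * 2^10) / 100 microseconds: v=100 gives ~1 ms, and the maximum
 * of 65535 gives ~0.67 s.
 */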