/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION	"0.61"
#define DRV_NAME	"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk	printk
#else
#define dprintk(x...)	do { } while (0)
#endif

#define TX_WORK_PER_LOOP  64
#define RX_WORK_PER_LOOP  64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x00001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x00002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x00004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x00008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x00010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN	0x00020  /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI	0x00040  /* device supports MSI */
#define DEV_HAS_MSI_X	0x00080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x00100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1	0x00200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x00400  /* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED	0x00800  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x01000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR	0x02000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX	0x04000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1	0x08000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2	0x10000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3	0x20000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT	0x40000  /* device needs to limit tx */
#define DEV_HAS_GEAR_MODE	0x80000  /* device supports gear mode */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x81ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR	0x0001
#define NVREG_IRQ_RX	0x0002
#define NVREG_IRQ_RX_NOBUF	0x0004
#define NVREG_IRQ_TX_ERR	0x0008
#define NVREG_IRQ_TX_OK	0x0010
#define NVREG_IRQ_TIMER	0x0020
#define NVREG_IRQ_LINK	0x0040
#define NVREG_IRQ_RX_FORCED	0x0080
#define NVREG_IRQ_TX_FORCED	0x0100
#define NVREG_IRQ_RECOVER_ERROR	0x8000
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU	0x0060
#define NVREG_IRQ_TX_ALL	(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL	(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER	(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
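/*
 * Rough conversion, assuming the relation documented at the poll_interval
 * module parameter below (value = (time_in_usec * 100) / 2^10, i.e. one
 * register unit is about 10.24 us): 97 ~= 993 us ~= 1 ms, the throughput
 * default of 970 ~= 9.9 ms, and the CPU-mode default of 13 ~= 133 us.
 */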
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD	0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK	0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF	0x0000ff00
#define NVREG_SLOTTIME_DEFAULT	0x00007f00
#define NVREG_SLOTTIME_MASK	0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT	0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100	0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000	0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH	0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE	0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE	0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII	0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT	0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK	0x000003ff
#define NVREG_BKOFFCTRL_SELECT	24
#define NVREG_BKOFFCTRL_GEAR	12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR	0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW	0x0007
#define NVREG_MIISTAT_MASK_ALL	0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE	0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL	0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT	0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID	0x0100
#define NVREG_POWERSTATE_MASK	0x0003
#define NVREG_POWERSTATE_D0	0x0000
#define NVREG_POWERSTATE_D1	0x0001
#define NVREG_POWERSTATE_D2	0x0002
#define NVREG_POWERSTATE_D3	0x0003
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK	0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

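/*
 * A descriptor's flaglen word packs the buffer/frame length into the low bits
 * (LEN_MASK_V1/V2) and the status/command flags defined below into the high
 * bits (FLAG_MASK_V1/V2); nv_descr_getlength()/nv_descr_getlength_ex()
 * further down extract the length part with exactly these masks.
 */
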
#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED	(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW	(1<<29)
#define NV_TX_ERROR	(1<<30)
#define NV_TX_VALID	(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED	(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR	(1<<30)
#define NV_TX2_VALID	(1<<31)
#define NV_TX2_TSO	(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT	(1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1	(1<<23)
#define NV_RX_ERROR2	(1<<24)
#define NV_RX_ERROR3	(1<<25)
#define NV_RX_ERROR4	(1<<26)
#define NV_RX_CRCERR	(1<<27)
#define NV_RX_OVERFLOW	(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR	(1<<30)
#define NV_RX_AVAIL	(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1	(1<<18)
#define NV_RX2_ERROR2	(1<<19)
#define NV_RX2_ERROR3	(1<<20)
#define NV_RX2_ERROR4	(1<<21)
#define NV_RX2_CRCERR	(1<<22)
#define NV_RX2_OVERFLOW	(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR	(1<<30)
#define NV_RX2_AVAIL	(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY	5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY	10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT	128
#define TX_RING_DEFAULT	256
#define RX_RING_MIN	128
#define TX_RING_MIN	64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS	(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD	(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3

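/*
 * DESC_VER_1 and DESC_VER_2 use the 8-byte struct ring_desc layout, while
 * DESC_VER_3 uses the 16-byte struct ring_desc_ex layout with split
 * high/low buffer addresses; nv_optimized() below keys off exactly this
 * distinction when choosing between the .orig and .ex ring accessors.
 */
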
/* PHY defines */
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHY_OUI_VITESSE	0x01c1
#define PHY_OUI_REALTEK	0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK	0x03f0
#define PHY_MODEL_REALTEK_8211	0x0110
#define PHY_REV_MASK	0x0001
#define PHY_REV_REALTEK_8211B	0x0000
#define PHY_REV_REALTEK_8211C	0x0001
#define PHY_MODEL_REALTEK_8201	0x0200
#define PHY_MODEL_MARVELL_E3016	0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100	0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE	0x0001
#define NV_PAUSEFRAME_TX_CAPABLE	0x0002
#define NV_PAUSEFRAME_RX_ENABLE	0x0004
#define NV_PAUSEFRAME_TX_ENABLE	0x0008
#define NV_PAUSEFRAME_RX_REQ	0x0010
#define NV_PAUSEFRAME_TX_REQ	0x0020
#define NV_PAUSEFRAME_AUTONEG	0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE	0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED	0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

#define NV_RESTART_TX	0x1
#define NV_RESTART_RX	0x2

#define NV_TX_LIMIT_COUNT	16

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;
};

#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)

/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0,0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */

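/*
 * In practice the slow paths bracket hardware access with
 * disable_irq()/enable_irq() (or the per-vector MSI-X equivalents) and then
 * take spin_lock_irq(&np->lock); nv_do_rx_refill() below is one example of
 * that pattern.
 */
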
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_DISABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

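/*
 * Assuming these knobs are exported with module_param() under the same names
 * later in the file (the "Overridable with module param" note above implies
 * as much), a hypothetical load-time override would look like:
 *
 *	modprobe forcedeth optimization_mode=1 msi=0
 *
 * i.e. timer-driven (CPU mode) interrupt moderation with MSI disabled.
 */
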
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
				int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}

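/*
 * The double shift above is deliberate: a plain "addr >> 32" would be
 * undefined (and trigger a compiler warning) when dma_addr_t is only 32 bits
 * wide, while ">> 31 >> 1" is always well defined and still yields the upper
 * 32 bits on 64-bit configurations.
 */
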
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}

static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	/* reset the phy
	 * (certain phys need bmcr to be setup with reset)
	 */
	if (phy_reset(dev, mii_control)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
				phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
				phy_reserved |= PHY_REALTEK_INIT3;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc* less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

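/*
 * Note that both nv_alloc_rx() above and nv_alloc_rx_optimized() below stop
 * one descriptor short of get_rx ("less_rx"): keeping one slot permanently
 * unfilled lets the put/get pointers distinguish a completely full ring from
 * a completely empty one.
 */
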
fe_priv *np = netdev_priv(dev); 1689 int retcode; 1690 1691 if (!using_multi_irqs(dev)) { 1692 if (np->msi_flags & NV_MSI_X_ENABLED) 1693 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1694 else 1695 disable_irq(np->pci_dev->irq); 1696 } else { 1697 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1698 } 1699 if (!nv_optimized(np)) 1700 retcode = nv_alloc_rx(dev); 1701 else 1702 retcode = nv_alloc_rx_optimized(dev); 1703 if (retcode) { 1704 spin_lock_irq(&np->lock); 1705 if (!np->in_shutdown) 1706 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 1707 spin_unlock_irq(&np->lock); 1708 } 1709 if (!using_multi_irqs(dev)) { 1710 if (np->msi_flags & NV_MSI_X_ENABLED) 1711 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1712 else 1713 enable_irq(np->pci_dev->irq); 1714 } else { 1715 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1716 } 1717} 1718#endif 1719 1720static void nv_init_rx(struct net_device *dev) 1721{ 1722 struct fe_priv *np = netdev_priv(dev); 1723 int i; 1724 1725 np->get_rx = np->put_rx = np->first_rx = np->rx_ring; 1726 1727 if (!nv_optimized(np)) 1728 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; 1729 else 1730 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; 1731 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb; 1732 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; 1733 1734 for (i = 0; i < np->rx_ring_size; i++) { 1735 if (!nv_optimized(np)) { 1736 np->rx_ring.orig[i].flaglen = 0; 1737 np->rx_ring.orig[i].buf = 0; 1738 } else { 1739 np->rx_ring.ex[i].flaglen = 0; 1740 np->rx_ring.ex[i].txvlan = 0; 1741 np->rx_ring.ex[i].bufhigh = 0; 1742 np->rx_ring.ex[i].buflow = 0; 1743 } 1744 np->rx_skb[i].skb = NULL; 1745 np->rx_skb[i].dma = 0; 1746 } 1747} 1748 1749static void nv_init_tx(struct net_device *dev) 1750{ 1751 struct fe_priv *np = netdev_priv(dev); 1752 int i; 1753 1754 np->get_tx = np->put_tx = np->first_tx = np->tx_ring; 1755 1756 if (!nv_optimized(np)) 1757 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; 1758 else 1759 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; 1760 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; 1761 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; 1762 np->tx_pkts_in_progress = 0; 1763 np->tx_change_owner = NULL; 1764 np->tx_end_flip = NULL; 1765 1766 for (i = 0; i < np->tx_ring_size; i++) { 1767 if (!nv_optimized(np)) { 1768 np->tx_ring.orig[i].flaglen = 0; 1769 np->tx_ring.orig[i].buf = 0; 1770 } else { 1771 np->tx_ring.ex[i].flaglen = 0; 1772 np->tx_ring.ex[i].txvlan = 0; 1773 np->tx_ring.ex[i].bufhigh = 0; 1774 np->tx_ring.ex[i].buflow = 0; 1775 } 1776 np->tx_skb[i].skb = NULL; 1777 np->tx_skb[i].dma = 0; 1778 np->tx_skb[i].dma_len = 0; 1779 np->tx_skb[i].first_tx_desc = NULL; 1780 np->tx_skb[i].next_tx_ctx = NULL; 1781 } 1782} 1783 1784static int nv_init_ring(struct net_device *dev) 1785{ 1786 struct fe_priv *np = netdev_priv(dev); 1787 1788 nv_init_tx(dev); 1789 nv_init_rx(dev); 1790 1791 if (!nv_optimized(np)) 1792 return nv_alloc_rx(dev); 1793 else 1794 return nv_alloc_rx_optimized(dev); 1795} 1796 1797static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb) 1798{ 1799 struct fe_priv *np = netdev_priv(dev); 1800 1801 if (tx_skb->dma) { 1802 pci_unmap_page(np->pci_dev, tx_skb->dma, 1803 tx_skb->dma_len, 1804 PCI_DMA_TODEVICE); 1805 tx_skb->dma = 0; 1806 } 1807 if (tx_skb->skb) { 1808 dev_kfree_skb_any(tx_skb->skb); 1809 tx_skb->skb = NULL; 1810 return 1; 1811 } else { 1812 return 0; 1813 } 1814} 1815 1816static void 
nv_drain_tx(struct net_device *dev) 1817{ 1818 struct fe_priv *np = netdev_priv(dev); 1819 unsigned int i; 1820 1821 for (i = 0; i < np->tx_ring_size; i++) { 1822 if (!nv_optimized(np)) { 1823 np->tx_ring.orig[i].flaglen = 0; 1824 np->tx_ring.orig[i].buf = 0; 1825 } else { 1826 np->tx_ring.ex[i].flaglen = 0; 1827 np->tx_ring.ex[i].txvlan = 0; 1828 np->tx_ring.ex[i].bufhigh = 0; 1829 np->tx_ring.ex[i].buflow = 0; 1830 } 1831 if (nv_release_txskb(dev, &np->tx_skb[i])) 1832 dev->stats.tx_dropped++; 1833 np->tx_skb[i].dma = 0; 1834 np->tx_skb[i].dma_len = 0; 1835 np->tx_skb[i].first_tx_desc = NULL; 1836 np->tx_skb[i].next_tx_ctx = NULL; 1837 } 1838 np->tx_pkts_in_progress = 0; 1839 np->tx_change_owner = NULL; 1840 np->tx_end_flip = NULL; 1841} 1842 1843static void nv_drain_rx(struct net_device *dev) 1844{ 1845 struct fe_priv *np = netdev_priv(dev); 1846 int i; 1847 1848 for (i = 0; i < np->rx_ring_size; i++) { 1849 if (!nv_optimized(np)) { 1850 np->rx_ring.orig[i].flaglen = 0; 1851 np->rx_ring.orig[i].buf = 0; 1852 } else { 1853 np->rx_ring.ex[i].flaglen = 0; 1854 np->rx_ring.ex[i].txvlan = 0; 1855 np->rx_ring.ex[i].bufhigh = 0; 1856 np->rx_ring.ex[i].buflow = 0; 1857 } 1858 wmb(); 1859 if (np->rx_skb[i].skb) { 1860 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, 1861 (skb_end_pointer(np->rx_skb[i].skb) - 1862 np->rx_skb[i].skb->data), 1863 PCI_DMA_FROMDEVICE); 1864 dev_kfree_skb(np->rx_skb[i].skb); 1865 np->rx_skb[i].skb = NULL; 1866 } 1867 } 1868} 1869 1870static void nv_drain_rxtx(struct net_device *dev) 1871{ 1872 nv_drain_tx(dev); 1873 nv_drain_rx(dev); 1874} 1875 1876static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) 1877{ 1878 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); 1879} 1880 1881static void nv_legacybackoff_reseed(struct net_device *dev) 1882{ 1883 u8 __iomem *base = get_hwbase(dev); 1884 u32 reg; 1885 u32 low; 1886 int tx_status = 0; 1887 1888 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK; 1889 get_random_bytes(&low, sizeof(low)); 1890 reg |= low & NVREG_SLOTTIME_MASK; 1891 1892 /* Need to stop tx before change takes effect. 1893 * Caller has already gained np->lock. 
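 * The new random seed occupies the low bits of NvRegSlotTime
 * (NVREG_SLOTTIME_MASK); tx is only restarted afterwards if it was
 * running, rx is stopped and restarted unconditionally.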
1894 */ 1895 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START; 1896 if (tx_status) 1897 nv_stop_tx(dev); 1898 nv_stop_rx(dev); 1899 writel(reg, base + NvRegSlotTime); 1900 if (tx_status) 1901 nv_start_tx(dev); 1902 nv_start_rx(dev); 1903} 1904 1905/* Gear Backoff Seeds */ 1906#define BACKOFF_SEEDSET_ROWS 8 1907#define BACKOFF_SEEDSET_LFSRS 15 1908 1909/* Known Good seed sets */ 1910static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 1911 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 1912 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974}, 1913 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 1914 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974}, 1915 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984}, 1916 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984}, 1917 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84}, 1918 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}}; 1919 1920static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 1921 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 1922 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 1923 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397}, 1924 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 1925 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 1926 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 1927 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 1928 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}}; 1929 1930static void nv_gear_backoff_reseed(struct net_device *dev) 1931{ 1932 u8 __iomem *base = get_hwbase(dev); 1933 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed; 1934 u32 temp, seedset, combinedSeed; 1935 int i; 1936 1937 /* Setup seed for free running LFSR */ 1938 /* We are going to read the time stamp counter 3 times 1939 and swizzle bits around to increase randomness */ 1940 get_random_bytes(&miniseed1, sizeof(miniseed1)); 1941 miniseed1 &= 0x0fff; 1942 if (miniseed1 == 0) 1943 miniseed1 = 0xabc; 1944 1945 get_random_bytes(&miniseed2, sizeof(miniseed2)); 1946 miniseed2 &= 0x0fff; 1947 if (miniseed2 == 0) 1948 miniseed2 = 0xabc; 1949 miniseed2_reversed = 1950 ((miniseed2 & 0xF00) >> 8) | 1951 (miniseed2 & 0x0F0) | 1952 ((miniseed2 & 0x00F) << 8); 1953 1954 get_random_bytes(&miniseed3, sizeof(miniseed3)); 1955 miniseed3 &= 0x0fff; 1956 if (miniseed3 == 0) 1957 miniseed3 = 0xabc; 1958 miniseed3_reversed = 1959 ((miniseed3 & 0xF00) >> 8) | 1960 (miniseed3 & 0x0F0) | 1961 ((miniseed3 & 0x00F) << 8); 1962 1963 combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) | 1964 (miniseed2 ^ miniseed3_reversed); 1965 1966 /* Seeds can not be zero */ 1967 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0) 1968 combinedSeed |= 0x08; 1969 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0) 1970 combinedSeed |= 0x8000; 1971 1972 /* No need to disable tx here */ 1973 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT); 1974 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK; 1975 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR; 1976 writel(temp,base + NvRegBackOffControl); 1977 1978 /* 
Setup seeds for all gear LFSRs. */ 1979 get_random_bytes(&seedset, sizeof(seedset)); 1980 seedset = seedset % BACKOFF_SEEDSET_ROWS; 1981 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) 1982 { 1983 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT); 1984 temp |= main_seedset[seedset][i-1] & 0x3ff; 1985 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); 1986 writel(temp, base + NvRegBackOffControl); 1987 } 1988} 1989 1990/* 1991 * nv_start_xmit: dev->hard_start_xmit function 1992 * Called with netif_tx_lock held. 1993 */ 1994static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 1995{ 1996 struct fe_priv *np = netdev_priv(dev); 1997 u32 tx_flags = 0; 1998 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 1999 unsigned int fragments = skb_shinfo(skb)->nr_frags; 2000 unsigned int i; 2001 u32 offset = 0; 2002 u32 bcnt; 2003 u32 size = skb->len-skb->data_len; 2004 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2005 u32 empty_slots; 2006 struct ring_desc* put_tx; 2007 struct ring_desc* start_tx; 2008 struct ring_desc* prev_tx; 2009 struct nv_skb_map* prev_tx_ctx; 2010 unsigned long flags; 2011 2012 /* add fragments to entries count */ 2013 for (i = 0; i < fragments; i++) { 2014 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 2015 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2016 } 2017 2018 empty_slots = nv_get_empty_tx_slots(np); 2019 if (unlikely(empty_slots <= entries)) { 2020 spin_lock_irqsave(&np->lock, flags); 2021 netif_stop_queue(dev); 2022 np->tx_stop = 1; 2023 spin_unlock_irqrestore(&np->lock, flags); 2024 return NETDEV_TX_BUSY; 2025 } 2026 2027 start_tx = put_tx = np->put_tx.orig; 2028 2029 /* setup the header buffer */ 2030 do { 2031 prev_tx = put_tx; 2032 prev_tx_ctx = np->put_tx_ctx; 2033 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2034 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2035 PCI_DMA_TODEVICE); 2036 np->put_tx_ctx->dma_len = bcnt; 2037 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2038 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2039 2040 tx_flags = np->tx_flags; 2041 offset += bcnt; 2042 size -= bcnt; 2043 if (unlikely(put_tx++ == np->last_tx.orig)) 2044 put_tx = np->first_tx.orig; 2045 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2046 np->put_tx_ctx = np->first_tx_ctx; 2047 } while (size); 2048 2049 /* setup the fragments */ 2050 for (i = 0; i < fragments; i++) { 2051 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2052 u32 size = frag->size; 2053 offset = 0; 2054 2055 do { 2056 prev_tx = put_tx; 2057 prev_tx_ctx = np->put_tx_ctx; 2058 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; 2059 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 2060 PCI_DMA_TODEVICE); 2061 np->put_tx_ctx->dma_len = bcnt; 2062 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2063 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2064 2065 offset += bcnt; 2066 size -= bcnt; 2067 if (unlikely(put_tx++ == np->last_tx.orig)) 2068 put_tx = np->first_tx.orig; 2069 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2070 np->put_tx_ctx = np->first_tx_ctx; 2071 } while (size); 2072 } 2073 2074 /* set last fragment flag */ 2075 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); 2076 2077 /* save skb in this slot's context area */ 2078 prev_tx_ctx->skb = skb; 2079 2080 if (skb_is_gso(skb)) 2081 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 2082 else 2083 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 2084 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2085 2086 spin_lock_irqsave(&np->lock, flags); 2087 2088 /* set tx flags */ 2089 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2090 np->put_tx.orig = put_tx; 2091 2092 spin_unlock_irqrestore(&np->lock, flags); 2093 2094 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n", 2095 dev->name, entries, tx_flags_extra); 2096 { 2097 int j; 2098 for (j=0; j<64; j++) { 2099 if ((j%16) == 0) 2100 dprintk("\n%03x:", j); 2101 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2102 } 2103 dprintk("\n"); 2104 } 2105 2106 dev->trans_start = jiffies; 2107 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2108 return NETDEV_TX_OK; 2109} 2110 2111static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev) 2112{ 2113 struct fe_priv *np = netdev_priv(dev); 2114 u32 tx_flags = 0; 2115 u32 tx_flags_extra; 2116 unsigned int fragments = skb_shinfo(skb)->nr_frags; 2117 unsigned int i; 2118 u32 offset = 0; 2119 u32 bcnt; 2120 u32 size = skb->len-skb->data_len; 2121 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2122 u32 empty_slots; 2123 struct ring_desc_ex* put_tx; 2124 struct ring_desc_ex* start_tx; 2125 struct ring_desc_ex* prev_tx; 2126 struct nv_skb_map* prev_tx_ctx; 2127 struct nv_skb_map* start_tx_ctx; 2128 unsigned long flags; 2129 2130 /* add fragments to entries count */ 2131 for (i = 0; i < fragments; i++) { 2132 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 2133 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2134 } 2135 2136 empty_slots = nv_get_empty_tx_slots(np); 2137 if (unlikely(empty_slots <= entries)) { 2138 spin_lock_irqsave(&np->lock, flags); 2139 netif_stop_queue(dev); 2140 np->tx_stop = 1; 2141 spin_unlock_irqrestore(&np->lock, flags); 2142 return NETDEV_TX_BUSY; 2143 } 2144 2145 start_tx = put_tx = np->put_tx.ex; 2146 start_tx_ctx = np->put_tx_ctx; 2147 2148 /* setup the header buffer */ 2149 do { 2150 prev_tx = put_tx; 2151 prev_tx_ctx = np->put_tx_ctx; 2152 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; 2153 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2154 PCI_DMA_TODEVICE); 2155 np->put_tx_ctx->dma_len = bcnt; 2156 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2157 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2158 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2159 2160 tx_flags = NV_TX2_VALID; 2161 offset += bcnt; 2162 size -= bcnt; 2163 if (unlikely(put_tx++ == np->last_tx.ex)) 2164 put_tx = np->first_tx.ex; 2165 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2166 np->put_tx_ctx = np->first_tx_ctx; 2167 } while (size); 2168 2169 /* setup the fragments */ 2170 for (i = 0; i < fragments; i++) { 2171 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2172 u32 size = frag->size; 2173 offset = 0; 2174 2175 do { 2176 prev_tx = put_tx; 2177 prev_tx_ctx = np->put_tx_ctx; 2178 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2179 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 2180 PCI_DMA_TODEVICE); 2181 np->put_tx_ctx->dma_len = bcnt; 2182 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2183 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2184 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2185 2186 offset += bcnt; 2187 size -= bcnt; 2188 if (unlikely(put_tx++ == np->last_tx.ex)) 2189 put_tx = np->first_tx.ex; 2190 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2191 np->put_tx_ctx = np->first_tx_ctx; 2192 } while (size); 2193 } 2194 2195 /* set last fragment flag */ 2196 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); 2197 2198 /* save skb in this slot's context area */ 2199 prev_tx_ctx->skb = skb; 2200 2201 if (skb_is_gso(skb)) 2202 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 2203 else 2204 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 2205 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2206 2207 /* vlan tag */ 2208 if (likely(!np->vlangrp)) { 2209 start_tx->txvlan = 0; 2210 } else { 2211 if (vlan_tx_tag_present(skb)) 2212 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb)); 2213 else 2214 start_tx->txvlan = 0; 2215 } 2216 2217 spin_lock_irqsave(&np->lock, flags); 2218 2219 if (np->tx_limit) { 2220 /* Limit the number of outstanding tx. Setup all fragments, but 2221 * do not set the VALID bit on the first descriptor. Save a pointer 2222 * to that descriptor and also for next skb_map element. 2223 */ 2224 2225 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) { 2226 if (!np->tx_change_owner) 2227 np->tx_change_owner = start_tx_ctx; 2228 2229 /* remove VALID bit */ 2230 tx_flags &= ~NV_TX2_VALID; 2231 start_tx_ctx->first_tx_desc = start_tx; 2232 start_tx_ctx->next_tx_ctx = np->put_tx_ctx; 2233 np->tx_end_flip = np->put_tx_ctx; 2234 } else { 2235 np->tx_pkts_in_progress++; 2236 } 2237 } 2238 2239 /* set tx flags */ 2240 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2241 np->put_tx.ex = put_tx; 2242 2243 spin_unlock_irqrestore(&np->lock, flags); 2244 2245 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. 
tx_flags_extra: %x\n", 2246 dev->name, entries, tx_flags_extra); 2247 { 2248 int j; 2249 for (j=0; j<64; j++) { 2250 if ((j%16) == 0) 2251 dprintk("\n%03x:", j); 2252 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2253 } 2254 dprintk("\n"); 2255 } 2256 2257 dev->trans_start = jiffies; 2258 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2259 return NETDEV_TX_OK; 2260} 2261 2262static inline void nv_tx_flip_ownership(struct net_device *dev) 2263{ 2264 struct fe_priv *np = netdev_priv(dev); 2265 2266 np->tx_pkts_in_progress--; 2267 if (np->tx_change_owner) { 2268 np->tx_change_owner->first_tx_desc->flaglen |= 2269 cpu_to_le32(NV_TX2_VALID); 2270 np->tx_pkts_in_progress++; 2271 2272 np->tx_change_owner = np->tx_change_owner->next_tx_ctx; 2273 if (np->tx_change_owner == np->tx_end_flip) 2274 np->tx_change_owner = NULL; 2275 2276 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2277 } 2278} 2279 2280/* 2281 * nv_tx_done: check for completed packets, release the skbs. 2282 * 2283 * Caller must own np->lock. 2284 */ 2285static void nv_tx_done(struct net_device *dev) 2286{ 2287 struct fe_priv *np = netdev_priv(dev); 2288 u32 flags; 2289 struct ring_desc* orig_get_tx = np->get_tx.orig; 2290 2291 while ((np->get_tx.orig != np->put_tx.orig) && 2292 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) { 2293 2294 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n", 2295 dev->name, flags); 2296 2297 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 2298 np->get_tx_ctx->dma_len, 2299 PCI_DMA_TODEVICE); 2300 np->get_tx_ctx->dma = 0; 2301 2302 if (np->desc_ver == DESC_VER_1) { 2303 if (flags & NV_TX_LASTPACKET) { 2304 if (flags & NV_TX_ERROR) { 2305 if (flags & NV_TX_UNDERFLOW) 2306 dev->stats.tx_fifo_errors++; 2307 if (flags & NV_TX_CARRIERLOST) 2308 dev->stats.tx_carrier_errors++; 2309 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) 2310 nv_legacybackoff_reseed(dev); 2311 dev->stats.tx_errors++; 2312 } else { 2313 dev->stats.tx_packets++; 2314 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2315 } 2316 dev_kfree_skb_any(np->get_tx_ctx->skb); 2317 np->get_tx_ctx->skb = NULL; 2318 } 2319 } else { 2320 if (flags & NV_TX2_LASTPACKET) { 2321 if (flags & NV_TX2_ERROR) { 2322 if (flags & NV_TX2_UNDERFLOW) 2323 dev->stats.tx_fifo_errors++; 2324 if (flags & NV_TX2_CARRIERLOST) 2325 dev->stats.tx_carrier_errors++; 2326 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2327 nv_legacybackoff_reseed(dev); 2328 dev->stats.tx_errors++; 2329 } else { 2330 dev->stats.tx_packets++; 2331 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2332 } 2333 dev_kfree_skb_any(np->get_tx_ctx->skb); 2334 np->get_tx_ctx->skb = NULL; 2335 } 2336 } 2337 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) 2338 np->get_tx.orig = np->first_tx.orig; 2339 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2340 np->get_tx_ctx = np->first_tx_ctx; 2341 } 2342 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { 2343 np->tx_stop = 0; 2344 netif_wake_queue(dev); 2345 } 2346} 2347 2348static void nv_tx_done_optimized(struct net_device *dev, int limit) 2349{ 2350 struct fe_priv *np = netdev_priv(dev); 2351 u32 flags; 2352 struct ring_desc_ex* orig_get_tx = np->get_tx.ex; 2353 2354 while ((np->get_tx.ex != np->put_tx.ex) && 2355 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) && 2356 (limit-- > 0)) { 2357 2358 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", 2359 dev->name, flags); 
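/* the hardware is done with this descriptor: release its DMA mapping;
 * on the last fragment of a packet, free the skb and, if tx limiting is
 * active, let nv_tx_flip_ownership() validate the next deferred packet. */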
2360 2361 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 2362 np->get_tx_ctx->dma_len, 2363 PCI_DMA_TODEVICE); 2364 np->get_tx_ctx->dma = 0; 2365 2366 if (flags & NV_TX2_LASTPACKET) { 2367 if (!(flags & NV_TX2_ERROR)) 2368 dev->stats.tx_packets++; 2369 else { 2370 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { 2371 if (np->driver_data & DEV_HAS_GEAR_MODE) 2372 nv_gear_backoff_reseed(dev); 2373 else 2374 nv_legacybackoff_reseed(dev); 2375 } 2376 } 2377 2378 dev_kfree_skb_any(np->get_tx_ctx->skb); 2379 np->get_tx_ctx->skb = NULL; 2380 2381 if (np->tx_limit) { 2382 nv_tx_flip_ownership(dev); 2383 } 2384 } 2385 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) 2386 np->get_tx.ex = np->first_tx.ex; 2387 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2388 np->get_tx_ctx = np->first_tx_ctx; 2389 } 2390 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { 2391 np->tx_stop = 0; 2392 netif_wake_queue(dev); 2393 } 2394} 2395 2396/* 2397 * nv_tx_timeout: dev->tx_timeout function 2398 * Called with netif_tx_lock held. 2399 */ 2400static void nv_tx_timeout(struct net_device *dev) 2401{ 2402 struct fe_priv *np = netdev_priv(dev); 2403 u8 __iomem *base = get_hwbase(dev); 2404 u32 status; 2405 2406 if (np->msi_flags & NV_MSI_X_ENABLED) 2407 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2408 else 2409 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2410 2411 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); 2412 2413 { 2414 int i; 2415 2416 printk(KERN_INFO "%s: Ring at %lx\n", 2417 dev->name, (unsigned long)np->ring_addr); 2418 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 2419 for (i=0;i<=np->register_size;i+= 32) { 2420 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 2421 i, 2422 readl(base + i + 0), readl(base + i + 4), 2423 readl(base + i + 8), readl(base + i + 12), 2424 readl(base + i + 16), readl(base + i + 20), 2425 readl(base + i + 24), readl(base + i + 28)); 2426 } 2427 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 2428 for (i=0;i<np->tx_ring_size;i+= 4) { 2429 if (!nv_optimized(np)) { 2430 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 2431 i, 2432 le32_to_cpu(np->tx_ring.orig[i].buf), 2433 le32_to_cpu(np->tx_ring.orig[i].flaglen), 2434 le32_to_cpu(np->tx_ring.orig[i+1].buf), 2435 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), 2436 le32_to_cpu(np->tx_ring.orig[i+2].buf), 2437 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), 2438 le32_to_cpu(np->tx_ring.orig[i+3].buf), 2439 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); 2440 } else { 2441 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", 2442 i, 2443 le32_to_cpu(np->tx_ring.ex[i].bufhigh), 2444 le32_to_cpu(np->tx_ring.ex[i].buflow), 2445 le32_to_cpu(np->tx_ring.ex[i].flaglen), 2446 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), 2447 le32_to_cpu(np->tx_ring.ex[i+1].buflow), 2448 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), 2449 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), 2450 le32_to_cpu(np->tx_ring.ex[i+2].buflow), 2451 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), 2452 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), 2453 le32_to_cpu(np->tx_ring.ex[i+3].buflow), 2454 le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); 2455 } 2456 } 2457 } 2458 2459 spin_lock_irq(&np->lock); 2460 2461 /* 1) stop tx engine */ 2462 nv_stop_tx(dev); 2463 2464 /* 2) check that the packets were not sent already: */ 2465 if (!nv_optimized(np)) 2466 nv_tx_done(dev); 2467 else 2468 
nv_tx_done_optimized(dev, np->tx_ring_size); 2469 2470 /* 3) if there are dead entries: clear everything */ 2471 if (np->get_tx_ctx != np->put_tx_ctx) { 2472 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 2473 nv_drain_tx(dev); 2474 nv_init_tx(dev); 2475 setup_hw_rings(dev, NV_SETUP_TX_RING); 2476 } 2477 2478 netif_wake_queue(dev); 2479 2480 /* 4) restart tx engine */ 2481 nv_start_tx(dev); 2482 spin_unlock_irq(&np->lock); 2483} 2484 2485/* 2486 * Called when the nic notices a mismatch between the actual data len on the 2487 * wire and the len indicated in the 802 header 2488 */ 2489static int nv_getlen(struct net_device *dev, void *packet, int datalen) 2490{ 2491 int hdrlen; /* length of the 802 header */ 2492 int protolen; /* length as stored in the proto field */ 2493 2494 /* 1) calculate len according to header */ 2495 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { 2496 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); 2497 hdrlen = VLAN_HLEN; 2498 } else { 2499 protolen = ntohs( ((struct ethhdr *)packet)->h_proto); 2500 hdrlen = ETH_HLEN; 2501 } 2502 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n", 2503 dev->name, datalen, protolen, hdrlen); 2504 if (protolen > ETH_DATA_LEN) 2505 return datalen; /* Value in proto field not a len, no checks possible */ 2506 2507 protolen += hdrlen; 2508 /* consistency checks: */ 2509 if (datalen > ETH_ZLEN) { 2510 if (datalen >= protolen) { 2511 /* more data on wire than in 802 header, trim of 2512 * additional data. 2513 */ 2514 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2515 dev->name, protolen); 2516 return protolen; 2517 } else { 2518 /* less data on wire than mentioned in header. 2519 * Discard the packet. 2520 */ 2521 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n", 2522 dev->name); 2523 return -1; 2524 } 2525 } else { 2526 /* short packet. Accept only if 802 values are also short */ 2527 if (protolen > ETH_ZLEN) { 2528 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n", 2529 dev->name); 2530 return -1; 2531 } 2532 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2533 dev->name, datalen); 2534 return datalen; 2535 } 2536} 2537 2538static int nv_rx_process(struct net_device *dev, int limit) 2539{ 2540 struct fe_priv *np = netdev_priv(dev); 2541 u32 flags; 2542 int rx_work = 0; 2543 struct sk_buff *skb; 2544 int len; 2545 2546 while((np->get_rx.orig != np->put_rx.orig) && 2547 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && 2548 (rx_work < limit)) { 2549 2550 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n", 2551 dev->name, flags); 2552 2553 /* 2554 * the packet is for us - immediately tear down the pci mapping. 2555 * TODO: check if a prefetch of the first cacheline improves 2556 * the performance. 
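 * The skb stays untouched until the flag and length checks below decide
 * whether to hand it to the stack or free it.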
2557 */ 2558 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2559 np->get_rx_ctx->dma_len, 2560 PCI_DMA_FROMDEVICE); 2561 skb = np->get_rx_ctx->skb; 2562 np->get_rx_ctx->skb = NULL; 2563 2564 { 2565 int j; 2566 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2567 for (j=0; j<64; j++) { 2568 if ((j%16) == 0) 2569 dprintk("\n%03x:", j); 2570 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2571 } 2572 dprintk("\n"); 2573 } 2574 /* look at what we actually got: */ 2575 if (np->desc_ver == DESC_VER_1) { 2576 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2577 len = flags & LEN_MASK_V1; 2578 if (unlikely(flags & NV_RX_ERROR)) { 2579 if (flags & NV_RX_ERROR4) { 2580 len = nv_getlen(dev, skb->data, len); 2581 if (len < 0) { 2582 dev->stats.rx_errors++; 2583 dev_kfree_skb(skb); 2584 goto next_pkt; 2585 } 2586 } 2587 /* framing errors are soft errors */ 2588 else if (flags & NV_RX_FRAMINGERR) { 2589 if (flags & NV_RX_SUBSTRACT1) { 2590 len--; 2591 } 2592 } 2593 /* the rest are hard errors */ 2594 else { 2595 if (flags & NV_RX_MISSEDFRAME) 2596 dev->stats.rx_missed_errors++; 2597 if (flags & NV_RX_CRCERR) 2598 dev->stats.rx_crc_errors++; 2599 if (flags & NV_RX_OVERFLOW) 2600 dev->stats.rx_over_errors++; 2601 dev->stats.rx_errors++; 2602 dev_kfree_skb(skb); 2603 goto next_pkt; 2604 } 2605 } 2606 } else { 2607 dev_kfree_skb(skb); 2608 goto next_pkt; 2609 } 2610 } else { 2611 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2612 len = flags & LEN_MASK_V2; 2613 if (unlikely(flags & NV_RX2_ERROR)) { 2614 if (flags & NV_RX2_ERROR4) { 2615 len = nv_getlen(dev, skb->data, len); 2616 if (len < 0) { 2617 dev->stats.rx_errors++; 2618 dev_kfree_skb(skb); 2619 goto next_pkt; 2620 } 2621 } 2622 /* framing errors are soft errors */ 2623 else if (flags & NV_RX2_FRAMINGERR) { 2624 if (flags & NV_RX2_SUBSTRACT1) { 2625 len--; 2626 } 2627 } 2628 /* the rest are hard errors */ 2629 else { 2630 if (flags & NV_RX2_CRCERR) 2631 dev->stats.rx_crc_errors++; 2632 if (flags & NV_RX2_OVERFLOW) 2633 dev->stats.rx_over_errors++; 2634 dev->stats.rx_errors++; 2635 dev_kfree_skb(skb); 2636 goto next_pkt; 2637 } 2638 } 2639 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2640 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2641 skb->ip_summed = CHECKSUM_UNNECESSARY; 2642 } else { 2643 dev_kfree_skb(skb); 2644 goto next_pkt; 2645 } 2646 } 2647 /* got a valid packet - forward it to the network core */ 2648 skb_put(skb, len); 2649 skb->protocol = eth_type_trans(skb, dev); 2650 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n", 2651 dev->name, len, skb->protocol); 2652#ifdef CONFIG_FORCEDETH_NAPI 2653 netif_receive_skb(skb); 2654#else 2655 netif_rx(skb); 2656#endif 2657 dev->last_rx = jiffies; 2658 dev->stats.rx_packets++; 2659 dev->stats.rx_bytes += len; 2660next_pkt: 2661 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2662 np->get_rx.orig = np->first_rx.orig; 2663 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2664 np->get_rx_ctx = np->first_rx_ctx; 2665 2666 rx_work++; 2667 } 2668 2669 return rx_work; 2670} 2671 2672static int nv_rx_process_optimized(struct net_device *dev, int limit) 2673{ 2674 struct fe_priv *np = netdev_priv(dev); 2675 u32 flags; 2676 u32 vlanflags = 0; 2677 int rx_work = 0; 2678 struct sk_buff *skb; 2679 int len; 2680 2681 while((np->get_rx.ex != np->put_rx.ex) && 2682 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && 2683 (rx_work < limit)) { 2684 2685 dprintk(KERN_DEBUG "%s: 
nv_rx_process_optimized: flags 0x%x.\n", 2686 dev->name, flags); 2687 2688 /* 2689 * the packet is for us - immediately tear down the pci mapping. 2690 * TODO: check if a prefetch of the first cacheline improves 2691 * the performance. 2692 */ 2693 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2694 np->get_rx_ctx->dma_len, 2695 PCI_DMA_FROMDEVICE); 2696 skb = np->get_rx_ctx->skb; 2697 np->get_rx_ctx->skb = NULL; 2698 2699 { 2700 int j; 2701 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2702 for (j=0; j<64; j++) { 2703 if ((j%16) == 0) 2704 dprintk("\n%03x:", j); 2705 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2706 } 2707 dprintk("\n"); 2708 } 2709 /* look at what we actually got: */ 2710 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2711 len = flags & LEN_MASK_V2; 2712 if (unlikely(flags & NV_RX2_ERROR)) { 2713 if (flags & NV_RX2_ERROR4) { 2714 len = nv_getlen(dev, skb->data, len); 2715 if (len < 0) { 2716 dev_kfree_skb(skb); 2717 goto next_pkt; 2718 } 2719 } 2720 /* framing errors are soft errors */ 2721 else if (flags & NV_RX2_FRAMINGERR) { 2722 if (flags & NV_RX2_SUBSTRACT1) { 2723 len--; 2724 } 2725 } 2726 /* the rest are hard errors */ 2727 else { 2728 dev_kfree_skb(skb); 2729 goto next_pkt; 2730 } 2731 } 2732 2733 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2734 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2735 skb->ip_summed = CHECKSUM_UNNECESSARY; 2736 2737 /* got a valid packet - forward it to the network core */ 2738 skb_put(skb, len); 2739 skb->protocol = eth_type_trans(skb, dev); 2740 prefetch(skb->data); 2741 2742 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n", 2743 dev->name, len, skb->protocol); 2744 2745 if (likely(!np->vlangrp)) { 2746#ifdef CONFIG_FORCEDETH_NAPI 2747 netif_receive_skb(skb); 2748#else 2749 netif_rx(skb); 2750#endif 2751 } else { 2752 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); 2753 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { 2754#ifdef CONFIG_FORCEDETH_NAPI 2755 vlan_hwaccel_receive_skb(skb, np->vlangrp, 2756 vlanflags & NV_RX3_VLAN_TAG_MASK); 2757#else 2758 vlan_hwaccel_rx(skb, np->vlangrp, 2759 vlanflags & NV_RX3_VLAN_TAG_MASK); 2760#endif 2761 } else { 2762#ifdef CONFIG_FORCEDETH_NAPI 2763 netif_receive_skb(skb); 2764#else 2765 netif_rx(skb); 2766#endif 2767 } 2768 } 2769 2770 dev->last_rx = jiffies; 2771 dev->stats.rx_packets++; 2772 dev->stats.rx_bytes += len; 2773 } else { 2774 dev_kfree_skb(skb); 2775 } 2776next_pkt: 2777 if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) 2778 np->get_rx.ex = np->first_rx.ex; 2779 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2780 np->get_rx_ctx = np->first_rx_ctx; 2781 2782 rx_work++; 2783 } 2784 2785 return rx_work; 2786} 2787 2788static void set_bufsize(struct net_device *dev) 2789{ 2790 struct fe_priv *np = netdev_priv(dev); 2791 2792 if (dev->mtu <= ETH_DATA_LEN) 2793 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; 2794 else 2795 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; 2796} 2797 2798/* 2799 * nv_change_mtu: dev->change_mtu function 2800 * Called with dev_base_lock held for read. 
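 * If the new MTU changes the rx buffer size while the interface is up,
 * both engines are stopped, the rings are drained and reinitialized with
 * the new buffer size, and the nic's view of the rings is reprogrammed
 * before traffic is restarted.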
2801 */ 2802static int nv_change_mtu(struct net_device *dev, int new_mtu) 2803{ 2804 struct fe_priv *np = netdev_priv(dev); 2805 int old_mtu; 2806 2807 if (new_mtu < 64 || new_mtu > np->pkt_limit) 2808 return -EINVAL; 2809 2810 old_mtu = dev->mtu; 2811 dev->mtu = new_mtu; 2812 2813 /* return early if the buffer sizes will not change */ 2814 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) 2815 return 0; 2816 if (old_mtu == new_mtu) 2817 return 0; 2818 2819 /* synchronized against open : rtnl_lock() held by caller */ 2820 if (netif_running(dev)) { 2821 u8 __iomem *base = get_hwbase(dev); 2822 /* 2823 * It seems that the nic preloads valid ring entries into an 2824 * internal buffer. The procedure for flushing everything is 2825 * guessed, there is probably a simpler approach. 2826 * Changing the MTU is a rare event, it shouldn't matter. 2827 */ 2828 nv_disable_irq(dev); 2829 netif_tx_lock_bh(dev); 2830 spin_lock(&np->lock); 2831 /* stop engines */ 2832 nv_stop_rxtx(dev); 2833 nv_txrx_reset(dev); 2834 /* drain rx queue */ 2835 nv_drain_rxtx(dev); 2836 /* reinit driver view of the rx queue */ 2837 set_bufsize(dev); 2838 if (nv_init_ring(dev)) { 2839 if (!np->in_shutdown) 2840 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2841 } 2842 /* reinit nic view of the rx queue */ 2843 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2844 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2845 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 2846 base + NvRegRingSizes); 2847 pci_push(base); 2848 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2849 pci_push(base); 2850 2851 /* restart rx engine */ 2852 nv_start_rxtx(dev); 2853 spin_unlock(&np->lock); 2854 netif_tx_unlock_bh(dev); 2855 nv_enable_irq(dev); 2856 } 2857 return 0; 2858} 2859 2860static void nv_copy_mac_to_hw(struct net_device *dev) 2861{ 2862 u8 __iomem *base = get_hwbase(dev); 2863 u32 mac[2]; 2864 2865 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 2866 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 2867 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 2868 2869 writel(mac[0], base + NvRegMacAddrA); 2870 writel(mac[1], base + NvRegMacAddrB); 2871} 2872 2873/* 2874 * nv_set_mac_address: dev->set_mac_address function 2875 * Called with rtnl_lock() held. 2876 */ 2877static int nv_set_mac_address(struct net_device *dev, void *addr) 2878{ 2879 struct fe_priv *np = netdev_priv(dev); 2880 struct sockaddr *macaddr = (struct sockaddr*)addr; 2881 2882 if (!is_valid_ether_addr(macaddr->sa_data)) 2883 return -EADDRNOTAVAIL; 2884 2885 /* synchronized against open : rtnl_lock() held by caller */ 2886 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); 2887 2888 if (netif_running(dev)) { 2889 netif_tx_lock_bh(dev); 2890 spin_lock_irq(&np->lock); 2891 2892 /* stop rx engine */ 2893 nv_stop_rx(dev); 2894 2895 /* set mac address */ 2896 nv_copy_mac_to_hw(dev); 2897 2898 /* restart rx engine */ 2899 nv_start_rx(dev); 2900 spin_unlock_irq(&np->lock); 2901 netif_tx_unlock_bh(dev); 2902 } else { 2903 nv_copy_mac_to_hw(dev); 2904 } 2905 return 0; 2906} 2907 2908/* 2909 * nv_set_multicast: dev->set_multicast function 2910 * Called with netif_tx_lock held. 
2911 */ 2912static void nv_set_multicast(struct net_device *dev) 2913{ 2914 struct fe_priv *np = netdev_priv(dev); 2915 u8 __iomem *base = get_hwbase(dev); 2916 u32 addr[2]; 2917 u32 mask[2]; 2918 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; 2919 2920 memset(addr, 0, sizeof(addr)); 2921 memset(mask, 0, sizeof(mask)); 2922 2923 if (dev->flags & IFF_PROMISC) { 2924 pff |= NVREG_PFF_PROMISC; 2925 } else { 2926 pff |= NVREG_PFF_MYADDR; 2927 2928 if (dev->flags & IFF_ALLMULTI || dev->mc_list) { 2929 u32 alwaysOff[2]; 2930 u32 alwaysOn[2]; 2931 2932 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; 2933 if (dev->flags & IFF_ALLMULTI) { 2934 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 2935 } else { 2936 struct dev_mc_list *walk; 2937 2938 walk = dev->mc_list; 2939 while (walk != NULL) { 2940 u32 a, b; 2941 a = le32_to_cpu(*(__le32 *) walk->dmi_addr); 2942 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4])); 2943 alwaysOn[0] &= a; 2944 alwaysOff[0] &= ~a; 2945 alwaysOn[1] &= b; 2946 alwaysOff[1] &= ~b; 2947 walk = walk->next; 2948 } 2949 } 2950 addr[0] = alwaysOn[0]; 2951 addr[1] = alwaysOn[1]; 2952 mask[0] = alwaysOn[0] | alwaysOff[0]; 2953 mask[1] = alwaysOn[1] | alwaysOff[1]; 2954 } else { 2955 mask[0] = NVREG_MCASTMASKA_NONE; 2956 mask[1] = NVREG_MCASTMASKB_NONE; 2957 } 2958 } 2959 addr[0] |= NVREG_MCASTADDRA_FORCE; 2960 pff |= NVREG_PFF_ALWAYS; 2961 spin_lock_irq(&np->lock); 2962 nv_stop_rx(dev); 2963 writel(addr[0], base + NvRegMulticastAddrA); 2964 writel(addr[1], base + NvRegMulticastAddrB); 2965 writel(mask[0], base + NvRegMulticastMaskA); 2966 writel(mask[1], base + NvRegMulticastMaskB); 2967 writel(pff, base + NvRegPacketFilterFlags); 2968 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", 2969 dev->name); 2970 nv_start_rx(dev); 2971 spin_unlock_irq(&np->lock); 2972} 2973 2974static void nv_update_pause(struct net_device *dev, u32 pause_flags) 2975{ 2976 struct fe_priv *np = netdev_priv(dev); 2977 u8 __iomem *base = get_hwbase(dev); 2978 2979 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); 2980 2981 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { 2982 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; 2983 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { 2984 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); 2985 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2986 } else { 2987 writel(pff, base + NvRegPacketFilterFlags); 2988 } 2989 } 2990 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 2991 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 2992 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 2993 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 2994 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 2995 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 2996 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) 2997 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 2998 writel(pause_enable, base + NvRegTxPauseFrame); 2999 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 3000 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3001 } else { 3002 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 3003 writel(regmisc, base + NvRegMisc1); 3004 } 3005 } 3006} 3007 3008/** 3009 * nv_update_linkspeed: Setup the MAC according to the link partner 3010 * @dev: Network device to be configured 3011 * 3012 * The function queries the PHY and checks if there is a link partner. 3013 * If yes, then it sets up the MAC accordingly. 
Otherwise, the MAC is 3014 * set to 10 MBit HD. 3015 * 3016 * The function returns 0 if there is no link partner and 1 if there is 3017 * a good link partner. 3018 */ 3019static int nv_update_linkspeed(struct net_device *dev) 3020{ 3021 struct fe_priv *np = netdev_priv(dev); 3022 u8 __iomem *base = get_hwbase(dev); 3023 int adv = 0; 3024 int lpa = 0; 3025 int adv_lpa, adv_pause, lpa_pause; 3026 int newls = np->linkspeed; 3027 int newdup = np->duplex; 3028 int mii_status; 3029 int retval = 0; 3030 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 3031 u32 txrxFlags = 0; 3032 u32 phy_exp; 3033 3034 /* BMSR_LSTATUS is latched, read it twice: 3035 * we want the current value. 3036 */ 3037 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3038 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3039 3040 if (!(mii_status & BMSR_LSTATUS)) { 3041 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", 3042 dev->name); 3043 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3044 newdup = 0; 3045 retval = 0; 3046 goto set_speed; 3047 } 3048 3049 if (np->autoneg == 0) { 3050 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", 3051 dev->name, np->fixed_mode); 3052 if (np->fixed_mode & LPA_100FULL) { 3053 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3054 newdup = 1; 3055 } else if (np->fixed_mode & LPA_100HALF) { 3056 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3057 newdup = 0; 3058 } else if (np->fixed_mode & LPA_10FULL) { 3059 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3060 newdup = 1; 3061 } else { 3062 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3063 newdup = 0; 3064 } 3065 retval = 1; 3066 goto set_speed; 3067 } 3068 /* check auto negotiation is complete */ 3069 if (!(mii_status & BMSR_ANEGCOMPLETE)) { 3070 /* still in autonegotiation - configure nic for 10 MBit HD and wait. 
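 * A later link change interrupt (or the link timer on hardware that
 * needs it) calls this function again once negotiation has completed.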
*/ 3071 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3072 newdup = 0; 3073 retval = 0; 3074 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); 3075 goto set_speed; 3076 } 3077 3078 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3079 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 3080 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", 3081 dev->name, adv, lpa); 3082 3083 retval = 1; 3084 if (np->gigabit == PHY_GIGABIT) { 3085 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3086 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); 3087 3088 if ((control_1000 & ADVERTISE_1000FULL) && 3089 (status_1000 & LPA_1000FULL)) { 3090 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", 3091 dev->name); 3092 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 3093 newdup = 1; 3094 goto set_speed; 3095 } 3096 } 3097 3098 /* FIXME: handle parallel detection properly */ 3099 adv_lpa = lpa & adv; 3100 if (adv_lpa & LPA_100FULL) { 3101 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3102 newdup = 1; 3103 } else if (adv_lpa & LPA_100HALF) { 3104 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3105 newdup = 0; 3106 } else if (adv_lpa & LPA_10FULL) { 3107 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3108 newdup = 1; 3109 } else if (adv_lpa & LPA_10HALF) { 3110 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3111 newdup = 0; 3112 } else { 3113 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); 3114 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3115 newdup = 0; 3116 } 3117 3118set_speed: 3119 if (np->duplex == newdup && np->linkspeed == newls) 3120 return retval; 3121 3122 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", 3123 dev->name, np->linkspeed, np->duplex, newls, newdup); 3124 3125 np->duplex = newdup; 3126 np->linkspeed = newls; 3127 3128 /* The transmitter and receiver must be restarted for safe update */ 3129 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) { 3130 txrxFlags |= NV_RESTART_TX; 3131 nv_stop_tx(dev); 3132 } 3133 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { 3134 txrxFlags |= NV_RESTART_RX; 3135 nv_stop_rx(dev); 3136 } 3137 3138 if (np->gigabit == PHY_GIGABIT) { 3139 phyreg = readl(base + NvRegSlotTime); 3140 phyreg &= ~(0x3FF00); 3141 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) || 3142 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)) 3143 phyreg |= NVREG_SLOTTIME_10_100_FULL; 3144 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 3145 phyreg |= NVREG_SLOTTIME_1000_FULL; 3146 writel(phyreg, base + NvRegSlotTime); 3147 } 3148 3149 phyreg = readl(base + NvRegPhyInterface); 3150 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 3151 if (np->duplex == 0) 3152 phyreg |= PHY_HALF; 3153 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 3154 phyreg |= PHY_100; 3155 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 3156 phyreg |= PHY_1000; 3157 writel(phyreg, base + NvRegPhyInterface); 3158 3159 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ 3160 if (phyreg & PHY_RGMII) { 3161 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { 3162 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 3163 } else { 3164 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { 3165 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) 3166 txreg 
= NVREG_TX_DEFERRAL_RGMII_STRETCH_10; 3167 else 3168 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100; 3169 } else { 3170 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 3171 } 3172 } 3173 } else { 3174 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) 3175 txreg = NVREG_TX_DEFERRAL_MII_STRETCH; 3176 else 3177 txreg = NVREG_TX_DEFERRAL_DEFAULT; 3178 } 3179 writel(txreg, base + NvRegTxDeferral); 3180 3181 if (np->desc_ver == DESC_VER_1) { 3182 txreg = NVREG_TX_WM_DESC1_DEFAULT; 3183 } else { 3184 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 3185 txreg = NVREG_TX_WM_DESC2_3_1000; 3186 else 3187 txreg = NVREG_TX_WM_DESC2_3_DEFAULT; 3188 } 3189 writel(txreg, base + NvRegTxWatermark); 3190 3191 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 3192 base + NvRegMisc1); 3193 pci_push(base); 3194 writel(np->linkspeed, base + NvRegLinkSpeed); 3195 pci_push(base); 3196 3197 pause_flags = 0; 3198 /* setup pause frame */ 3199 if (np->duplex != 0) { 3200 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 3201 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); 3202 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 3203 3204 switch (adv_pause) { 3205 case ADVERTISE_PAUSE_CAP: 3206 if (lpa_pause & LPA_PAUSE_CAP) { 3207 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3208 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3209 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3210 } 3211 break; 3212 case ADVERTISE_PAUSE_ASYM: 3213 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 3214 { 3215 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3216 } 3217 break; 3218 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: 3219 if (lpa_pause & LPA_PAUSE_CAP) 3220 { 3221 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3222 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3223 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3224 } 3225 if (lpa_pause == LPA_PAUSE_ASYM) 3226 { 3227 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3228 } 3229 break; 3230 } 3231 } else { 3232 pause_flags = np->pause_flags; 3233 } 3234 } 3235 nv_update_pause(dev, pause_flags); 3236 3237 if (txrxFlags & NV_RESTART_TX) 3238 nv_start_tx(dev); 3239 if (txrxFlags & NV_RESTART_RX) 3240 nv_start_rx(dev); 3241 3242 return retval; 3243} 3244 3245static void nv_linkchange(struct net_device *dev) 3246{ 3247 if (nv_update_linkspeed(dev)) { 3248 if (!netif_carrier_ok(dev)) { 3249 netif_carrier_on(dev); 3250 printk(KERN_INFO "%s: link up.\n", dev->name); 3251 nv_start_rx(dev); 3252 } 3253 } else { 3254 if (netif_carrier_ok(dev)) { 3255 netif_carrier_off(dev); 3256 printk(KERN_INFO "%s: link down.\n", dev->name); 3257 nv_stop_rx(dev); 3258 } 3259 } 3260} 3261 3262static void nv_link_irq(struct net_device *dev) 3263{ 3264 u8 __iomem *base = get_hwbase(dev); 3265 u32 miistat; 3266 3267 miistat = readl(base + NvRegMIIStatus); 3268 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus); 3269 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); 3270 3271 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 3272 nv_linkchange(dev); 3273 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); 3274} 3275 3276static void nv_msi_workaround(struct fe_priv *np) 3277{ 3278 3279 /* Need to toggle the msi irq mask within the ethernet device, 3280 * otherwise, future interrupts will not be detected. 
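 * Writing 0 and then NVREG_MSI_VECTOR_0_ENABLED back to NvRegMSIIrqMask
 * re-arms the MSI vector.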
3281 */ 3282 if (np->msi_flags & NV_MSI_ENABLED) { 3283 u8 __iomem *base = np->base; 3284 3285 writel(0, base + NvRegMSIIrqMask); 3286 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3287 } 3288} 3289 3290static irqreturn_t nv_nic_irq(int foo, void *data) 3291{ 3292 struct net_device *dev = (struct net_device *) data; 3293 struct fe_priv *np = netdev_priv(dev); 3294 u8 __iomem *base = get_hwbase(dev); 3295 u32 events; 3296 int i; 3297 3298 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 3299 3300 for (i=0; ; i++) { 3301 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3302 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3303 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3304 } else { 3305 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3306 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 3307 } 3308 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3309 if (!(events & np->irqmask)) 3310 break; 3311 3312 nv_msi_workaround(np); 3313 3314 spin_lock(&np->lock); 3315 nv_tx_done(dev); 3316 spin_unlock(&np->lock); 3317 3318#ifdef CONFIG_FORCEDETH_NAPI 3319 if (events & NVREG_IRQ_RX_ALL) { 3320 netif_rx_schedule(dev, &np->napi); 3321 3322 /* Disable furthur receive irq's */ 3323 spin_lock(&np->lock); 3324 np->irqmask &= ~NVREG_IRQ_RX_ALL; 3325 3326 if (np->msi_flags & NV_MSI_X_ENABLED) 3327 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3328 else 3329 writel(np->irqmask, base + NvRegIrqMask); 3330 spin_unlock(&np->lock); 3331 } 3332#else 3333 if (nv_rx_process(dev, RX_WORK_PER_LOOP)) { 3334 if (unlikely(nv_alloc_rx(dev))) { 3335 spin_lock(&np->lock); 3336 if (!np->in_shutdown) 3337 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3338 spin_unlock(&np->lock); 3339 } 3340 } 3341#endif 3342 if (unlikely(events & NVREG_IRQ_LINK)) { 3343 spin_lock(&np->lock); 3344 nv_link_irq(dev); 3345 spin_unlock(&np->lock); 3346 } 3347 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3348 spin_lock(&np->lock); 3349 nv_linkchange(dev); 3350 spin_unlock(&np->lock); 3351 np->link_timeout = jiffies + LINK_TIMEOUT; 3352 } 3353 if (unlikely(events & (NVREG_IRQ_TX_ERR))) { 3354 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 3355 dev->name, events); 3356 } 3357 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) { 3358 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. 
Please report\n", 3359 dev->name, events); 3360 } 3361 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { 3362 spin_lock(&np->lock); 3363 /* disable interrupts on the nic */ 3364 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3365 writel(0, base + NvRegIrqMask); 3366 else 3367 writel(np->irqmask, base + NvRegIrqMask); 3368 pci_push(base); 3369 3370 if (!np->in_shutdown) { 3371 np->nic_poll_irq = np->irqmask; 3372 np->recover_error = 1; 3373 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3374 } 3375 spin_unlock(&np->lock); 3376 break; 3377 } 3378 if (unlikely(i > max_interrupt_work)) { 3379 spin_lock(&np->lock); 3380 /* disable interrupts on the nic */ 3381 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3382 writel(0, base + NvRegIrqMask); 3383 else 3384 writel(np->irqmask, base + NvRegIrqMask); 3385 pci_push(base); 3386 3387 if (!np->in_shutdown) { 3388 np->nic_poll_irq = np->irqmask; 3389 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3390 } 3391 spin_unlock(&np->lock); 3392 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); 3393 break; 3394 } 3395 3396 } 3397 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 3398 3399 return IRQ_RETVAL(i); 3400} 3401 3402/** 3403 * All _optimized functions are used to help increase performance 3404 * (reduce CPU and increase throughput). They use descripter version 3, 3405 * compiler directives, and reduce memory accesses. 3406 */ 3407static irqreturn_t nv_nic_irq_optimized(int foo, void *data) 3408{ 3409 struct net_device *dev = (struct net_device *) data; 3410 struct fe_priv *np = netdev_priv(dev); 3411 u8 __iomem *base = get_hwbase(dev); 3412 u32 events; 3413 int i; 3414 3415 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name); 3416 3417 for (i=0; ; i++) { 3418 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3419 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3420 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3421 } else { 3422 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3423 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 3424 } 3425 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3426 if (!(events & np->irqmask)) 3427 break; 3428 3429 nv_msi_workaround(np); 3430 3431 spin_lock(&np->lock); 3432 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3433 spin_unlock(&np->lock); 3434 3435#ifdef CONFIG_FORCEDETH_NAPI 3436 if (events & NVREG_IRQ_RX_ALL) { 3437 netif_rx_schedule(dev, &np->napi); 3438 3439 /* Disable furthur receive irq's */ 3440 spin_lock(&np->lock); 3441 np->irqmask &= ~NVREG_IRQ_RX_ALL; 3442 3443 if (np->msi_flags & NV_MSI_X_ENABLED) 3444 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3445 else 3446 writel(np->irqmask, base + NvRegIrqMask); 3447 spin_unlock(&np->lock); 3448 } 3449#else 3450 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { 3451 if (unlikely(nv_alloc_rx_optimized(dev))) { 3452 spin_lock(&np->lock); 3453 if (!np->in_shutdown) 3454 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3455 spin_unlock(&np->lock); 3456 } 3457 } 3458#endif 3459 if (unlikely(events & NVREG_IRQ_LINK)) { 3460 spin_lock(&np->lock); 3461 nv_link_irq(dev); 3462 spin_unlock(&np->lock); 3463 } 3464 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3465 spin_lock(&np->lock); 3466 nv_linkchange(dev); 3467 spin_unlock(&np->lock); 3468 np->link_timeout = jiffies + LINK_TIMEOUT; 3469 } 3470 if (unlikely(events & (NVREG_IRQ_TX_ERR))) { 3471 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. 
Probably TX fail.\n", 3472 dev->name, events); 3473 } 3474 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) { 3475 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 3476 dev->name, events); 3477 } 3478 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { 3479 spin_lock(&np->lock); 3480 /* disable interrupts on the nic */ 3481 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3482 writel(0, base + NvRegIrqMask); 3483 else 3484 writel(np->irqmask, base + NvRegIrqMask); 3485 pci_push(base); 3486 3487 if (!np->in_shutdown) { 3488 np->nic_poll_irq = np->irqmask; 3489 np->recover_error = 1; 3490 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3491 } 3492 spin_unlock(&np->lock); 3493 break; 3494 } 3495 3496 if (unlikely(i > max_interrupt_work)) { 3497 spin_lock(&np->lock); 3498 /* disable interrupts on the nic */ 3499 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3500 writel(0, base + NvRegIrqMask); 3501 else 3502 writel(np->irqmask, base + NvRegIrqMask); 3503 pci_push(base); 3504 3505 if (!np->in_shutdown) { 3506 np->nic_poll_irq = np->irqmask; 3507 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3508 } 3509 spin_unlock(&np->lock); 3510 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); 3511 break; 3512 } 3513 3514 } 3515 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name); 3516 3517 return IRQ_RETVAL(i); 3518} 3519 3520static irqreturn_t nv_nic_irq_tx(int foo, void *data) 3521{ 3522 struct net_device *dev = (struct net_device *) data; 3523 struct fe_priv *np = netdev_priv(dev); 3524 u8 __iomem *base = get_hwbase(dev); 3525 u32 events; 3526 int i; 3527 unsigned long flags; 3528 3529 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 3530 3531 for (i=0; ; i++) { 3532 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3533 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3534 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 3535 if (!(events & np->irqmask)) 3536 break; 3537 3538 spin_lock_irqsave(&np->lock, flags); 3539 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3540 spin_unlock_irqrestore(&np->lock, flags); 3541 3542 if (unlikely(events & (NVREG_IRQ_TX_ERR))) { 3543 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. 
Probably TX fail.\n", 3544 dev->name, events); 3545 } 3546 if (unlikely(i > max_interrupt_work)) { 3547 spin_lock_irqsave(&np->lock, flags); 3548 /* disable interrupts on the nic */ 3549 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3550 pci_push(base); 3551 3552 if (!np->in_shutdown) { 3553 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 3554 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3555 } 3556 spin_unlock_irqrestore(&np->lock, flags); 3557 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 3558 break; 3559 } 3560 3561 } 3562 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); 3563 3564 return IRQ_RETVAL(i); 3565} 3566 3567#ifdef CONFIG_FORCEDETH_NAPI 3568static int nv_napi_poll(struct napi_struct *napi, int budget) 3569{ 3570 struct fe_priv *np = container_of(napi, struct fe_priv, napi); 3571 struct net_device *dev = np->dev; 3572 u8 __iomem *base = get_hwbase(dev); 3573 unsigned long flags; 3574 int pkts, retcode; 3575 3576 if (!nv_optimized(np)) { 3577 pkts = nv_rx_process(dev, budget); 3578 retcode = nv_alloc_rx(dev); 3579 } else { 3580 pkts = nv_rx_process_optimized(dev, budget); 3581 retcode = nv_alloc_rx_optimized(dev); 3582 } 3583 3584 if (retcode) { 3585 spin_lock_irqsave(&np->lock, flags); 3586 if (!np->in_shutdown) 3587 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3588 spin_unlock_irqrestore(&np->lock, flags); 3589 } 3590 3591 if (pkts < budget) { 3592 /* re-enable receive interrupts */ 3593 spin_lock_irqsave(&np->lock, flags); 3594 3595 __netif_rx_complete(dev, napi); 3596 3597 np->irqmask |= NVREG_IRQ_RX_ALL; 3598 if (np->msi_flags & NV_MSI_X_ENABLED) 3599 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3600 else 3601 writel(np->irqmask, base + NvRegIrqMask); 3602 3603 spin_unlock_irqrestore(&np->lock, flags); 3604 } 3605 return pkts; 3606} 3607#endif 3608 3609#ifdef CONFIG_FORCEDETH_NAPI 3610static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3611{ 3612 struct net_device *dev = (struct net_device *) data; 3613 struct fe_priv *np = netdev_priv(dev); 3614 u8 __iomem *base = get_hwbase(dev); 3615 u32 events; 3616 3617 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3618 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3619 3620 if (events) { 3621 netif_rx_schedule(dev, &np->napi); 3622 /* disable receive interrupts on the nic */ 3623 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3624 pci_push(base); 3625 } 3626 return IRQ_HANDLED; 3627} 3628#else 3629static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3630{ 3631 struct net_device *dev = (struct net_device *) data; 3632 struct fe_priv *np = netdev_priv(dev); 3633 u8 __iomem *base = get_hwbase(dev); 3634 u32 events; 3635 int i; 3636 unsigned long flags; 3637 3638 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 3639 3640 for (i=0; ; i++) { 3641 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3642 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3643 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 3644 if (!(events & np->irqmask)) 3645 break; 3646 3647 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { 3648 if (unlikely(nv_alloc_rx_optimized(dev))) { 3649 spin_lock_irqsave(&np->lock, flags); 3650 if (!np->in_shutdown) 3651 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3652 spin_unlock_irqrestore(&np->lock, flags); 3653 } 3654 } 3655 3656 if (unlikely(i > max_interrupt_work)) { 3657 spin_lock_irqsave(&np->lock, flags); 3658 /* disable interrupts on the nic */ 3659 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3660 
pci_push(base); 3661 3662 if (!np->in_shutdown) { 3663 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 3664 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3665 } 3666 spin_unlock_irqrestore(&np->lock, flags); 3667 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 3668 break; 3669 } 3670 } 3671 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); 3672 3673 return IRQ_RETVAL(i); 3674} 3675#endif 3676 3677static irqreturn_t nv_nic_irq_other(int foo, void *data) 3678{ 3679 struct net_device *dev = (struct net_device *) data; 3680 struct fe_priv *np = netdev_priv(dev); 3681 u8 __iomem *base = get_hwbase(dev); 3682 u32 events; 3683 int i; 3684 unsigned long flags; 3685 3686 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 3687 3688 for (i=0; ; i++) { 3689 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3690 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3691 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3692 if (!(events & np->irqmask)) 3693 break; 3694 3695 /* check tx in case we reached max loop limit in tx isr */ 3696 spin_lock_irqsave(&np->lock, flags); 3697 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3698 spin_unlock_irqrestore(&np->lock, flags); 3699 3700 if (events & NVREG_IRQ_LINK) { 3701 spin_lock_irqsave(&np->lock, flags); 3702 nv_link_irq(dev); 3703 spin_unlock_irqrestore(&np->lock, flags); 3704 } 3705 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 3706 spin_lock_irqsave(&np->lock, flags); 3707 nv_linkchange(dev); 3708 spin_unlock_irqrestore(&np->lock, flags); 3709 np->link_timeout = jiffies + LINK_TIMEOUT; 3710 } 3711 if (events & NVREG_IRQ_RECOVER_ERROR) { 3712 spin_lock_irq(&np->lock); 3713 /* disable interrupts on the nic */ 3714 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3715 pci_push(base); 3716 3717 if (!np->in_shutdown) { 3718 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3719 np->recover_error = 1; 3720 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3721 } 3722 spin_unlock_irq(&np->lock); 3723 break; 3724 } 3725 if (events & (NVREG_IRQ_UNKNOWN)) { 3726 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. 
Please report\n", 3727 dev->name, events); 3728 } 3729 if (unlikely(i > max_interrupt_work)) { 3730 spin_lock_irqsave(&np->lock, flags); 3731 /* disable interrupts on the nic */ 3732 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3733 pci_push(base); 3734 3735 if (!np->in_shutdown) { 3736 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3737 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3738 } 3739 spin_unlock_irqrestore(&np->lock, flags); 3740 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 3741 break; 3742 } 3743 3744 } 3745 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); 3746 3747 return IRQ_RETVAL(i); 3748} 3749 3750static irqreturn_t nv_nic_irq_test(int foo, void *data) 3751{ 3752 struct net_device *dev = (struct net_device *) data; 3753 struct fe_priv *np = netdev_priv(dev); 3754 u8 __iomem *base = get_hwbase(dev); 3755 u32 events; 3756 3757 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name); 3758 3759 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3760 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3761 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3762 } else { 3763 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3764 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3765 } 3766 pci_push(base); 3767 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3768 if (!(events & NVREG_IRQ_TIMER)) 3769 return IRQ_RETVAL(0); 3770 3771 nv_msi_workaround(np); 3772 3773 spin_lock(&np->lock); 3774 np->intr_test = 1; 3775 spin_unlock(&np->lock); 3776 3777 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); 3778 3779 return IRQ_RETVAL(1); 3780} 3781 3782static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) 3783{ 3784 u8 __iomem *base = get_hwbase(dev); 3785 int i; 3786 u32 msixmap = 0; 3787 3788 /* Each interrupt bit can be mapped to a MSIX vector (4 bits). 3789 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents 3790 * the remaining 8 interrupts. 
3791 */ 3792 for (i = 0; i < 8; i++) { 3793 if ((irqmask >> i) & 0x1) { 3794 msixmap |= vector << (i << 2); 3795 } 3796 } 3797 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3798 3799 msixmap = 0; 3800 for (i = 0; i < 8; i++) { 3801 if ((irqmask >> (i + 8)) & 0x1) { 3802 msixmap |= vector << (i << 2); 3803 } 3804 } 3805 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3806} 3807 3808static int nv_request_irq(struct net_device *dev, int intr_test) 3809{ 3810 struct fe_priv *np = get_nvpriv(dev); 3811 u8 __iomem *base = get_hwbase(dev); 3812 int ret = 1; 3813 int i; 3814 irqreturn_t (*handler)(int foo, void *data); 3815 3816 if (intr_test) { 3817 handler = nv_nic_irq_test; 3818 } else { 3819 if (nv_optimized(np)) 3820 handler = nv_nic_irq_optimized; 3821 else 3822 handler = nv_nic_irq; 3823 } 3824 3825 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3826 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3827 np->msi_x_entry[i].entry = i; 3828 } 3829 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 3830 np->msi_flags |= NV_MSI_X_ENABLED; 3831 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3832 /* Request irq for rx handling */ 3833 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) { 3834 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 3835 pci_disable_msix(np->pci_dev); 3836 np->msi_flags &= ~NV_MSI_X_ENABLED; 3837 goto out_err; 3838 } 3839 /* Request irq for tx handling */ 3840 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) { 3841 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 3842 pci_disable_msix(np->pci_dev); 3843 np->msi_flags &= ~NV_MSI_X_ENABLED; 3844 goto out_free_rx; 3845 } 3846 /* Request irq for link and timer handling */ 3847 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) { 3848 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 3849 pci_disable_msix(np->pci_dev); 3850 np->msi_flags &= ~NV_MSI_X_ENABLED; 3851 goto out_free_tx; 3852 } 3853 /* map interrupts to their respective vector */ 3854 writel(0, base + NvRegMSIXMap0); 3855 writel(0, base + NvRegMSIXMap1); 3856 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 3857 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 3858 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 3859 } else { 3860 /* Request irq for all interrupts */ 3861 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 3862 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3863 pci_disable_msix(np->pci_dev); 3864 np->msi_flags &= ~NV_MSI_X_ENABLED; 3865 goto out_err; 3866 } 3867 3868 /* map interrupts to vector 0 */ 3869 writel(0, base + NvRegMSIXMap0); 3870 writel(0, base + NvRegMSIXMap1); 3871 } 3872 } 3873 } 3874 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3875 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3876 np->msi_flags |= NV_MSI_ENABLED; 3877 dev->irq = np->pci_dev->irq; 3878 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3879 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3880 pci_disable_msi(np->pci_dev); 3881 np->msi_flags &= ~NV_MSI_ENABLED; 3882 dev->irq = np->pci_dev->irq; 3883 goto out_err; 3884 } 3885 3886 
/* map interrupts to vector 0 */ 3887 writel(0, base + NvRegMSIMap0); 3888 writel(0, base + NvRegMSIMap1); 3889 /* enable msi vector 0 */ 3890 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3891 } 3892 } 3893 if (ret != 0) { 3894 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) 3895 goto out_err; 3896 3897 } 3898 3899 return 0; 3900out_free_tx: 3901 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); 3902out_free_rx: 3903 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); 3904out_err: 3905 return 1; 3906} 3907 3908static void nv_free_irq(struct net_device *dev) 3909{ 3910 struct fe_priv *np = get_nvpriv(dev); 3911 int i; 3912 3913 if (np->msi_flags & NV_MSI_X_ENABLED) { 3914 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3915 free_irq(np->msi_x_entry[i].vector, dev); 3916 } 3917 pci_disable_msix(np->pci_dev); 3918 np->msi_flags &= ~NV_MSI_X_ENABLED; 3919 } else { 3920 free_irq(np->pci_dev->irq, dev); 3921 if (np->msi_flags & NV_MSI_ENABLED) { 3922 pci_disable_msi(np->pci_dev); 3923 np->msi_flags &= ~NV_MSI_ENABLED; 3924 } 3925 } 3926} 3927 3928static void nv_do_nic_poll(unsigned long data) 3929{ 3930 struct net_device *dev = (struct net_device *) data; 3931 struct fe_priv *np = netdev_priv(dev); 3932 u8 __iomem *base = get_hwbase(dev); 3933 u32 mask = 0; 3934 3935 /* 3936 * First disable irq(s) and then 3937 * reenable interrupts on the nic, we have to do this before calling 3938 * nv_nic_irq because that may decide to do otherwise 3939 */ 3940 3941 if (!using_multi_irqs(dev)) { 3942 if (np->msi_flags & NV_MSI_X_ENABLED) 3943 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 3944 else 3945 disable_irq_lockdep(np->pci_dev->irq); 3946 mask = np->irqmask; 3947 } else { 3948 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 3949 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 3950 mask |= NVREG_IRQ_RX_ALL; 3951 } 3952 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 3953 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 3954 mask |= NVREG_IRQ_TX_ALL; 3955 } 3956 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 3957 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 3958 mask |= NVREG_IRQ_OTHER; 3959 } 3960 } 3961 np->nic_poll_irq = 0; 3962 3963 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ 3964 3965 if (np->recover_error) { 3966 np->recover_error = 0; 3967 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n"); 3968 if (netif_running(dev)) { 3969 netif_tx_lock_bh(dev); 3970 spin_lock(&np->lock); 3971 /* stop engines */ 3972 nv_stop_rxtx(dev); 3973 nv_txrx_reset(dev); 3974 /* drain rx queue */ 3975 nv_drain_rxtx(dev); 3976 /* reinit driver view of the rx queue */ 3977 set_bufsize(dev); 3978 if (nv_init_ring(dev)) { 3979 if (!np->in_shutdown) 3980 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3981 } 3982 /* reinit nic view of the rx queue */ 3983 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3984 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3985 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3986 base + NvRegRingSizes); 3987 pci_push(base); 3988 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3989 pci_push(base); 3990 3991 /* restart rx engine */ 3992 nv_start_rxtx(dev); 3993 spin_unlock(&np->lock); 3994 netif_tx_unlock_bh(dev); 3995 } 3996 } 3997 3998 3999 writel(mask, base + NvRegIrqMask); 4000 pci_push(base); 4001 4002 if 
(!using_multi_irqs(dev)) { 4003 if (nv_optimized(np)) 4004 nv_nic_irq_optimized(0, dev); 4005 else 4006 nv_nic_irq(0, dev); 4007 if (np->msi_flags & NV_MSI_X_ENABLED) 4008 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 4009 else 4010 enable_irq_lockdep(np->pci_dev->irq); 4011 } else { 4012 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4013 nv_nic_irq_rx(0, dev); 4014 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 4015 } 4016 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4017 nv_nic_irq_tx(0, dev); 4018 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 4019 } 4020 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4021 nv_nic_irq_other(0, dev); 4022 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 4023 } 4024 } 4025} 4026 4027#ifdef CONFIG_NET_POLL_CONTROLLER 4028static void nv_poll_controller(struct net_device *dev) 4029{ 4030 nv_do_nic_poll((unsigned long) dev); 4031} 4032#endif 4033 4034static void nv_do_stats_poll(unsigned long data) 4035{ 4036 struct net_device *dev = (struct net_device *) data; 4037 struct fe_priv *np = netdev_priv(dev); 4038 4039 nv_get_hw_stats(dev); 4040 4041 if (!np->in_shutdown) 4042 mod_timer(&np->stats_poll, 4043 round_jiffies(jiffies + STATS_INTERVAL)); 4044} 4045 4046static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 4047{ 4048 struct fe_priv *np = netdev_priv(dev); 4049 strcpy(info->driver, DRV_NAME); 4050 strcpy(info->version, FORCEDETH_VERSION); 4051 strcpy(info->bus_info, pci_name(np->pci_dev)); 4052} 4053 4054static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4055{ 4056 struct fe_priv *np = netdev_priv(dev); 4057 wolinfo->supported = WAKE_MAGIC; 4058 4059 spin_lock_irq(&np->lock); 4060 if (np->wolenabled) 4061 wolinfo->wolopts = WAKE_MAGIC; 4062 spin_unlock_irq(&np->lock); 4063} 4064 4065static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4066{ 4067 struct fe_priv *np = netdev_priv(dev); 4068 u8 __iomem *base = get_hwbase(dev); 4069 u32 flags = 0; 4070 4071 if (wolinfo->wolopts == 0) { 4072 np->wolenabled = 0; 4073 } else if (wolinfo->wolopts & WAKE_MAGIC) { 4074 np->wolenabled = 1; 4075 flags = NVREG_WAKEUPFLAGS_ENABLE; 4076 } 4077 if (netif_running(dev)) { 4078 spin_lock_irq(&np->lock); 4079 writel(flags, base + NvRegWakeUpFlags); 4080 spin_unlock_irq(&np->lock); 4081 } 4082 return 0; 4083} 4084 4085static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 4086{ 4087 struct fe_priv *np = netdev_priv(dev); 4088 int adv; 4089 4090 spin_lock_irq(&np->lock); 4091 ecmd->port = PORT_MII; 4092 if (!netif_running(dev)) { 4093 /* We do not track link speed / duplex setting if the 4094 * interface is disabled. 
Force a link check */ 4095 if (nv_update_linkspeed(dev)) { 4096 if (!netif_carrier_ok(dev)) 4097 netif_carrier_on(dev); 4098 } else { 4099 if (netif_carrier_ok(dev)) 4100 netif_carrier_off(dev); 4101 } 4102 } 4103 4104 if (netif_carrier_ok(dev)) { 4105 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 4106 case NVREG_LINKSPEED_10: 4107 ecmd->speed = SPEED_10; 4108 break; 4109 case NVREG_LINKSPEED_100: 4110 ecmd->speed = SPEED_100; 4111 break; 4112 case NVREG_LINKSPEED_1000: 4113 ecmd->speed = SPEED_1000; 4114 break; 4115 } 4116 ecmd->duplex = DUPLEX_HALF; 4117 if (np->duplex) 4118 ecmd->duplex = DUPLEX_FULL; 4119 } else { 4120 ecmd->speed = -1; 4121 ecmd->duplex = -1; 4122 } 4123 4124 ecmd->autoneg = np->autoneg; 4125 4126 ecmd->advertising = ADVERTISED_MII; 4127 if (np->autoneg) { 4128 ecmd->advertising |= ADVERTISED_Autoneg; 4129 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4130 if (adv & ADVERTISE_10HALF) 4131 ecmd->advertising |= ADVERTISED_10baseT_Half; 4132 if (adv & ADVERTISE_10FULL) 4133 ecmd->advertising |= ADVERTISED_10baseT_Full; 4134 if (adv & ADVERTISE_100HALF) 4135 ecmd->advertising |= ADVERTISED_100baseT_Half; 4136 if (adv & ADVERTISE_100FULL) 4137 ecmd->advertising |= ADVERTISED_100baseT_Full; 4138 if (np->gigabit == PHY_GIGABIT) { 4139 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4140 if (adv & ADVERTISE_1000FULL) 4141 ecmd->advertising |= ADVERTISED_1000baseT_Full; 4142 } 4143 } 4144 ecmd->supported = (SUPPORTED_Autoneg | 4145 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 4146 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 4147 SUPPORTED_MII); 4148 if (np->gigabit == PHY_GIGABIT) 4149 ecmd->supported |= SUPPORTED_1000baseT_Full; 4150 4151 ecmd->phy_address = np->phyaddr; 4152 ecmd->transceiver = XCVR_EXTERNAL; 4153 4154 /* ignore maxtxpkt, maxrxpkt for now */ 4155 spin_unlock_irq(&np->lock); 4156 return 0; 4157} 4158 4159static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 4160{ 4161 struct fe_priv *np = netdev_priv(dev); 4162 4163 if (ecmd->port != PORT_MII) 4164 return -EINVAL; 4165 if (ecmd->transceiver != XCVR_EXTERNAL) 4166 return -EINVAL; 4167 if (ecmd->phy_address != np->phyaddr) { 4168 /* TODO: support switching between multiple phys. Should be 4169 * trivial, but not enabled due to lack of test hardware. */ 4170 return -EINVAL; 4171 } 4172 if (ecmd->autoneg == AUTONEG_ENABLE) { 4173 u32 mask; 4174 4175 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 4176 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 4177 if (np->gigabit == PHY_GIGABIT) 4178 mask |= ADVERTISED_1000baseT_Full; 4179 4180 if ((ecmd->advertising & mask) == 0) 4181 return -EINVAL; 4182 4183 } else if (ecmd->autoneg == AUTONEG_DISABLE) { 4184 /* Note: autonegotiation disable, speed 1000 intentionally 4185 * forbidden - noone should need that. 
*/ 4186 4187 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) 4188 return -EINVAL; 4189 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 4190 return -EINVAL; 4191 } else { 4192 return -EINVAL; 4193 } 4194 4195 netif_carrier_off(dev); 4196 if (netif_running(dev)) { 4197 nv_disable_irq(dev); 4198 netif_tx_lock_bh(dev); 4199 spin_lock(&np->lock); 4200 /* stop engines */ 4201 nv_stop_rxtx(dev); 4202 spin_unlock(&np->lock); 4203 netif_tx_unlock_bh(dev); 4204 } 4205 4206 if (ecmd->autoneg == AUTONEG_ENABLE) { 4207 int adv, bmcr; 4208 4209 np->autoneg = 1; 4210 4211 /* advertise only what has been requested */ 4212 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4213 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4214 if (ecmd->advertising & ADVERTISED_10baseT_Half) 4215 adv |= ADVERTISE_10HALF; 4216 if (ecmd->advertising & ADVERTISED_10baseT_Full) 4217 adv |= ADVERTISE_10FULL; 4218 if (ecmd->advertising & ADVERTISED_100baseT_Half) 4219 adv |= ADVERTISE_100HALF; 4220 if (ecmd->advertising & ADVERTISED_100baseT_Full) 4221 adv |= ADVERTISE_100FULL; 4222 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 4223 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4224 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4225 adv |= ADVERTISE_PAUSE_ASYM; 4226 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4227 4228 if (np->gigabit == PHY_GIGABIT) { 4229 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4230 adv &= ~ADVERTISE_1000FULL; 4231 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 4232 adv |= ADVERTISE_1000FULL; 4233 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4234 } 4235 4236 if (netif_running(dev)) 4237 printk(KERN_INFO "%s: link down.\n", dev->name); 4238 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4239 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4240 bmcr |= BMCR_ANENABLE; 4241 /* reset the phy in order for settings to stick, 4242 * and cause autoneg to start */ 4243 if (phy_reset(dev, bmcr)) { 4244 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4245 return -EINVAL; 4246 } 4247 } else { 4248 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4249 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4250 } 4251 } else { 4252 int adv, bmcr; 4253 4254 np->autoneg = 0; 4255 4256 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4257 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4258 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) 4259 adv |= ADVERTISE_10HALF; 4260 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) 4261 adv |= ADVERTISE_10FULL; 4262 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) 4263 adv |= ADVERTISE_100HALF; 4264 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) 4265 adv |= ADVERTISE_100FULL; 4266 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4267 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ 4268 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4269 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4270 } 4271 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { 4272 adv |= ADVERTISE_PAUSE_ASYM; 4273 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4274 } 4275 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4276 np->fixed_mode = adv; 4277 4278 if (np->gigabit == PHY_GIGABIT) { 4279 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4280 adv &= ~ADVERTISE_1000FULL; 
4281 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4282 } 4283 4284 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4285 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); 4286 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) 4287 bmcr |= BMCR_FULLDPLX; 4288 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) 4289 bmcr |= BMCR_SPEED100; 4290 if (np->phy_oui == PHY_OUI_MARVELL) { 4291 /* reset the phy in order for forced mode settings to stick */ 4292 if (phy_reset(dev, bmcr)) { 4293 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4294 return -EINVAL; 4295 } 4296 } else { 4297 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4298 if (netif_running(dev)) { 4299 /* Wait a bit and then reconfigure the nic. */ 4300 udelay(10); 4301 nv_linkchange(dev); 4302 } 4303 } 4304 } 4305 4306 if (netif_running(dev)) { 4307 nv_start_rxtx(dev); 4308 nv_enable_irq(dev); 4309 } 4310 4311 return 0; 4312} 4313 4314#define FORCEDETH_REGS_VER 1 4315 4316static int nv_get_regs_len(struct net_device *dev) 4317{ 4318 struct fe_priv *np = netdev_priv(dev); 4319 return np->register_size; 4320} 4321 4322static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 4323{ 4324 struct fe_priv *np = netdev_priv(dev); 4325 u8 __iomem *base = get_hwbase(dev); 4326 u32 *rbuf = buf; 4327 int i; 4328 4329 regs->version = FORCEDETH_REGS_VER; 4330 spin_lock_irq(&np->lock); 4331 for (i = 0;i <= np->register_size/sizeof(u32); i++) 4332 rbuf[i] = readl(base + i*sizeof(u32)); 4333 spin_unlock_irq(&np->lock); 4334} 4335 4336static int nv_nway_reset(struct net_device *dev) 4337{ 4338 struct fe_priv *np = netdev_priv(dev); 4339 int ret; 4340 4341 if (np->autoneg) { 4342 int bmcr; 4343 4344 netif_carrier_off(dev); 4345 if (netif_running(dev)) { 4346 nv_disable_irq(dev); 4347 netif_tx_lock_bh(dev); 4348 spin_lock(&np->lock); 4349 /* stop engines */ 4350 nv_stop_rxtx(dev); 4351 spin_unlock(&np->lock); 4352 netif_tx_unlock_bh(dev); 4353 printk(KERN_INFO "%s: link down.\n", dev->name); 4354 } 4355 4356 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4357 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4358 bmcr |= BMCR_ANENABLE; 4359 /* reset the phy in order for settings to stick*/ 4360 if (phy_reset(dev, bmcr)) { 4361 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4362 return -EINVAL; 4363 } 4364 } else { 4365 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4366 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4367 } 4368 4369 if (netif_running(dev)) { 4370 nv_start_rxtx(dev); 4371 nv_enable_irq(dev); 4372 } 4373 ret = 0; 4374 } else { 4375 ret = -EINVAL; 4376 } 4377 4378 return ret; 4379} 4380 4381static int nv_set_tso(struct net_device *dev, u32 value) 4382{ 4383 struct fe_priv *np = netdev_priv(dev); 4384 4385 if ((np->driver_data & DEV_HAS_CHECKSUM)) 4386 return ethtool_op_set_tso(dev, value); 4387 else 4388 return -EOPNOTSUPP; 4389} 4390 4391static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4392{ 4393 struct fe_priv *np = netdev_priv(dev); 4394 4395 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4396 ring->rx_mini_max_pending = 0; 4397 ring->rx_jumbo_max_pending = 0; 4398 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? 
RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4399 4400 ring->rx_pending = np->rx_ring_size; 4401 ring->rx_mini_pending = 0; 4402 ring->rx_jumbo_pending = 0; 4403 ring->tx_pending = np->tx_ring_size; 4404} 4405 4406static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4407{ 4408 struct fe_priv *np = netdev_priv(dev); 4409 u8 __iomem *base = get_hwbase(dev); 4410 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; 4411 dma_addr_t ring_addr; 4412 4413 if (ring->rx_pending < RX_RING_MIN || 4414 ring->tx_pending < TX_RING_MIN || 4415 ring->rx_mini_pending != 0 || 4416 ring->rx_jumbo_pending != 0 || 4417 (np->desc_ver == DESC_VER_1 && 4418 (ring->rx_pending > RING_MAX_DESC_VER_1 || 4419 ring->tx_pending > RING_MAX_DESC_VER_1)) || 4420 (np->desc_ver != DESC_VER_1 && 4421 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 4422 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { 4423 return -EINVAL; 4424 } 4425 4426 /* allocate new rings */ 4427 if (!nv_optimized(np)) { 4428 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4429 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4430 &ring_addr); 4431 } else { 4432 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4433 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4434 &ring_addr); 4435 } 4436 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); 4437 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); 4438 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { 4439 /* fall back to old rings */ 4440 if (!nv_optimized(np)) { 4441 if (rxtx_ring) 4442 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4443 rxtx_ring, ring_addr); 4444 } else { 4445 if (rxtx_ring) 4446 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4447 rxtx_ring, ring_addr); 4448 } 4449 if (rx_skbuff) 4450 kfree(rx_skbuff); 4451 if (tx_skbuff) 4452 kfree(tx_skbuff); 4453 goto exit; 4454 } 4455 4456 if (netif_running(dev)) { 4457 nv_disable_irq(dev); 4458 netif_tx_lock_bh(dev); 4459 spin_lock(&np->lock); 4460 /* stop engines */ 4461 nv_stop_rxtx(dev); 4462 nv_txrx_reset(dev); 4463 /* drain queues */ 4464 nv_drain_rxtx(dev); 4465 /* delete queues */ 4466 free_rings(dev); 4467 } 4468 4469 /* set new values */ 4470 np->rx_ring_size = ring->rx_pending; 4471 np->tx_ring_size = ring->tx_pending; 4472 4473 if (!nv_optimized(np)) { 4474 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4475 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4476 } else { 4477 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4478 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4479 } 4480 np->rx_skb = (struct nv_skb_map*)rx_skbuff; 4481 np->tx_skb = (struct nv_skb_map*)tx_skbuff; 4482 np->ring_addr = ring_addr; 4483 4484 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4485 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); 4486 4487 if (netif_running(dev)) { 4488 /* reinit driver view of the queues */ 4489 set_bufsize(dev); 4490 if (nv_init_ring(dev)) { 4491 if (!np->in_shutdown) 4492 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4493 } 4494 4495 /* reinit nic view of the queues */ 4496 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4497 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4498 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4499 base + NvRegRingSizes); 4500 pci_push(base); 4501 
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4502 pci_push(base); 4503 4504 /* restart engines */ 4505 nv_start_rxtx(dev); 4506 spin_unlock(&np->lock); 4507 netif_tx_unlock_bh(dev); 4508 nv_enable_irq(dev); 4509 } 4510 return 0; 4511exit: 4512 return -ENOMEM; 4513} 4514 4515static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4516{ 4517 struct fe_priv *np = netdev_priv(dev); 4518 4519 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; 4520 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; 4521 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; 4522} 4523 4524static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4525{ 4526 struct fe_priv *np = netdev_priv(dev); 4527 int adv, bmcr; 4528 4529 if ((!np->autoneg && np->duplex == 0) || 4530 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4531 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", 4532 dev->name); 4533 return -EINVAL; 4534 } 4535 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4536 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 4537 return -EINVAL; 4538 } 4539 4540 netif_carrier_off(dev); 4541 if (netif_running(dev)) { 4542 nv_disable_irq(dev); 4543 netif_tx_lock_bh(dev); 4544 spin_lock(&np->lock); 4545 /* stop engines */ 4546 nv_stop_rxtx(dev); 4547 spin_unlock(&np->lock); 4548 netif_tx_unlock_bh(dev); 4549 } 4550 4551 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); 4552 if (pause->rx_pause) 4553 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; 4554 if (pause->tx_pause) 4555 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; 4556 4557 if (np->autoneg && pause->autoneg) { 4558 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; 4559 4560 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4561 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4562 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */ 4563 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4564 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4565 adv |= ADVERTISE_PAUSE_ASYM; 4566 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4567 4568 if (netif_running(dev)) 4569 printk(KERN_INFO "%s: link down.\n", dev->name); 4570 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4571 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4572 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4573 } else { 4574 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4575 if (pause->rx_pause) 4576 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4577 if (pause->tx_pause) 4578 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4579 4580 if (!netif_running(dev)) 4581 nv_update_linkspeed(dev); 4582 else 4583 nv_update_pause(dev, np->pause_flags); 4584 } 4585 4586 if (netif_running(dev)) { 4587 nv_start_rxtx(dev); 4588 nv_enable_irq(dev); 4589 } 4590 return 0; 4591} 4592 4593static u32 nv_get_rx_csum(struct net_device *dev) 4594{ 4595 struct fe_priv *np = netdev_priv(dev); 4596 return (np->rx_csum) != 0; 4597} 4598 4599static int nv_set_rx_csum(struct net_device *dev, u32 data) 4600{ 4601 struct fe_priv *np = netdev_priv(dev); 4602 u8 __iomem *base = get_hwbase(dev); 4603 int retcode = 0; 4604 4605 if (np->driver_data & DEV_HAS_CHECKSUM) { 4606 if (data) { 4607 np->rx_csum = 1; 4608 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4609 } else { 4610 np->rx_csum = 0; 4611 /* vlan is dependent 
on rx checksum offload */ 4612 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) 4613 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; 4614 } 4615 if (netif_running(dev)) { 4616 spin_lock_irq(&np->lock); 4617 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4618 spin_unlock_irq(&np->lock); 4619 } 4620 } else { 4621 return -EINVAL; 4622 } 4623 4624 return retcode; 4625} 4626 4627static int nv_set_tx_csum(struct net_device *dev, u32 data) 4628{ 4629 struct fe_priv *np = netdev_priv(dev); 4630 4631 if (np->driver_data & DEV_HAS_CHECKSUM) 4632 return ethtool_op_set_tx_hw_csum(dev, data); 4633 else 4634 return -EOPNOTSUPP; 4635} 4636 4637static int nv_set_sg(struct net_device *dev, u32 data) 4638{ 4639 struct fe_priv *np = netdev_priv(dev); 4640 4641 if (np->driver_data & DEV_HAS_CHECKSUM) 4642 return ethtool_op_set_sg(dev, data); 4643 else 4644 return -EOPNOTSUPP; 4645} 4646 4647static int nv_get_sset_count(struct net_device *dev, int sset) 4648{ 4649 struct fe_priv *np = netdev_priv(dev); 4650 4651 switch (sset) { 4652 case ETH_SS_TEST: 4653 if (np->driver_data & DEV_HAS_TEST_EXTENDED) 4654 return NV_TEST_COUNT_EXTENDED; 4655 else 4656 return NV_TEST_COUNT_BASE; 4657 case ETH_SS_STATS: 4658 if (np->driver_data & DEV_HAS_STATISTICS_V1) 4659 return NV_DEV_STATISTICS_V1_COUNT; 4660 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4661 return NV_DEV_STATISTICS_V2_COUNT; 4662 else 4663 return 0; 4664 default: 4665 return -EOPNOTSUPP; 4666 } 4667} 4668 4669static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) 4670{ 4671 struct fe_priv *np = netdev_priv(dev); 4672 4673 /* update stats */ 4674 nv_do_stats_poll((unsigned long)dev); 4675 4676 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); 4677} 4678 4679static int nv_link_test(struct net_device *dev) 4680{ 4681 struct fe_priv *np = netdev_priv(dev); 4682 int mii_status; 4683 4684 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4685 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4686 4687 /* check phy link status */ 4688 if (!(mii_status & BMSR_LSTATUS)) 4689 return 0; 4690 else 4691 return 1; 4692} 4693 4694static int nv_register_test(struct net_device *dev) 4695{ 4696 u8 __iomem *base = get_hwbase(dev); 4697 int i = 0; 4698 u32 orig_read, new_read; 4699 4700 do { 4701 orig_read = readl(base + nv_registers_test[i].reg); 4702 4703 /* xor with mask to toggle bits */ 4704 orig_read ^= nv_registers_test[i].mask; 4705 4706 writel(orig_read, base + nv_registers_test[i].reg); 4707 4708 new_read = readl(base + nv_registers_test[i].reg); 4709 4710 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) 4711 return 0; 4712 4713 /* restore original value */ 4714 orig_read ^= nv_registers_test[i].mask; 4715 writel(orig_read, base + nv_registers_test[i].reg); 4716 4717 } while (nv_registers_test[++i].reg != 0); 4718 4719 return 1; 4720} 4721 4722static int nv_interrupt_test(struct net_device *dev) 4723{ 4724 struct fe_priv *np = netdev_priv(dev); 4725 u8 __iomem *base = get_hwbase(dev); 4726 int ret = 1; 4727 int testcnt; 4728 u32 save_msi_flags, save_poll_interval = 0; 4729 4730 if (netif_running(dev)) { 4731 /* free current irq */ 4732 nv_free_irq(dev); 4733 save_poll_interval = readl(base+NvRegPollingInterval); 4734 } 4735 4736 /* flag to test interrupt handler */ 4737 np->intr_test = 0; 4738 4739 /* setup test irq */ 4740 save_msi_flags = np->msi_flags; 4741 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; 4742 np->msi_flags |= 0x001; /* setup 1 vector 
*/ 4743 if (nv_request_irq(dev, 1)) 4744 return 0; 4745 4746 /* setup timer interrupt */ 4747 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 4748 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4749 4750 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4751 4752 /* wait for at least one interrupt */ 4753 msleep(100); 4754 4755 spin_lock_irq(&np->lock); 4756 4757 /* flag should be set within ISR */ 4758 testcnt = np->intr_test; 4759 if (!testcnt) 4760 ret = 2; 4761 4762 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4763 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 4764 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4765 else 4766 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4767 4768 spin_unlock_irq(&np->lock); 4769 4770 nv_free_irq(dev); 4771 4772 np->msi_flags = save_msi_flags; 4773 4774 if (netif_running(dev)) { 4775 writel(save_poll_interval, base + NvRegPollingInterval); 4776 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4777 /* restore original irq */ 4778 if (nv_request_irq(dev, 0)) 4779 return 0; 4780 } 4781 4782 return ret; 4783} 4784 4785static int nv_loopback_test(struct net_device *dev) 4786{ 4787 struct fe_priv *np = netdev_priv(dev); 4788 u8 __iomem *base = get_hwbase(dev); 4789 struct sk_buff *tx_skb, *rx_skb; 4790 dma_addr_t test_dma_addr; 4791 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 4792 u32 flags; 4793 int len, i, pkt_len; 4794 u8 *pkt_data; 4795 u32 filter_flags = 0; 4796 u32 misc1_flags = 0; 4797 int ret = 1; 4798 4799 if (netif_running(dev)) { 4800 nv_disable_irq(dev); 4801 filter_flags = readl(base + NvRegPacketFilterFlags); 4802 misc1_flags = readl(base + NvRegMisc1); 4803 } else { 4804 nv_txrx_reset(dev); 4805 } 4806 4807 /* reinit driver view of the rx queue */ 4808 set_bufsize(dev); 4809 nv_init_ring(dev); 4810 4811 /* setup hardware for loopback */ 4812 writel(NVREG_MISC1_FORCE, base + NvRegMisc1); 4813 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); 4814 4815 /* reinit nic view of the rx queue */ 4816 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4817 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4818 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4819 base + NvRegRingSizes); 4820 pci_push(base); 4821 4822 /* restart rx engine */ 4823 nv_start_rxtx(dev); 4824 4825 /* setup packet for tx */ 4826 pkt_len = ETH_DATA_LEN; 4827 tx_skb = dev_alloc_skb(pkt_len); 4828 if (!tx_skb) { 4829 printk(KERN_ERR "dev_alloc_skb() failed during loopback test" 4830 " of %s\n", dev->name); 4831 ret = 0; 4832 goto out; 4833 } 4834 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, 4835 skb_tailroom(tx_skb), 4836 PCI_DMA_FROMDEVICE); 4837 pkt_data = skb_put(tx_skb, pkt_len); 4838 for (i = 0; i < pkt_len; i++) 4839 pkt_data[i] = (u8)(i & 0xff); 4840 4841 if (!nv_optimized(np)) { 4842 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 4843 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 4844 } else { 4845 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); 4846 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); 4847 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 4848 } 4849 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4850 pci_push(get_hwbase(dev)); 4851 4852 msleep(500); 4853 4854 /* check for rx of the packet */ 4855 if 
(!nv_optimized(np)) { 4856 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 4857 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 4858 4859 } else { 4860 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); 4861 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 4862 } 4863 4864 if (flags & NV_RX_AVAIL) { 4865 ret = 0; 4866 } else if (np->desc_ver == DESC_VER_1) { 4867 if (flags & NV_RX_ERROR) 4868 ret = 0; 4869 } else { 4870 if (flags & NV_RX2_ERROR) { 4871 ret = 0; 4872 } 4873 } 4874 4875 if (ret) { 4876 if (len != pkt_len) { 4877 ret = 0; 4878 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 4879 dev->name, len, pkt_len); 4880 } else { 4881 rx_skb = np->rx_skb[0].skb; 4882 for (i = 0; i < pkt_len; i++) { 4883 if (rx_skb->data[i] != (u8)(i & 0xff)) { 4884 ret = 0; 4885 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", 4886 dev->name, i); 4887 break; 4888 } 4889 } 4890 } 4891 } else { 4892 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); 4893 } 4894 4895 pci_unmap_page(np->pci_dev, test_dma_addr, 4896 (skb_end_pointer(tx_skb) - tx_skb->data), 4897 PCI_DMA_TODEVICE); 4898 dev_kfree_skb_any(tx_skb); 4899 out: 4900 /* stop engines */ 4901 nv_stop_rxtx(dev); 4902 nv_txrx_reset(dev); 4903 /* drain rx queue */ 4904 nv_drain_rxtx(dev); 4905 4906 if (netif_running(dev)) { 4907 writel(misc1_flags, base + NvRegMisc1); 4908 writel(filter_flags, base + NvRegPacketFilterFlags); 4909 nv_enable_irq(dev); 4910 } 4911 4912 return ret; 4913} 4914 4915static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) 4916{ 4917 struct fe_priv *np = netdev_priv(dev); 4918 u8 __iomem *base = get_hwbase(dev); 4919 int result; 4920 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); 4921 4922 if (!nv_link_test(dev)) { 4923 test->flags |= ETH_TEST_FL_FAILED; 4924 buffer[0] = 1; 4925 } 4926 4927 if (test->flags & ETH_TEST_FL_OFFLINE) { 4928 if (netif_running(dev)) { 4929 netif_stop_queue(dev); 4930#ifdef CONFIG_FORCEDETH_NAPI 4931 napi_disable(&np->napi); 4932#endif 4933 netif_tx_lock_bh(dev); 4934 spin_lock_irq(&np->lock); 4935 nv_disable_hw_interrupts(dev, np->irqmask); 4936 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 4937 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4938 } else { 4939 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4940 } 4941 /* stop engines */ 4942 nv_stop_rxtx(dev); 4943 nv_txrx_reset(dev); 4944 /* drain rx queue */ 4945 nv_drain_rxtx(dev); 4946 spin_unlock_irq(&np->lock); 4947 netif_tx_unlock_bh(dev); 4948 } 4949 4950 if (!nv_register_test(dev)) { 4951 test->flags |= ETH_TEST_FL_FAILED; 4952 buffer[1] = 1; 4953 } 4954 4955 result = nv_interrupt_test(dev); 4956 if (result != 1) { 4957 test->flags |= ETH_TEST_FL_FAILED; 4958 buffer[2] = 1; 4959 } 4960 if (result == 0) { 4961 /* bail out */ 4962 return; 4963 } 4964 4965 if (!nv_loopback_test(dev)) { 4966 test->flags |= ETH_TEST_FL_FAILED; 4967 buffer[3] = 1; 4968 } 4969 4970 if (netif_running(dev)) { 4971 /* reinit driver view of the rx queue */ 4972 set_bufsize(dev); 4973 if (nv_init_ring(dev)) { 4974 if (!np->in_shutdown) 4975 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4976 } 4977 /* reinit nic view of the rx queue */ 4978 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4979 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4980 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4981 base + NvRegRingSizes); 4982 pci_push(base); 4983 
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4984 pci_push(base); 4985 /* restart rx engine */ 4986 nv_start_rxtx(dev); 4987 netif_start_queue(dev); 4988#ifdef CONFIG_FORCEDETH_NAPI 4989 napi_enable(&np->napi); 4990#endif 4991 nv_enable_hw_interrupts(dev, np->irqmask); 4992 } 4993 } 4994} 4995 4996static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) 4997{ 4998 switch (stringset) { 4999 case ETH_SS_STATS: 5000 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); 5001 break; 5002 case ETH_SS_TEST: 5003 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); 5004 break; 5005 } 5006} 5007 5008static const struct ethtool_ops ops = { 5009 .get_drvinfo = nv_get_drvinfo, 5010 .get_link = ethtool_op_get_link, 5011 .get_wol = nv_get_wol, 5012 .set_wol = nv_set_wol, 5013 .get_settings = nv_get_settings, 5014 .set_settings = nv_set_settings, 5015 .get_regs_len = nv_get_regs_len, 5016 .get_regs = nv_get_regs, 5017 .nway_reset = nv_nway_reset, 5018 .set_tso = nv_set_tso, 5019 .get_ringparam = nv_get_ringparam, 5020 .set_ringparam = nv_set_ringparam, 5021 .get_pauseparam = nv_get_pauseparam, 5022 .set_pauseparam = nv_set_pauseparam, 5023 .get_rx_csum = nv_get_rx_csum, 5024 .set_rx_csum = nv_set_rx_csum, 5025 .set_tx_csum = nv_set_tx_csum, 5026 .set_sg = nv_set_sg, 5027 .get_strings = nv_get_strings, 5028 .get_ethtool_stats = nv_get_ethtool_stats, 5029 .get_sset_count = nv_get_sset_count, 5030 .self_test = nv_self_test, 5031}; 5032 5033static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 5034{ 5035 struct fe_priv *np = get_nvpriv(dev); 5036 5037 spin_lock_irq(&np->lock); 5038 5039 /* save vlan group */ 5040 np->vlangrp = grp; 5041 5042 if (grp) { 5043 /* enable vlan on MAC */ 5044 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; 5045 } else { 5046 /* disable vlan on MAC */ 5047 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 5048 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 5049 } 5050 5051 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5052 5053 spin_unlock_irq(&np->lock); 5054} 5055 5056/* The mgmt unit and driver use a semaphore to access the phy during init */ 5057static int nv_mgmt_acquire_sema(struct net_device *dev) 5058{ 5059 u8 __iomem *base = get_hwbase(dev); 5060 int i; 5061 u32 tx_ctrl, mgmt_sema; 5062 5063 for (i = 0; i < 10; i++) { 5064 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; 5065 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) 5066 break; 5067 msleep(500); 5068 } 5069 5070 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) 5071 return 0; 5072 5073 for (i = 0; i < 2; i++) { 5074 tx_ctrl = readl(base + NvRegTransmitterControl); 5075 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; 5076 writel(tx_ctrl, base + NvRegTransmitterControl); 5077 5078 /* verify that semaphore was acquired */ 5079 tx_ctrl = readl(base + NvRegTransmitterControl); 5080 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && 5081 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) 5082 return 1; 5083 else 5084 udelay(50); 5085 } 5086 5087 return 0; 5088} 5089 5090static int nv_open(struct net_device *dev) 5091{ 5092 struct fe_priv *np = netdev_priv(dev); 5093 u8 __iomem *base = get_hwbase(dev); 5094 int ret = 1; 5095 int oom, i; 5096 u32 low; 5097 5098 dprintk(KERN_DEBUG "nv_open: begin\n"); 5099 5100 /* erase previous misconfiguration */ 
5101 if (np->driver_data & DEV_HAS_POWER_CNTRL) 5102 nv_mac_reset(dev); 5103 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 5104 writel(0, base + NvRegMulticastAddrB); 5105 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 5106 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 5107 writel(0, base + NvRegPacketFilterFlags); 5108 5109 writel(0, base + NvRegTransmitterControl); 5110 writel(0, base + NvRegReceiverControl); 5111 5112 writel(0, base + NvRegAdapterControl); 5113 5114 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) 5115 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 5116 5117 /* initialize descriptor rings */ 5118 set_bufsize(dev); 5119 oom = nv_init_ring(dev); 5120 5121 writel(0, base + NvRegLinkSpeed); 5122 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5123 nv_txrx_reset(dev); 5124 writel(0, base + NvRegUnknownSetupReg6); 5125 5126 np->in_shutdown = 0; 5127 5128 /* give hw rings */ 5129 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5130 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5131 base + NvRegRingSizes); 5132 5133 writel(np->linkspeed, base + NvRegLinkSpeed); 5134 if (np->desc_ver == DESC_VER_1) 5135 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); 5136 else 5137 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); 5138 writel(np->txrxctl_bits, base + NvRegTxRxControl); 5139 writel(np->vlanctl_bits, base + NvRegVlanControl); 5140 pci_push(base); 5141 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 5142 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 5143 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 5144 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 5145 5146 writel(0, base + NvRegMIIMask); 5147 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5148 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5149 5150 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 5151 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 5152 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 5153 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5154 5155 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 5156 5157 get_random_bytes(&low, sizeof(low)); 5158 low &= NVREG_SLOTTIME_MASK; 5159 if (np->desc_ver == DESC_VER_1) { 5160 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime); 5161 } else { 5162 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) { 5163 /* setup legacy backoff */ 5164 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime); 5165 } else { 5166 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime); 5167 nv_gear_backoff_reseed(dev); 5168 } 5169 } 5170 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); 5171 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); 5172 if (poll_interval == -1) { 5173 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 5174 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 5175 else 5176 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 5177 } 5178 else 5179 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 5180 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5181 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 5182 base + NvRegAdapterControl); 5183 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, 
base + NvRegMIISpeed); 5184 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); 5185 if (np->wolenabled) 5186 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 5187 5188 i = readl(base + NvRegPowerState); 5189 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 5190 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 5191 5192 pci_push(base); 5193 udelay(10); 5194 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); 5195 5196 nv_disable_hw_interrupts(dev, np->irqmask); 5197 pci_push(base); 5198 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5199 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5200 pci_push(base); 5201 5202 if (nv_request_irq(dev, 0)) { 5203 goto out_drain; 5204 } 5205 5206 /* ask for interrupts */ 5207 nv_enable_hw_interrupts(dev, np->irqmask); 5208 5209 spin_lock_irq(&np->lock); 5210 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 5211 writel(0, base + NvRegMulticastAddrB); 5212 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 5213 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 5214 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5215 /* One manual link speed update: Interrupts are enabled, future link 5216 * speed changes cause interrupts and are handled by nv_link_irq(). 5217 */ 5218 { 5219 u32 miistat; 5220 miistat = readl(base + NvRegMIIStatus); 5221 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5222 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); 5223 } 5224 /* set linkspeed to invalid value, thus force nv_update_linkspeed 5225 * to init hw */ 5226 np->linkspeed = 0; 5227 ret = nv_update_linkspeed(dev); 5228 nv_start_rxtx(dev); 5229 netif_start_queue(dev); 5230#ifdef CONFIG_FORCEDETH_NAPI 5231 napi_enable(&np->napi); 5232#endif 5233 5234 if (ret) { 5235 netif_carrier_on(dev); 5236 } else { 5237 printk(KERN_INFO "%s: no link during initialization.\n", dev->name); 5238 netif_carrier_off(dev); 5239 } 5240 if (oom) 5241 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5242 5243 /* start statistics timer */ 5244 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) 5245 mod_timer(&np->stats_poll, 5246 round_jiffies(jiffies + STATS_INTERVAL)); 5247 5248 spin_unlock_irq(&np->lock); 5249 5250 return 0; 5251out_drain: 5252 nv_drain_rxtx(dev); 5253 return ret; 5254} 5255 5256static int nv_close(struct net_device *dev) 5257{ 5258 struct fe_priv *np = netdev_priv(dev); 5259 u8 __iomem *base; 5260 5261 spin_lock_irq(&np->lock); 5262 np->in_shutdown = 1; 5263 spin_unlock_irq(&np->lock); 5264#ifdef CONFIG_FORCEDETH_NAPI 5265 napi_disable(&np->napi); 5266#endif 5267 synchronize_irq(np->pci_dev->irq); 5268 5269 del_timer_sync(&np->oom_kick); 5270 del_timer_sync(&np->nic_poll); 5271 del_timer_sync(&np->stats_poll); 5272 5273 netif_stop_queue(dev); 5274 spin_lock_irq(&np->lock); 5275 nv_stop_rxtx(dev); 5276 nv_txrx_reset(dev); 5277 5278 /* disable interrupts on the nic or we will lock up */ 5279 base = get_hwbase(dev); 5280 nv_disable_hw_interrupts(dev, np->irqmask); 5281 pci_push(base); 5282 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); 5283 5284 spin_unlock_irq(&np->lock); 5285 5286 nv_free_irq(dev); 5287 5288 nv_drain_rxtx(dev); 5289 5290 if (np->wolenabled) { 5291 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5292 nv_start_rx(dev); 5293 } 5294 5295 /* FIXME: power down nic */ 5296 5297 return 0; 5298} 5299 5300static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 
5301{ 5302 struct net_device *dev; 5303 struct fe_priv *np; 5304 unsigned long addr; 5305 u8 __iomem *base; 5306 int err, i; 5307 u32 powerstate, txreg; 5308 u32 phystate_orig = 0, phystate; 5309 int phyinitialized = 0; 5310 DECLARE_MAC_BUF(mac); 5311 static int printed_version; 5312 5313 if (!printed_version++) 5314 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet" 5315 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION); 5316 5317 dev = alloc_etherdev(sizeof(struct fe_priv)); 5318 err = -ENOMEM; 5319 if (!dev) 5320 goto out; 5321 5322 np = netdev_priv(dev); 5323 np->dev = dev; 5324 np->pci_dev = pci_dev; 5325 spin_lock_init(&np->lock); 5326 SET_NETDEV_DEV(dev, &pci_dev->dev); 5327 5328 init_timer(&np->oom_kick); 5329 np->oom_kick.data = (unsigned long) dev; 5330 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ 5331 init_timer(&np->nic_poll); 5332 np->nic_poll.data = (unsigned long) dev; 5333 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ 5334 init_timer(&np->stats_poll); 5335 np->stats_poll.data = (unsigned long) dev; 5336 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ 5337 5338 err = pci_enable_device(pci_dev); 5339 if (err) 5340 goto out_free; 5341 5342 pci_set_master(pci_dev); 5343 5344 err = pci_request_regions(pci_dev, DRV_NAME); 5345 if (err < 0) 5346 goto out_disable; 5347 5348 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) 5349 np->register_size = NV_PCI_REGSZ_VER3; 5350 else if (id->driver_data & DEV_HAS_STATISTICS_V1) 5351 np->register_size = NV_PCI_REGSZ_VER2; 5352 else 5353 np->register_size = NV_PCI_REGSZ_VER1; 5354 5355 err = -EINVAL; 5356 addr = 0; 5357 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5358 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", 5359 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), 5360 pci_resource_len(pci_dev, i), 5361 pci_resource_flags(pci_dev, i)); 5362 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 5363 pci_resource_len(pci_dev, i) >= np->register_size) { 5364 addr = pci_resource_start(pci_dev, i); 5365 break; 5366 } 5367 } 5368 if (i == DEVICE_COUNT_RESOURCE) { 5369 dev_printk(KERN_INFO, &pci_dev->dev, 5370 "Couldn't find register window\n"); 5371 goto out_relreg; 5372 } 5373 5374 /* copy of driver data */ 5375 np->driver_data = id->driver_data; 5376 /* copy of device id */ 5377 np->device_id = id->device; 5378 5379 /* handle different descriptor versions */ 5380 if (id->driver_data & DEV_HAS_HIGH_DMA) { 5381 /* packet format 3: supports 40-bit addressing */ 5382 np->desc_ver = DESC_VER_3; 5383 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 5384 if (dma_64bit) { 5385 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) 5386 dev_printk(KERN_INFO, &pci_dev->dev, 5387 "64-bit DMA failed, using 32-bit addressing\n"); 5388 else 5389 dev->features |= NETIF_F_HIGHDMA; 5390 if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) { 5391 dev_printk(KERN_INFO, &pci_dev->dev, 5392 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); 5393 } 5394 } 5395 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 5396 /* packet format 2: supports jumbo frames */ 5397 np->desc_ver = DESC_VER_2; 5398 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; 5399 } else { 5400 /* original packet format */ 5401 np->desc_ver = DESC_VER_1; 5402 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; 5403 } 5404 5405 np->pkt_limit = NV_PKTLIMIT_1; 5406 if (id->driver_data & DEV_HAS_LARGEDESC) 5407 np->pkt_limit = NV_PKTLIMIT_2; 5408 5409 if (id->driver_data & 
DEV_HAS_CHECKSUM) { 5410 np->rx_csum = 1; 5411 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 5412 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 5413 dev->features |= NETIF_F_TSO; 5414 } 5415 5416 np->vlanctl_bits = 0; 5417 if (id->driver_data & DEV_HAS_VLAN) { 5418 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; 5419 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; 5420 dev->vlan_rx_register = nv_vlan_rx_register; 5421 } 5422 5423 np->msi_flags = 0; 5424 if ((id->driver_data & DEV_HAS_MSI) && msi) { 5425 np->msi_flags |= NV_MSI_CAPABLE; 5426 } 5427 if ((id->driver_data & DEV_HAS_MSI_X) && msix) { 5428 np->msi_flags |= NV_MSI_X_CAPABLE; 5429 } 5430 5431 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; 5432 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) || 5433 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) || 5434 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) { 5435 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; 5436 } 5437 5438 5439 err = -ENOMEM; 5440 np->base = ioremap(addr, np->register_size); 5441 if (!np->base) 5442 goto out_relreg; 5443 dev->base_addr = (unsigned long)np->base; 5444 5445 dev->irq = pci_dev->irq; 5446 5447 np->rx_ring_size = RX_RING_DEFAULT; 5448 np->tx_ring_size = TX_RING_DEFAULT; 5449 5450 if (!nv_optimized(np)) { 5451 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 5452 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 5453 &np->ring_addr); 5454 if (!np->rx_ring.orig) 5455 goto out_unmap; 5456 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 5457 } else { 5458 np->rx_ring.ex = pci_alloc_consistent(pci_dev, 5459 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 5460 &np->ring_addr); 5461 if (!np->rx_ring.ex) 5462 goto out_unmap; 5463 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 5464 } 5465 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); 5466 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); 5467 if (!np->rx_skb || !np->tx_skb) 5468 goto out_freering; 5469 5470 dev->open = nv_open; 5471 dev->stop = nv_close; 5472 5473 if (!nv_optimized(np)) 5474 dev->hard_start_xmit = nv_start_xmit; 5475 else 5476 dev->hard_start_xmit = nv_start_xmit_optimized; 5477 dev->get_stats = nv_get_stats; 5478 dev->change_mtu = nv_change_mtu; 5479 dev->set_mac_address = nv_set_mac_address; 5480 dev->set_multicast_list = nv_set_multicast; 5481#ifdef CONFIG_NET_POLL_CONTROLLER 5482 dev->poll_controller = nv_poll_controller; 5483#endif 5484#ifdef CONFIG_FORCEDETH_NAPI 5485 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); 5486#endif 5487 SET_ETHTOOL_OPS(dev, &ops); 5488 dev->tx_timeout = nv_tx_timeout; 5489 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 5490 5491 pci_set_drvdata(pci_dev, dev); 5492 5493 /* read the mac address */ 5494 base = get_hwbase(dev); 5495 np->orig_mac[0] = readl(base + NvRegMacAddrA); 5496 np->orig_mac[1] = readl(base + NvRegMacAddrB); 5497 5498 /* check the workaround bit for correct mac address order */ 5499 txreg = readl(base + NvRegTransmitPoll); 5500 if (id->driver_data & DEV_HAS_CORRECT_MACADDR) { 5501 /* mac address is already in correct order */ 5502 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; 5503 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; 5504 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; 5505 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; 5506 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; 5507 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; 5508 } 
else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { 5509 /* mac address is already in correct order */ 5510 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; 5511 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; 5512 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; 5513 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; 5514 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; 5515 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; 5516 /* 5517 * Set orig mac address back to the reversed version. 5518 * This flag will be cleared during low power transition. 5519 * Therefore, we should always put back the reversed address. 5520 */ 5521 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) + 5522 (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24); 5523 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8); 5524 } else { 5525 /* need to reverse mac address to correct order */ 5526 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; 5527 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; 5528 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; 5529 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; 5530 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 5531 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 5532 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5533 } 5534 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 5535 5536 if (!is_valid_ether_addr(dev->perm_addr)) { 5537 /* 5538 * Bad mac address. At least one bios sets the mac address 5539 * to 01:23:45:67:89:ab 5540 */ 5541 dev_printk(KERN_ERR, &pci_dev->dev, 5542 "Invalid Mac address detected: %s\n", 5543 print_mac(mac, dev->dev_addr)); 5544 dev_printk(KERN_ERR, &pci_dev->dev, 5545 "Please complain to your hardware vendor. Switching to a random MAC.\n"); 5546 dev->dev_addr[0] = 0x00; 5547 dev->dev_addr[1] = 0x00; 5548 dev->dev_addr[2] = 0x6c; 5549 get_random_bytes(&dev->dev_addr[3], 3); 5550 } 5551 5552 dprintk(KERN_DEBUG "%s: MAC Address %s\n", 5553 pci_name(pci_dev), print_mac(mac, dev->dev_addr)); 5554 5555 /* set mac address */ 5556 nv_copy_mac_to_hw(dev); 5557 5558 /* disable WOL */ 5559 writel(0, base + NvRegWakeUpFlags); 5560 np->wolenabled = 0; 5561 5562 if (id->driver_data & DEV_HAS_POWER_CNTRL) { 5563 5564 /* take phy and nic out of low power mode */ 5565 powerstate = readl(base + NvRegPowerState2); 5566 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; 5567 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || 5568 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) && 5569 pci_dev->revision >= 0xA3) 5570 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; 5571 writel(powerstate, base + NvRegPowerState2); 5572 } 5573 5574 if (np->desc_ver == DESC_VER_1) { 5575 np->tx_flags = NV_TX_VALID; 5576 } else { 5577 np->tx_flags = NV_TX2_VALID; 5578 } 5579 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { 5580 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 5581 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 5582 np->msi_flags |= 0x0003; 5583 } else { 5584 np->irqmask = NVREG_IRQMASK_CPU; 5585 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 5586 np->msi_flags |= 0x0001; 5587 } 5588 5589 if (id->driver_data & DEV_NEED_TIMERIRQ) 5590 np->irqmask |= NVREG_IRQ_TIMER; 5591 if (id->driver_data & DEV_NEED_LINKTIMER) { 5592 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev)); 5593 np->need_linktimer = 1; 5594 np->link_timeout = jiffies + LINK_TIMEOUT; 5595 } else { 5596 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev)); 5597 np->need_linktimer 
= 0; 5598 } 5599 5600 /* Limit the number of tx's outstanding for hw bug */ 5601 if (id->driver_data & DEV_NEED_TX_LIMIT) { 5602 np->tx_limit = 1; 5603 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 || 5604 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 || 5605 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 || 5606 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 || 5607 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 || 5608 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 || 5609 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 || 5610 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) && 5611 pci_dev->revision >= 0xA2) 5612 np->tx_limit = 0; 5613 } 5614 5615 /* clear phy state and temporarily halt phy interrupts */ 5616 writel(0, base + NvRegMIIMask); 5617 phystate = readl(base + NvRegAdapterControl); 5618 if (phystate & NVREG_ADAPTCTL_RUNNING) { 5619 phystate_orig = 1; 5620 phystate &= ~NVREG_ADAPTCTL_RUNNING; 5621 writel(phystate, base + NvRegAdapterControl); 5622 } 5623 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5624 5625 if (id->driver_data & DEV_HAS_MGMT_UNIT) { 5626 /* management unit running on the mac? */ 5627 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) { 5628 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; 5629 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use); 5630 if (nv_mgmt_acquire_sema(dev)) { 5631 /* management unit setup the phy already? */ 5632 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == 5633 NVREG_XMITCTL_SYNC_PHY_INIT) { 5634 /* phy is inited by mgmt unit */ 5635 phyinitialized = 1; 5636 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); 5637 } else { 5638 /* we need to init the phy */ 5639 } 5640 } 5641 } 5642 } 5643 5644 /* find a suitable phy */ 5645 for (i = 1; i <= 32; i++) { 5646 int id1, id2; 5647 int phyaddr = i & 0x1F; 5648 5649 spin_lock_irq(&np->lock); 5650 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ); 5651 spin_unlock_irq(&np->lock); 5652 if (id1 < 0 || id1 == 0xffff) 5653 continue; 5654 spin_lock_irq(&np->lock); 5655 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ); 5656 spin_unlock_irq(&np->lock); 5657 if (id2 < 0 || id2 == 0xffff) 5658 continue; 5659 5660 np->phy_model = id2 & PHYID2_MODEL_MASK; 5661 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; 5662 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; 5663 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", 5664 pci_name(pci_dev), id1, id2, phyaddr); 5665 np->phyaddr = phyaddr; 5666 np->phy_oui = id1 | id2; 5667 5668 /* Realtek hardcoded phy id1 to all zero's on certain phys */ 5669 if (np->phy_oui == PHY_OUI_REALTEK2) 5670 np->phy_oui = PHY_OUI_REALTEK; 5671 /* Setup phy revision for Realtek */ 5672 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211) 5673 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK; 5674 5675 break; 5676 } 5677 if (i == 33) { 5678 dev_printk(KERN_INFO, &pci_dev->dev, 5679 "open: Could not find a valid PHY.\n"); 5680 goto out_error; 5681 } 5682 5683 if (!phyinitialized) { 5684 /* reset it */ 5685 phy_init(dev); 5686 } else { 5687 /* see if it is a gigabit phy */ 5688 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 5689 if (mii_status & PHY_GIGABIT) { 5690 np->gigabit = PHY_GIGABIT; 5691 } 5692 } 5693 5694 /* set default link speed settings */ 5695 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 5696 np->duplex = 0; 5697 
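	/*
	 * Note (added): these defaults are only placeholders; nv_open()
	 * clears np->linkspeed again so that nv_update_linkspeed() derives
	 * the real speed/duplex from the PHY once the interface is up.
	 */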
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "unable to register netdev: %d\n", err);
		goto out_error;
	}

	dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
		   "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		   dev->name,
		   np->phy_oui,
		   np->phyaddr,
		   dev->dev_addr[0],
		   dev->dev_addr[1],
		   dev->dev_addr[2],
		   dev->dev_addr[3],
		   dev->dev_addr[4],
		   dev->dev_addr[5]);

	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		   dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ?
			"csum " : "",
		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
			"vlan " : "",
		   id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		   id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		   id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		   np->gigabit == PHY_GIGABIT ? "gbit " : "",
		   np->need_linktimer ? "lnktim " : "",
		   np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		   np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		   np->desc_ver);

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}

static void nv_restore_phy(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u16 phy_reserved, mii_control;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT8;
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);

		/* restart auto negotiation */
		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
	}
}

static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
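	 * (Added note: the workaround state is tracked via the
	 * NVREG_TRANSMITPOLL_MAC_ADDR_REV bit, which is cleared again below
	 * so that a later probe re-detects the original ordering.)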
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);

	/* restore any phy related changes */
	nv_restore_phy(dev);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}

#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);

	/* Gross: simply close the device for suspend; nv_resume() reopens it. */
	nv_close(dev);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
	return 0;
}

static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	u8 __iomem *base = get_hwbase(dev);
	int rc = 0;
	u32 txreg;

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	/* restore mac address reverse flag */
	txreg = readl(base + NvRegTransmitPoll);
	txreg |= NVREG_TRANSMITPOLL_MAC_ADDR_REV;
	writel(txreg, base + NvRegTransmitPoll);

	rc = nv_open(dev);
	nv_set_multicast(dev);
out:
	return rc;
}
#else
#define nv_suspend NULL
#define nv_resume NULL
#endif /* CONFIG_PM */

static struct pci_device_id pci_tbl[] = {
	{ /* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data =
			DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
		.driver_data =
			DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA,
			   PCI_DEVICE_ID_NVIDIA_NVENET_38),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{0,},
};

static struct pci_driver driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= __devexit_p(nv_remove),
	.suspend	= nv_suspend,
	.resume		= nv_resume,
};

static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, computed as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);
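/*
 * Example usage (illustrative only, not part of the driver): the module
 * parameters declared above can be set at load time, e.g.
 *
 *   modprobe forcedeth optimization_mode=1 msi=0 dma_64bit=1
 *
 * "forcedeth" matches DRV_NAME and the parameter names match the
 * module_param() declarations above.
 */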