/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION		"0.61"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif

#define TX_WORK_PER_LOOP  64
#define RX_WORK_PER_LOOP  64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x00001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x00002  /* poll link settings.
Relies on the timer irq */ 82#define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */ 83#define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */ 84#define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */ 85#define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and striping */ 86#define DEV_HAS_MSI 0x00040 /* device supports MSI */ 87#define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */ 88#define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */ 89#define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */ 90#define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */ 91#define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */ 92#define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */ 93#define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */ 94#define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */ 95#define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */ 96#define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */ 97#define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */ 98#define DEV_NEED_TX_LIMIT 0x40000 /* device needs to limit tx */ 99#define DEV_HAS_GEAR_MODE 0x80000 /* device supports gear mode */ 100 101enum { 102 NvRegIrqStatus = 0x000, 103#define NVREG_IRQSTAT_MIIEVENT 0x040 104#define NVREG_IRQSTAT_MASK 0x81ff 105 NvRegIrqMask = 0x004, 106#define NVREG_IRQ_RX_ERROR 0x0001 107#define NVREG_IRQ_RX 0x0002 108#define NVREG_IRQ_RX_NOBUF 0x0004 109#define NVREG_IRQ_TX_ERR 0x0008 110#define NVREG_IRQ_TX_OK 0x0010 111#define NVREG_IRQ_TIMER 0x0020 112#define NVREG_IRQ_LINK 0x0040 113#define NVREG_IRQ_RX_FORCED 0x0080 114#define NVREG_IRQ_TX_FORCED 0x0100 115#define NVREG_IRQ_RECOVER_ERROR 0x8000 116#define NVREG_IRQMASK_THROUGHPUT 0x00df 117#define NVREG_IRQMASK_CPU 0x0060 118#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) 119#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED) 120#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR) 121 122#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \ 123 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \ 124 NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR)) 125 126 NvRegUnknownSetupReg6 = 0x008, 127#define NVREG_UNKSETUP6_VAL 3 128 129/* 130 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic 131 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms 132 */ 133 NvRegPollingInterval = 0x00c, 134#define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */ 135#define NVREG_POLL_DEFAULT_CPU 13 136 NvRegMSIMap0 = 0x020, 137 NvRegMSIMap1 = 0x024, 138 NvRegMSIIrqMask = 0x030, 139#define NVREG_MSI_VECTOR_0_ENABLED 0x01 140 NvRegMisc1 = 0x080, 141#define NVREG_MISC1_PAUSE_TX 0x01 142#define NVREG_MISC1_HD 0x02 143#define NVREG_MISC1_FORCE 0x3b0f3c 144 145 NvRegMacReset = 0x34, 146#define NVREG_MAC_RESET_ASSERT 0x0F3 147 NvRegTransmitterControl = 0x084, 148#define NVREG_XMITCTL_START 0x01 149#define NVREG_XMITCTL_MGMT_ST 0x40000000 150#define NVREG_XMITCTL_SYNC_MASK 0x000f0000 151#define NVREG_XMITCTL_SYNC_NOT_READY 0x0 152#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000 153#define 
NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00 154#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0 155#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000 156#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000 157#define NVREG_XMITCTL_HOST_LOADED 0x00004000 158#define NVREG_XMITCTL_TX_PATH_EN 0x01000000 159 NvRegTransmitterStatus = 0x088, 160#define NVREG_XMITSTAT_BUSY 0x01 161 162 NvRegPacketFilterFlags = 0x8c, 163#define NVREG_PFF_PAUSE_RX 0x08 164#define NVREG_PFF_ALWAYS 0x7F0000 165#define NVREG_PFF_PROMISC 0x80 166#define NVREG_PFF_MYADDR 0x20 167#define NVREG_PFF_LOOPBACK 0x10 168 169 NvRegOffloadConfig = 0x90, 170#define NVREG_OFFLOAD_HOMEPHY 0x601 171#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE 172 NvRegReceiverControl = 0x094, 173#define NVREG_RCVCTL_START 0x01 174#define NVREG_RCVCTL_RX_PATH_EN 0x01000000 175 NvRegReceiverStatus = 0x98, 176#define NVREG_RCVSTAT_BUSY 0x01 177 178 NvRegSlotTime = 0x9c, 179#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000 180#define NVREG_SLOTTIME_10_100_FULL 0x00007f00 181#define NVREG_SLOTTIME_1000_FULL 0x0003ff00 182#define NVREG_SLOTTIME_HALF 0x0000ff00 183#define NVREG_SLOTTIME_DEFAULT 0x00007f00 184#define NVREG_SLOTTIME_MASK 0x000000ff 185 186 NvRegTxDeferral = 0xA0, 187#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f 188#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f 189#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f 190#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f 191#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f 192#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000 193 NvRegRxDeferral = 0xA4, 194#define NVREG_RX_DEFERRAL_DEFAULT 0x16 195 NvRegMacAddrA = 0xA8, 196 NvRegMacAddrB = 0xAC, 197 NvRegMulticastAddrA = 0xB0, 198#define NVREG_MCASTADDRA_FORCE 0x01 199 NvRegMulticastAddrB = 0xB4, 200 NvRegMulticastMaskA = 0xB8, 201#define NVREG_MCASTMASKA_NONE 0xffffffff 202 NvRegMulticastMaskB = 0xBC, 203#define NVREG_MCASTMASKB_NONE 0xffff 204 205 NvRegPhyInterface = 0xC0, 206#define PHY_RGMII 0x10000000 207 NvRegBackOffControl = 0xC4, 208#define NVREG_BKOFFCTRL_DEFAULT 0x70000000 209#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff 210#define NVREG_BKOFFCTRL_SELECT 24 211#define NVREG_BKOFFCTRL_GEAR 12 212 213 NvRegTxRingPhysAddr = 0x100, 214 NvRegRxRingPhysAddr = 0x104, 215 NvRegRingSizes = 0x108, 216#define NVREG_RINGSZ_TXSHIFT 0 217#define NVREG_RINGSZ_RXSHIFT 16 218 NvRegTransmitPoll = 0x10c, 219#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000 220 NvRegLinkSpeed = 0x110, 221#define NVREG_LINKSPEED_FORCE 0x10000 222#define NVREG_LINKSPEED_10 1000 223#define NVREG_LINKSPEED_100 100 224#define NVREG_LINKSPEED_1000 50 225#define NVREG_LINKSPEED_MASK (0xFFF) 226 NvRegUnknownSetupReg5 = 0x130, 227#define NVREG_UNKSETUP5_BIT31 (1<<31) 228 NvRegTxWatermark = 0x13c, 229#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010 230#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000 231#define NVREG_TX_WM_DESC2_3_1000 0xfe08000 232 NvRegTxRxControl = 0x144, 233#define NVREG_TXRXCTL_KICK 0x0001 234#define NVREG_TXRXCTL_BIT1 0x0002 235#define NVREG_TXRXCTL_BIT2 0x0004 236#define NVREG_TXRXCTL_IDLE 0x0008 237#define NVREG_TXRXCTL_RESET 0x0010 238#define NVREG_TXRXCTL_RXCHECK 0x0400 239#define NVREG_TXRXCTL_DESC_1 0 240#define NVREG_TXRXCTL_DESC_2 0x002100 241#define NVREG_TXRXCTL_DESC_3 0xc02200 242#define NVREG_TXRXCTL_VLANSTRIP 0x00040 243#define NVREG_TXRXCTL_VLANINS 0x00080 244 NvRegTxRingPhysAddrHigh = 0x148, 245 NvRegRxRingPhysAddrHigh = 0x14C, 246 NvRegTxPauseFrame = 0x170, 247#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080 248#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 249#define 
NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 250#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 251 NvRegMIIStatus = 0x180, 252#define NVREG_MIISTAT_ERROR 0x0001 253#define NVREG_MIISTAT_LINKCHANGE 0x0008 254#define NVREG_MIISTAT_MASK_RW 0x0007 255#define NVREG_MIISTAT_MASK_ALL 0x000f 256 NvRegMIIMask = 0x184, 257#define NVREG_MII_LINKCHANGE 0x0008 258 259 NvRegAdapterControl = 0x188, 260#define NVREG_ADAPTCTL_START 0x02 261#define NVREG_ADAPTCTL_LINKUP 0x04 262#define NVREG_ADAPTCTL_PHYVALID 0x40000 263#define NVREG_ADAPTCTL_RUNNING 0x100000 264#define NVREG_ADAPTCTL_PHYSHIFT 24 265 NvRegMIISpeed = 0x18c, 266#define NVREG_MIISPEED_BIT8 (1<<8) 267#define NVREG_MIIDELAY 5 268 NvRegMIIControl = 0x190, 269#define NVREG_MIICTL_INUSE 0x08000 270#define NVREG_MIICTL_WRITE 0x00400 271#define NVREG_MIICTL_ADDRSHIFT 5 272 NvRegMIIData = 0x194, 273 NvRegWakeUpFlags = 0x200, 274#define NVREG_WAKEUPFLAGS_VAL 0x7770 275#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 276#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16 277#define NVREG_WAKEUPFLAGS_D3SHIFT 12 278#define NVREG_WAKEUPFLAGS_D2SHIFT 8 279#define NVREG_WAKEUPFLAGS_D1SHIFT 4 280#define NVREG_WAKEUPFLAGS_D0SHIFT 0 281#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01 282#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02 283#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04 284#define NVREG_WAKEUPFLAGS_ENABLE 0x1111 285 286 NvRegPatternCRC = 0x204, 287 NvRegPatternMask = 0x208, 288 NvRegPowerCap = 0x268, 289#define NVREG_POWERCAP_D3SUPP (1<<30) 290#define NVREG_POWERCAP_D2SUPP (1<<26) 291#define NVREG_POWERCAP_D1SUPP (1<<25) 292 NvRegPowerState = 0x26c, 293#define NVREG_POWERSTATE_POWEREDUP 0x8000 294#define NVREG_POWERSTATE_VALID 0x0100 295#define NVREG_POWERSTATE_MASK 0x0003 296#define NVREG_POWERSTATE_D0 0x0000 297#define NVREG_POWERSTATE_D1 0x0001 298#define NVREG_POWERSTATE_D2 0x0002 299#define NVREG_POWERSTATE_D3 0x0003 300 NvRegTxCnt = 0x280, 301 NvRegTxZeroReXmt = 0x284, 302 NvRegTxOneReXmt = 0x288, 303 NvRegTxManyReXmt = 0x28c, 304 NvRegTxLateCol = 0x290, 305 NvRegTxUnderflow = 0x294, 306 NvRegTxLossCarrier = 0x298, 307 NvRegTxExcessDef = 0x29c, 308 NvRegTxRetryErr = 0x2a0, 309 NvRegRxFrameErr = 0x2a4, 310 NvRegRxExtraByte = 0x2a8, 311 NvRegRxLateCol = 0x2ac, 312 NvRegRxRunt = 0x2b0, 313 NvRegRxFrameTooLong = 0x2b4, 314 NvRegRxOverflow = 0x2b8, 315 NvRegRxFCSErr = 0x2bc, 316 NvRegRxFrameAlignErr = 0x2c0, 317 NvRegRxLenErr = 0x2c4, 318 NvRegRxUnicast = 0x2c8, 319 NvRegRxMulticast = 0x2cc, 320 NvRegRxBroadcast = 0x2d0, 321 NvRegTxDef = 0x2d4, 322 NvRegTxFrame = 0x2d8, 323 NvRegRxCnt = 0x2dc, 324 NvRegTxPause = 0x2e0, 325 NvRegRxPause = 0x2e4, 326 NvRegRxDropFrame = 0x2e8, 327 NvRegVlanControl = 0x300, 328#define NVREG_VLANCONTROL_ENABLE 0x2000 329 NvRegMSIXMap0 = 0x3e0, 330 NvRegMSIXMap1 = 0x3e4, 331 NvRegMSIXIrqStatus = 0x3f0, 332 333 NvRegPowerState2 = 0x600, 334#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11 335#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001 336#define NVREG_POWERSTATE2_PHY_RESET 0x0004 337}; 338 339/* Big endian: should work, but is untested */ 340struct ring_desc { 341 __le32 buf; 342 __le32 flaglen; 343}; 344 345struct ring_desc_ex { 346 __le32 bufhigh; 347 __le32 buflow; 348 __le32 txvlan; 349 __le32 flaglen; 350}; 351 352union ring_type { 353 struct ring_desc* orig; 354 struct ring_desc_ex* ex; 355}; 356 357#define FLAG_MASK_V1 0xffff0000 358#define FLAG_MASK_V2 0xffffc000 359#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1) 360#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2) 361 362#define NV_TX_LASTPACKET (1<<16) 363#define NV_TX_RETRYERROR (1<<19) 
364#define NV_TX_RETRYCOUNT_MASK (0xF<<20) 365#define NV_TX_FORCED_INTERRUPT (1<<24) 366#define NV_TX_DEFERRED (1<<26) 367#define NV_TX_CARRIERLOST (1<<27) 368#define NV_TX_LATECOLLISION (1<<28) 369#define NV_TX_UNDERFLOW (1<<29) 370#define NV_TX_ERROR (1<<30) 371#define NV_TX_VALID (1<<31) 372 373#define NV_TX2_LASTPACKET (1<<29) 374#define NV_TX2_RETRYERROR (1<<18) 375#define NV_TX2_RETRYCOUNT_MASK (0xF<<19) 376#define NV_TX2_FORCED_INTERRUPT (1<<30) 377#define NV_TX2_DEFERRED (1<<25) 378#define NV_TX2_CARRIERLOST (1<<26) 379#define NV_TX2_LATECOLLISION (1<<27) 380#define NV_TX2_UNDERFLOW (1<<28) 381/* error and valid are the same for both */ 382#define NV_TX2_ERROR (1<<30) 383#define NV_TX2_VALID (1<<31) 384#define NV_TX2_TSO (1<<28) 385#define NV_TX2_TSO_SHIFT 14 386#define NV_TX2_TSO_MAX_SHIFT 14 387#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT) 388#define NV_TX2_CHECKSUM_L3 (1<<27) 389#define NV_TX2_CHECKSUM_L4 (1<<26) 390 391#define NV_TX3_VLAN_TAG_PRESENT (1<<18) 392 393#define NV_RX_DESCRIPTORVALID (1<<16) 394#define NV_RX_MISSEDFRAME (1<<17) 395#define NV_RX_SUBSTRACT1 (1<<18) 396#define NV_RX_ERROR1 (1<<23) 397#define NV_RX_ERROR2 (1<<24) 398#define NV_RX_ERROR3 (1<<25) 399#define NV_RX_ERROR4 (1<<26) 400#define NV_RX_CRCERR (1<<27) 401#define NV_RX_OVERFLOW (1<<28) 402#define NV_RX_FRAMINGERR (1<<29) 403#define NV_RX_ERROR (1<<30) 404#define NV_RX_AVAIL (1<<31) 405 406#define NV_RX2_CHECKSUMMASK (0x1C000000) 407#define NV_RX2_CHECKSUM_IP (0x10000000) 408#define NV_RX2_CHECKSUM_IP_TCP (0x14000000) 409#define NV_RX2_CHECKSUM_IP_UDP (0x18000000) 410#define NV_RX2_DESCRIPTORVALID (1<<29) 411#define NV_RX2_SUBSTRACT1 (1<<25) 412#define NV_RX2_ERROR1 (1<<18) 413#define NV_RX2_ERROR2 (1<<19) 414#define NV_RX2_ERROR3 (1<<20) 415#define NV_RX2_ERROR4 (1<<21) 416#define NV_RX2_CRCERR (1<<22) 417#define NV_RX2_OVERFLOW (1<<23) 418#define NV_RX2_FRAMINGERR (1<<24) 419/* error and avail are the same for both */ 420#define NV_RX2_ERROR (1<<30) 421#define NV_RX2_AVAIL (1<<31) 422 423#define NV_RX3_VLAN_TAG_PRESENT (1<<16) 424#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) 425 426/* Miscelaneous hardware related defines: */ 427#define NV_PCI_REGSZ_VER1 0x270 428#define NV_PCI_REGSZ_VER2 0x2d4 429#define NV_PCI_REGSZ_VER3 0x604 430#define NV_PCI_REGSZ_MAX 0x604 431 432/* various timeout delays: all in usec */ 433#define NV_TXRX_RESET_DELAY 4 434#define NV_TXSTOP_DELAY1 10 435#define NV_TXSTOP_DELAY1MAX 500000 436#define NV_TXSTOP_DELAY2 100 437#define NV_RXSTOP_DELAY1 10 438#define NV_RXSTOP_DELAY1MAX 500000 439#define NV_RXSTOP_DELAY2 100 440#define NV_SETUP5_DELAY 5 441#define NV_SETUP5_DELAYMAX 50000 442#define NV_POWERUP_DELAY 5 443#define NV_POWERUP_DELAYMAX 5000 444#define NV_MIIBUSY_DELAY 50 445#define NV_MIIPHY_DELAY 10 446#define NV_MIIPHY_DELAYMAX 10000 447#define NV_MAC_RESET_DELAY 64 448 449#define NV_WAKEUPPATTERNS 5 450#define NV_WAKEUPMASKENTRIES 4 451 452/* General driver defaults */ 453#define NV_WATCHDOG_TIMEO (5*HZ) 454 455#define RX_RING_DEFAULT 128 456#define TX_RING_DEFAULT 256 457#define RX_RING_MIN 128 458#define TX_RING_MIN 64 459#define RING_MAX_DESC_VER_1 1024 460#define RING_MAX_DESC_VER_2_3 16384 461 462/* rx/tx mac addr + type + vlan + align + slack*/ 463#define NV_RX_HEADERS (64) 464/* even more slack. 
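 *
 * (Editor's cross-reference, not original driver text: this pad is added on
 * top of np->rx_buf_sz when receive skbs are allocated further below, e.g.
 *
 *	skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
 *
 * in nv_alloc_rx() and nv_alloc_rx_optimized().)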
*/ 465#define NV_RX_ALLOC_PAD (64) 466 467/* maximum mtu size */ 468#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */ 469#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */ 470 471#define OOM_REFILL (1+HZ/20) 472#define POLL_WAIT (1+HZ/100) 473#define LINK_TIMEOUT (3*HZ) 474#define STATS_INTERVAL (10*HZ) 475 476/* 477 * desc_ver values: 478 * The nic supports three different descriptor types: 479 * - DESC_VER_1: Original 480 * - DESC_VER_2: support for jumbo frames. 481 * - DESC_VER_3: 64-bit format. 482 */ 483#define DESC_VER_1 1 484#define DESC_VER_2 2 485#define DESC_VER_3 3 486 487/* PHY defines */ 488#define PHY_OUI_MARVELL 0x5043 489#define PHY_OUI_CICADA 0x03f1 490#define PHY_OUI_VITESSE 0x01c1 491#define PHY_OUI_REALTEK 0x0732 492#define PHY_OUI_REALTEK2 0x0020 493#define PHYID1_OUI_MASK 0x03ff 494#define PHYID1_OUI_SHFT 6 495#define PHYID2_OUI_MASK 0xfc00 496#define PHYID2_OUI_SHFT 10 497#define PHYID2_MODEL_MASK 0x03f0 498#define PHY_MODEL_REALTEK_8211 0x0110 499#define PHY_REV_MASK 0x0001 500#define PHY_REV_REALTEK_8211B 0x0000 501#define PHY_REV_REALTEK_8211C 0x0001 502#define PHY_MODEL_REALTEK_8201 0x0200 503#define PHY_MODEL_MARVELL_E3016 0x0220 504#define PHY_MARVELL_E3016_INITMASK 0x0300 505#define PHY_CICADA_INIT1 0x0f000 506#define PHY_CICADA_INIT2 0x0e00 507#define PHY_CICADA_INIT3 0x01000 508#define PHY_CICADA_INIT4 0x0200 509#define PHY_CICADA_INIT5 0x0004 510#define PHY_CICADA_INIT6 0x02000 511#define PHY_VITESSE_INIT_REG1 0x1f 512#define PHY_VITESSE_INIT_REG2 0x10 513#define PHY_VITESSE_INIT_REG3 0x11 514#define PHY_VITESSE_INIT_REG4 0x12 515#define PHY_VITESSE_INIT_MSK1 0xc 516#define PHY_VITESSE_INIT_MSK2 0x0180 517#define PHY_VITESSE_INIT1 0x52b5 518#define PHY_VITESSE_INIT2 0xaf8a 519#define PHY_VITESSE_INIT3 0x8 520#define PHY_VITESSE_INIT4 0x8f8a 521#define PHY_VITESSE_INIT5 0xaf86 522#define PHY_VITESSE_INIT6 0x8f86 523#define PHY_VITESSE_INIT7 0xaf82 524#define PHY_VITESSE_INIT8 0x0100 525#define PHY_VITESSE_INIT9 0x8f82 526#define PHY_VITESSE_INIT10 0x0 527#define PHY_REALTEK_INIT_REG1 0x1f 528#define PHY_REALTEK_INIT_REG2 0x19 529#define PHY_REALTEK_INIT_REG3 0x13 530#define PHY_REALTEK_INIT_REG4 0x14 531#define PHY_REALTEK_INIT_REG5 0x18 532#define PHY_REALTEK_INIT_REG6 0x11 533#define PHY_REALTEK_INIT_REG7 0x01 534#define PHY_REALTEK_INIT1 0x0000 535#define PHY_REALTEK_INIT2 0x8e00 536#define PHY_REALTEK_INIT3 0x0001 537#define PHY_REALTEK_INIT4 0xad17 538#define PHY_REALTEK_INIT5 0xfb54 539#define PHY_REALTEK_INIT6 0xf5c7 540#define PHY_REALTEK_INIT7 0x1000 541#define PHY_REALTEK_INIT8 0x0003 542#define PHY_REALTEK_INIT9 0x0008 543#define PHY_REALTEK_INIT10 0x0005 544#define PHY_REALTEK_INIT11 0x0200 545#define PHY_REALTEK_INIT_MSK1 0x0003 546 547#define PHY_GIGABIT 0x0100 548 549#define PHY_TIMEOUT 0x1 550#define PHY_ERROR 0x2 551 552#define PHY_100 0x1 553#define PHY_1000 0x2 554#define PHY_HALF 0x100 555 556#define NV_PAUSEFRAME_RX_CAPABLE 0x0001 557#define NV_PAUSEFRAME_TX_CAPABLE 0x0002 558#define NV_PAUSEFRAME_RX_ENABLE 0x0004 559#define NV_PAUSEFRAME_TX_ENABLE 0x0008 560#define NV_PAUSEFRAME_RX_REQ 0x0010 561#define NV_PAUSEFRAME_TX_REQ 0x0020 562#define NV_PAUSEFRAME_AUTONEG 0x0040 563 564/* MSI/MSI-X defines */ 565#define NV_MSI_X_MAX_VECTORS 8 566#define NV_MSI_X_VECTORS_MASK 0x000f 567#define NV_MSI_CAPABLE 0x0010 568#define NV_MSI_X_CAPABLE 0x0020 569#define NV_MSI_ENABLED 0x0040 570#define NV_MSI_X_ENABLED 0x0080 571 572#define NV_MSI_X_VECTOR_ALL 0x0 573#define NV_MSI_X_VECTOR_RX 0x0 574#define 
NV_MSI_X_VECTOR_TX 0x1 575#define NV_MSI_X_VECTOR_OTHER 0x2 576 577#define NV_RESTART_TX 0x1 578#define NV_RESTART_RX 0x2 579 580#define NV_TX_LIMIT_COUNT 16 581 582/* statistics */ 583struct nv_ethtool_str { 584 char name[ETH_GSTRING_LEN]; 585}; 586 587static const struct nv_ethtool_str nv_estats_str[] = { 588 { "tx_bytes" }, 589 { "tx_zero_rexmt" }, 590 { "tx_one_rexmt" }, 591 { "tx_many_rexmt" }, 592 { "tx_late_collision" }, 593 { "tx_fifo_errors" }, 594 { "tx_carrier_errors" }, 595 { "tx_excess_deferral" }, 596 { "tx_retry_error" }, 597 { "rx_frame_error" }, 598 { "rx_extra_byte" }, 599 { "rx_late_collision" }, 600 { "rx_runt" }, 601 { "rx_frame_too_long" }, 602 { "rx_over_errors" }, 603 { "rx_crc_errors" }, 604 { "rx_frame_align_error" }, 605 { "rx_length_error" }, 606 { "rx_unicast" }, 607 { "rx_multicast" }, 608 { "rx_broadcast" }, 609 { "rx_packets" }, 610 { "rx_errors_total" }, 611 { "tx_errors_total" }, 612 613 /* version 2 stats */ 614 { "tx_deferral" }, 615 { "tx_packets" }, 616 { "rx_bytes" }, 617 { "tx_pause" }, 618 { "rx_pause" }, 619 { "rx_drop_frame" } 620}; 621 622struct nv_ethtool_stats { 623 u64 tx_bytes; 624 u64 tx_zero_rexmt; 625 u64 tx_one_rexmt; 626 u64 tx_many_rexmt; 627 u64 tx_late_collision; 628 u64 tx_fifo_errors; 629 u64 tx_carrier_errors; 630 u64 tx_excess_deferral; 631 u64 tx_retry_error; 632 u64 rx_frame_error; 633 u64 rx_extra_byte; 634 u64 rx_late_collision; 635 u64 rx_runt; 636 u64 rx_frame_too_long; 637 u64 rx_over_errors; 638 u64 rx_crc_errors; 639 u64 rx_frame_align_error; 640 u64 rx_length_error; 641 u64 rx_unicast; 642 u64 rx_multicast; 643 u64 rx_broadcast; 644 u64 rx_packets; 645 u64 rx_errors_total; 646 u64 tx_errors_total; 647 648 /* version 2 stats */ 649 u64 tx_deferral; 650 u64 tx_packets; 651 u64 rx_bytes; 652 u64 tx_pause; 653 u64 rx_pause; 654 u64 rx_drop_frame; 655}; 656 657#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) 658#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) 659 660/* diagnostics */ 661#define NV_TEST_COUNT_BASE 3 662#define NV_TEST_COUNT_EXTENDED 4 663 664static const struct nv_ethtool_str nv_etests_str[] = { 665 { "link (online/offline)" }, 666 { "register (offline) " }, 667 { "interrupt (offline) " }, 668 { "loopback (offline) " } 669}; 670 671struct register_test { 672 __u32 reg; 673 __u32 mask; 674}; 675 676static const struct register_test nv_registers_test[] = { 677 { NvRegUnknownSetupReg6, 0x01 }, 678 { NvRegMisc1, 0x03c }, 679 { NvRegOffloadConfig, 0x03ff }, 680 { NvRegMulticastAddrA, 0xffffffff }, 681 { NvRegTxWatermark, 0x0ff }, 682 { NvRegWakeUpFlags, 0x07777 }, 683 { 0,0 } 684}; 685 686struct nv_skb_map { 687 struct sk_buff *skb; 688 dma_addr_t dma; 689 unsigned int dma_len; 690 struct ring_desc_ex *first_tx_desc; 691 struct nv_skb_map *next_tx_ctx; 692}; 693 694/* 695 * SMP locking: 696 * All hardware access under dev->priv->lock, except the performance 697 * critical parts: 698 * - rx is (pseudo-) lockless: it relies on the single-threading provided 699 * by the arch code for interrupts. 700 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission 701 * needs dev->priv->lock :-( 702 * - set_multicast_list: preparation lockless, relies on netif_tx_lock. 
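 *
 * (Editor's sketch, not original driver text: for the non-fast-path cases the
 * note above implies the usual pattern is
 *
 *	spin_lock_irq(&np->lock);
 *	... touch hardware registers / shared driver state ...
 *	spin_unlock_irq(&np->lock);
 *
 * as seen for example in nv_do_rx_refill() further below, while rx processing
 * and tx setup deliberately stay outside this lock.)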
703 */ 704 705/* in dev: base, irq */ 706struct fe_priv { 707 spinlock_t lock; 708 709 struct net_device *dev; 710 struct napi_struct napi; 711 712 /* General data: 713 * Locking: spin_lock(&np->lock); */ 714 struct nv_ethtool_stats estats; 715 int in_shutdown; 716 u32 linkspeed; 717 int duplex; 718 int autoneg; 719 int fixed_mode; 720 int phyaddr; 721 int wolenabled; 722 unsigned int phy_oui; 723 unsigned int phy_model; 724 unsigned int phy_rev; 725 u16 gigabit; 726 int intr_test; 727 int recover_error; 728 729 /* General data: RO fields */ 730 dma_addr_t ring_addr; 731 struct pci_dev *pci_dev; 732 u32 orig_mac[2]; 733 u32 irqmask; 734 u32 desc_ver; 735 u32 txrxctl_bits; 736 u32 vlanctl_bits; 737 u32 driver_data; 738 u32 device_id; 739 u32 register_size; 740 int rx_csum; 741 u32 mac_in_use; 742 743 void __iomem *base; 744 745 /* rx specific fields. 746 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); 747 */ 748 union ring_type get_rx, put_rx, first_rx, last_rx; 749 struct nv_skb_map *get_rx_ctx, *put_rx_ctx; 750 struct nv_skb_map *first_rx_ctx, *last_rx_ctx; 751 struct nv_skb_map *rx_skb; 752 753 union ring_type rx_ring; 754 unsigned int rx_buf_sz; 755 unsigned int pkt_limit; 756 struct timer_list oom_kick; 757 struct timer_list nic_poll; 758 struct timer_list stats_poll; 759 u32 nic_poll_irq; 760 int rx_ring_size; 761 762 /* media detection workaround. 763 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); 764 */ 765 int need_linktimer; 766 unsigned long link_timeout; 767 /* 768 * tx specific fields. 769 */ 770 union ring_type get_tx, put_tx, first_tx, last_tx; 771 struct nv_skb_map *get_tx_ctx, *put_tx_ctx; 772 struct nv_skb_map *first_tx_ctx, *last_tx_ctx; 773 struct nv_skb_map *tx_skb; 774 775 union ring_type tx_ring; 776 u32 tx_flags; 777 int tx_ring_size; 778 int tx_limit; 779 u32 tx_pkts_in_progress; 780 struct nv_skb_map *tx_change_owner; 781 struct nv_skb_map *tx_end_flip; 782 int tx_stop; 783 784 /* vlan fields */ 785 struct vlan_group *vlangrp; 786 787 /* msi/msi-x fields */ 788 u32 msi_flags; 789 struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; 790 791 /* flow control */ 792 u32 pause_flags; 793 794 /* power saved state */ 795 u32 saved_config_space[NV_PCI_REGSZ_MAX/4]; 796}; 797 798/* 799 * Maximum number of loops until we assume that a bit in the irq mask 800 * is stuck. Overridable with module param. 801 */ 802static int max_interrupt_work = 5; 803 804/* 805 * Optimization can be either throuput mode or cpu mode 806 * 807 * Throughput Mode: Every tx and rx packet will generate an interrupt. 808 * CPU Mode: Interrupts are controlled by a timer. 809 */ 810enum { 811 NV_OPTIMIZATION_MODE_THROUGHPUT, 812 NV_OPTIMIZATION_MODE_CPU 813}; 814static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT; 815 816/* 817 * Poll interval for timer irq 818 * 819 * This interval determines how frequent an interrupt is generated. 
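 *
 * (Editor's worked example for the formula below, not original driver text:
 * solving value = (time_in_micro_secs * 100) / 2^10 for time gives
 * time_in_micro_secs = value * 1024 / 100, so NVREG_POLL_DEFAULT_THROUGHPUT
 * (970) corresponds to roughly 970 * 10.24 us, i.e. about 10 ms or some 100
 * timer interrupts per second, matching the "100 times/second" note in the
 * header comment; the value 97 mentioned earlier corresponds to about 1 ms.)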
820 * The is value is determined by [(time_in_micro_secs * 100) / (2^10)] 821 * Min = 0, and Max = 65535 822 */ 823static int poll_interval = -1; 824 825/* 826 * MSI interrupts 827 */ 828enum { 829 NV_MSI_INT_DISABLED, 830 NV_MSI_INT_ENABLED 831}; 832static int msi = NV_MSI_INT_ENABLED; 833 834/* 835 * MSIX interrupts 836 */ 837enum { 838 NV_MSIX_INT_DISABLED, 839 NV_MSIX_INT_ENABLED 840}; 841static int msix = NV_MSIX_INT_DISABLED; 842 843/* 844 * DMA 64bit 845 */ 846enum { 847 NV_DMA_64BIT_DISABLED, 848 NV_DMA_64BIT_ENABLED 849}; 850static int dma_64bit = NV_DMA_64BIT_ENABLED; 851 852/* 853 * Crossover Detection 854 * Realtek 8201 phy + some OEM boards do not work properly. 855 */ 856enum { 857 NV_CROSSOVER_DETECTION_DISABLED, 858 NV_CROSSOVER_DETECTION_ENABLED 859}; 860static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED; 861 862static inline struct fe_priv *get_nvpriv(struct net_device *dev) 863{ 864 return netdev_priv(dev); 865} 866 867static inline u8 __iomem *get_hwbase(struct net_device *dev) 868{ 869 return ((struct fe_priv *)netdev_priv(dev))->base; 870} 871 872static inline void pci_push(u8 __iomem *base) 873{ 874 /* force out pending posted writes */ 875 readl(base); 876} 877 878static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v) 879{ 880 return le32_to_cpu(prd->flaglen) 881 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); 882} 883 884static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v) 885{ 886 return le32_to_cpu(prd->flaglen) & LEN_MASK_V2; 887} 888 889static bool nv_optimized(struct fe_priv *np) 890{ 891 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 892 return false; 893 return true; 894} 895 896static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, 897 int delay, int delaymax, const char *msg) 898{ 899 u8 __iomem *base = get_hwbase(dev); 900 901 pci_push(base); 902 do { 903 udelay(delay); 904 delaymax -= delay; 905 if (delaymax < 0) { 906 if (msg) 907 printk(msg); 908 return 1; 909 } 910 } while ((readl(base + offset) & mask) != target); 911 return 0; 912} 913 914#define NV_SETUP_RX_RING 0x01 915#define NV_SETUP_TX_RING 0x02 916 917static inline u32 dma_low(dma_addr_t addr) 918{ 919 return addr; 920} 921 922static inline u32 dma_high(dma_addr_t addr) 923{ 924 return addr>>31>>1; /* 0 if 32bit, shift down by 32 if 64bit */ 925} 926 927static void setup_hw_rings(struct net_device *dev, int rxtx_flags) 928{ 929 struct fe_priv *np = get_nvpriv(dev); 930 u8 __iomem *base = get_hwbase(dev); 931 932 if (!nv_optimized(np)) { 933 if (rxtx_flags & NV_SETUP_RX_RING) { 934 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 935 } 936 if (rxtx_flags & NV_SETUP_TX_RING) { 937 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); 938 } 939 } else { 940 if (rxtx_flags & NV_SETUP_RX_RING) { 941 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 942 writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh); 943 } 944 if (rxtx_flags & NV_SETUP_TX_RING) { 945 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); 946 writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh); 947 } 948 } 949} 950 951static void free_rings(struct net_device *dev) 952{ 953 struct fe_priv *np = get_nvpriv(dev); 954 955 if (!nv_optimized(np)) { 956 if (np->rx_ring.orig) 957 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + 
np->tx_ring_size), 958 np->rx_ring.orig, np->ring_addr); 959 } else { 960 if (np->rx_ring.ex) 961 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 962 np->rx_ring.ex, np->ring_addr); 963 } 964 if (np->rx_skb) 965 kfree(np->rx_skb); 966 if (np->tx_skb) 967 kfree(np->tx_skb); 968} 969 970static int using_multi_irqs(struct net_device *dev) 971{ 972 struct fe_priv *np = get_nvpriv(dev); 973 974 if (!(np->msi_flags & NV_MSI_X_ENABLED) || 975 ((np->msi_flags & NV_MSI_X_ENABLED) && 976 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) 977 return 0; 978 else 979 return 1; 980} 981 982static void nv_enable_irq(struct net_device *dev) 983{ 984 struct fe_priv *np = get_nvpriv(dev); 985 986 if (!using_multi_irqs(dev)) { 987 if (np->msi_flags & NV_MSI_X_ENABLED) 988 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 989 else 990 enable_irq(np->pci_dev->irq); 991 } else { 992 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 993 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 994 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 995 } 996} 997 998static void nv_disable_irq(struct net_device *dev) 999{ 1000 struct fe_priv *np = get_nvpriv(dev); 1001 1002 if (!using_multi_irqs(dev)) { 1003 if (np->msi_flags & NV_MSI_X_ENABLED) 1004 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1005 else 1006 disable_irq(np->pci_dev->irq); 1007 } else { 1008 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1009 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 1010 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 1011 } 1012} 1013 1014/* In MSIX mode, a write to irqmask behaves as XOR */ 1015static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask) 1016{ 1017 u8 __iomem *base = get_hwbase(dev); 1018 1019 writel(mask, base + NvRegIrqMask); 1020} 1021 1022static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask) 1023{ 1024 struct fe_priv *np = get_nvpriv(dev); 1025 u8 __iomem *base = get_hwbase(dev); 1026 1027 if (np->msi_flags & NV_MSI_X_ENABLED) { 1028 writel(mask, base + NvRegIrqMask); 1029 } else { 1030 if (np->msi_flags & NV_MSI_ENABLED) 1031 writel(0, base + NvRegMSIIrqMask); 1032 writel(0, base + NvRegIrqMask); 1033 } 1034} 1035 1036#define MII_READ (-1) 1037/* mii_rw: read/write a register on the PHY. 
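 *
 * (Editor's usage sketch based on the code below, not original driver text:
 * pass MII_READ as 'value' to read a register - the return value is then the
 * register contents, or -1 on error; pass a real value to write - the return
 * value is then 0 on success, -1 on timeout. For example:
 *
 *	reg = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 *	if (reg != -1)
 *		mii_rw(dev, np->phyaddr, MII_BMCR, reg | BMCR_ANRESTART);
 *
 * which mirrors how phy_init() restarts autonegotiation.)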
1038 * 1039 * Caller must guarantee serialization 1040 */ 1041static int mii_rw(struct net_device *dev, int addr, int miireg, int value) 1042{ 1043 u8 __iomem *base = get_hwbase(dev); 1044 u32 reg; 1045 int retval; 1046 1047 writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus); 1048 1049 reg = readl(base + NvRegMIIControl); 1050 if (reg & NVREG_MIICTL_INUSE) { 1051 writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl); 1052 udelay(NV_MIIBUSY_DELAY); 1053 } 1054 1055 reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg; 1056 if (value != MII_READ) { 1057 writel(value, base + NvRegMIIData); 1058 reg |= NVREG_MIICTL_WRITE; 1059 } 1060 writel(reg, base + NvRegMIIControl); 1061 1062 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0, 1063 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) { 1064 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n", 1065 dev->name, miireg, addr); 1066 retval = -1; 1067 } else if (value != MII_READ) { 1068 /* it was a write operation - fewer failures are detectable */ 1069 dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n", 1070 dev->name, value, miireg, addr); 1071 retval = 0; 1072 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) { 1073 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n", 1074 dev->name, miireg, addr); 1075 retval = -1; 1076 } else { 1077 retval = readl(base + NvRegMIIData); 1078 dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n", 1079 dev->name, miireg, addr, retval); 1080 } 1081 1082 return retval; 1083} 1084 1085static int phy_reset(struct net_device *dev, u32 bmcr_setup) 1086{ 1087 struct fe_priv *np = netdev_priv(dev); 1088 u32 miicontrol; 1089 unsigned int tries = 0; 1090 1091 miicontrol = BMCR_RESET | bmcr_setup; 1092 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { 1093 return -1; 1094 } 1095 1096 /* wait for 500ms */ 1097 msleep(500); 1098 1099 /* must wait till reset is deasserted */ 1100 while (miicontrol & BMCR_RESET) { 1101 msleep(10); 1102 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1103 /* FIXME: 100 tries seem excessive */ 1104 if (tries++ > 100) 1105 return -1; 1106 } 1107 return 0; 1108} 1109 1110static int phy_init(struct net_device *dev) 1111{ 1112 struct fe_priv *np = get_nvpriv(dev); 1113 u8 __iomem *base = get_hwbase(dev); 1114 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; 1115 1116 /* phy errata for E3016 phy */ 1117 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 1118 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1119 reg &= ~PHY_MARVELL_E3016_INITMASK; 1120 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { 1121 printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev)); 1122 return PHY_ERROR; 1123 } 1124 } 1125 if (np->phy_oui == PHY_OUI_REALTEK) { 1126 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1127 np->phy_rev == PHY_REV_REALTEK_8211B) { 1128 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1129 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1130 return PHY_ERROR; 1131 } 1132 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { 1133 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1134 return PHY_ERROR; 1135 } 1136 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1137 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1138 return PHY_ERROR; 1139 } 1140 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { 1141 printk(KERN_INFO "%s: 
phy init failed.\n", pci_name(np->pci_dev)); 1142 return PHY_ERROR; 1143 } 1144 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) { 1145 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1146 return PHY_ERROR; 1147 } 1148 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) { 1149 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1150 return PHY_ERROR; 1151 } 1152 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1153 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1154 return PHY_ERROR; 1155 } 1156 } 1157 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1158 np->phy_rev == PHY_REV_REALTEK_8211C) { 1159 u32 powerstate = readl(base + NvRegPowerState2); 1160 1161 /* need to perform hw phy reset */ 1162 powerstate |= NVREG_POWERSTATE2_PHY_RESET; 1163 writel(powerstate, base + NvRegPowerState2); 1164 msleep(25); 1165 1166 powerstate &= ~NVREG_POWERSTATE2_PHY_RESET; 1167 writel(powerstate, base + NvRegPowerState2); 1168 msleep(25); 1169 1170 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); 1171 reg |= PHY_REALTEK_INIT9; 1172 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) { 1173 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1174 return PHY_ERROR; 1175 } 1176 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) { 1177 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1178 return PHY_ERROR; 1179 } 1180 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ); 1181 if (!(reg & PHY_REALTEK_INIT11)) { 1182 reg |= PHY_REALTEK_INIT11; 1183 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) { 1184 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1185 return PHY_ERROR; 1186 } 1187 } 1188 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1189 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1190 return PHY_ERROR; 1191 } 1192 } 1193 if (np->phy_model == PHY_MODEL_REALTEK_8201) { 1194 if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 || 1195 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 || 1196 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 || 1197 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 || 1198 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 || 1199 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 || 1200 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 || 1201 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) { 1202 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); 1203 phy_reserved |= PHY_REALTEK_INIT7; 1204 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) { 1205 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1206 return PHY_ERROR; 1207 } 1208 } 1209 } 1210 } 1211 1212 /* set advertise register */ 1213 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 1214 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); 1215 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { 1216 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); 1217 return PHY_ERROR; 1218 } 1219 1220 /* get phy interface type */ 1221 phyinterface = readl(base + NvRegPhyInterface); 1222 1223 /* see if gigabit phy */ 1224 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 1225 if (mii_status & PHY_GIGABIT) { 1226 np->gigabit = PHY_GIGABIT; 1227 mii_control_1000 = mii_rw(dev, np->phyaddr, 
MII_CTRL1000, MII_READ); 1228 mii_control_1000 &= ~ADVERTISE_1000HALF; 1229 if (phyinterface & PHY_RGMII) 1230 mii_control_1000 |= ADVERTISE_1000FULL; 1231 else 1232 mii_control_1000 &= ~ADVERTISE_1000FULL; 1233 1234 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { 1235 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1236 return PHY_ERROR; 1237 } 1238 } 1239 else 1240 np->gigabit = 0; 1241 1242 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1243 mii_control |= BMCR_ANENABLE; 1244 1245 if (np->phy_oui == PHY_OUI_REALTEK && 1246 np->phy_model == PHY_MODEL_REALTEK_8211 && 1247 np->phy_rev == PHY_REV_REALTEK_8211C) { 1248 /* start autoneg since we already performed hw reset above */ 1249 mii_control |= BMCR_ANRESTART; 1250 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1251 printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev)); 1252 return PHY_ERROR; 1253 } 1254 } else { 1255 /* reset the phy 1256 * (certain phys need bmcr to be setup with reset) 1257 */ 1258 if (phy_reset(dev, mii_control)) { 1259 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); 1260 return PHY_ERROR; 1261 } 1262 } 1263 1264 /* phy vendor specific configuration */ 1265 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { 1266 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); 1267 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); 1268 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); 1269 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) { 1270 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1271 return PHY_ERROR; 1272 } 1273 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1274 phy_reserved |= PHY_CICADA_INIT5; 1275 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) { 1276 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1277 return PHY_ERROR; 1278 } 1279 } 1280 if (np->phy_oui == PHY_OUI_CICADA) { 1281 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); 1282 phy_reserved |= PHY_CICADA_INIT6; 1283 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) { 1284 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1285 return PHY_ERROR; 1286 } 1287 } 1288 if (np->phy_oui == PHY_OUI_VITESSE) { 1289 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) { 1290 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1291 return PHY_ERROR; 1292 } 1293 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) { 1294 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1295 return PHY_ERROR; 1296 } 1297 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1298 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1299 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1300 return PHY_ERROR; 1301 } 1302 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1303 phy_reserved &= ~PHY_VITESSE_INIT_MSK1; 1304 phy_reserved |= PHY_VITESSE_INIT3; 1305 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1306 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1307 return PHY_ERROR; 1308 } 1309 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) { 1310 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1311 return PHY_ERROR; 1312 } 1313 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) { 1314 printk(KERN_INFO "%s: 
phy init failed.\n", pci_name(np->pci_dev)); 1315 return PHY_ERROR; 1316 } 1317 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1318 phy_reserved &= ~PHY_VITESSE_INIT_MSK1; 1319 phy_reserved |= PHY_VITESSE_INIT3; 1320 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1321 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1322 return PHY_ERROR; 1323 } 1324 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1325 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1326 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1327 return PHY_ERROR; 1328 } 1329 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) { 1330 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1331 return PHY_ERROR; 1332 } 1333 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) { 1334 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1335 return PHY_ERROR; 1336 } 1337 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1338 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1339 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1340 return PHY_ERROR; 1341 } 1342 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1343 phy_reserved &= ~PHY_VITESSE_INIT_MSK2; 1344 phy_reserved |= PHY_VITESSE_INIT8; 1345 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1346 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1347 return PHY_ERROR; 1348 } 1349 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) { 1350 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1351 return PHY_ERROR; 1352 } 1353 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) { 1354 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1355 return PHY_ERROR; 1356 } 1357 } 1358 if (np->phy_oui == PHY_OUI_REALTEK) { 1359 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1360 np->phy_rev == PHY_REV_REALTEK_8211B) { 1361 /* reset could have cleared these out, set them back */ 1362 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1363 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1364 return PHY_ERROR; 1365 } 1366 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { 1367 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1368 return PHY_ERROR; 1369 } 1370 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1371 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1372 return PHY_ERROR; 1373 } 1374 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { 1375 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1376 return PHY_ERROR; 1377 } 1378 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) { 1379 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1380 return PHY_ERROR; 1381 } 1382 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) { 1383 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1384 return PHY_ERROR; 1385 } 1386 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1387 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1388 return PHY_ERROR; 1389 } 1390 } 1391 if (np->phy_model == PHY_MODEL_REALTEK_8201) { 1392 if (np->device_id == 
PCI_DEVICE_ID_NVIDIA_NVENET_32 || 1393 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 || 1394 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 || 1395 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 || 1396 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 || 1397 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 || 1398 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 || 1399 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) { 1400 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); 1401 phy_reserved |= PHY_REALTEK_INIT7; 1402 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) { 1403 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1404 return PHY_ERROR; 1405 } 1406 } 1407 if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) { 1408 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1409 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1410 return PHY_ERROR; 1411 } 1412 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); 1413 phy_reserved &= ~PHY_REALTEK_INIT_MSK1; 1414 phy_reserved |= PHY_REALTEK_INIT3; 1415 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) { 1416 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1417 return PHY_ERROR; 1418 } 1419 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1420 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1421 return PHY_ERROR; 1422 } 1423 } 1424 } 1425 } 1426 1427 /* some phys clear out pause advertisment on reset, set it back */ 1428 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); 1429 1430 /* restart auto negotiation */ 1431 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1432 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 1433 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1434 return PHY_ERROR; 1435 } 1436 1437 return 0; 1438} 1439 1440static void nv_start_rx(struct net_device *dev) 1441{ 1442 struct fe_priv *np = netdev_priv(dev); 1443 u8 __iomem *base = get_hwbase(dev); 1444 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1445 1446 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); 1447 /* Already running? Stop it. 
*/ 1448 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { 1449 rx_ctrl &= ~NVREG_RCVCTL_START; 1450 writel(rx_ctrl, base + NvRegReceiverControl); 1451 pci_push(base); 1452 } 1453 writel(np->linkspeed, base + NvRegLinkSpeed); 1454 pci_push(base); 1455 rx_ctrl |= NVREG_RCVCTL_START; 1456 if (np->mac_in_use) 1457 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; 1458 writel(rx_ctrl, base + NvRegReceiverControl); 1459 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", 1460 dev->name, np->duplex, np->linkspeed); 1461 pci_push(base); 1462} 1463 1464static void nv_stop_rx(struct net_device *dev) 1465{ 1466 struct fe_priv *np = netdev_priv(dev); 1467 u8 __iomem *base = get_hwbase(dev); 1468 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1469 1470 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); 1471 if (!np->mac_in_use) 1472 rx_ctrl &= ~NVREG_RCVCTL_START; 1473 else 1474 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; 1475 writel(rx_ctrl, base + NvRegReceiverControl); 1476 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, 1477 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, 1478 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); 1479 1480 udelay(NV_RXSTOP_DELAY2); 1481 if (!np->mac_in_use) 1482 writel(0, base + NvRegLinkSpeed); 1483} 1484 1485static void nv_start_tx(struct net_device *dev) 1486{ 1487 struct fe_priv *np = netdev_priv(dev); 1488 u8 __iomem *base = get_hwbase(dev); 1489 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1490 1491 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name); 1492 tx_ctrl |= NVREG_XMITCTL_START; 1493 if (np->mac_in_use) 1494 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; 1495 writel(tx_ctrl, base + NvRegTransmitterControl); 1496 pci_push(base); 1497} 1498 1499static void nv_stop_tx(struct net_device *dev) 1500{ 1501 struct fe_priv *np = netdev_priv(dev); 1502 u8 __iomem *base = get_hwbase(dev); 1503 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1504 1505 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); 1506 if (!np->mac_in_use) 1507 tx_ctrl &= ~NVREG_XMITCTL_START; 1508 else 1509 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; 1510 writel(tx_ctrl, base + NvRegTransmitterControl); 1511 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, 1512 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, 1513 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); 1514 1515 udelay(NV_TXSTOP_DELAY2); 1516 if (!np->mac_in_use) 1517 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, 1518 base + NvRegTransmitPoll); 1519} 1520 1521static void nv_start_rxtx(struct net_device *dev) 1522{ 1523 nv_start_rx(dev); 1524 nv_start_tx(dev); 1525} 1526 1527static void nv_stop_rxtx(struct net_device *dev) 1528{ 1529 nv_stop_rx(dev); 1530 nv_stop_tx(dev); 1531} 1532 1533static void nv_txrx_reset(struct net_device *dev) 1534{ 1535 struct fe_priv *np = netdev_priv(dev); 1536 u8 __iomem *base = get_hwbase(dev); 1537 1538 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name); 1539 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1540 pci_push(base); 1541 udelay(NV_TXRX_RESET_DELAY); 1542 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1543 pci_push(base); 1544} 1545 1546static void nv_mac_reset(struct net_device *dev) 1547{ 1548 struct fe_priv *np = netdev_priv(dev); 1549 u8 __iomem *base = get_hwbase(dev); 1550 u32 temp1, temp2, temp3; 1551 1552 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name); 1553 1554 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + 
NvRegTxRxControl); 1555 pci_push(base); 1556 1557 /* save registers since they will be cleared on reset */ 1558 temp1 = readl(base + NvRegMacAddrA); 1559 temp2 = readl(base + NvRegMacAddrB); 1560 temp3 = readl(base + NvRegTransmitPoll); 1561 1562 writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset); 1563 pci_push(base); 1564 udelay(NV_MAC_RESET_DELAY); 1565 writel(0, base + NvRegMacReset); 1566 pci_push(base); 1567 udelay(NV_MAC_RESET_DELAY); 1568 1569 /* restore saved registers */ 1570 writel(temp1, base + NvRegMacAddrA); 1571 writel(temp2, base + NvRegMacAddrB); 1572 writel(temp3, base + NvRegTransmitPoll); 1573 1574 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1575 pci_push(base); 1576} 1577 1578static void nv_get_hw_stats(struct net_device *dev) 1579{ 1580 struct fe_priv *np = netdev_priv(dev); 1581 u8 __iomem *base = get_hwbase(dev); 1582 1583 np->estats.tx_bytes += readl(base + NvRegTxCnt); 1584 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); 1585 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); 1586 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); 1587 np->estats.tx_late_collision += readl(base + NvRegTxLateCol); 1588 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); 1589 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); 1590 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); 1591 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); 1592 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); 1593 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); 1594 np->estats.rx_late_collision += readl(base + NvRegRxLateCol); 1595 np->estats.rx_runt += readl(base + NvRegRxRunt); 1596 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); 1597 np->estats.rx_over_errors += readl(base + NvRegRxOverflow); 1598 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); 1599 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); 1600 np->estats.rx_length_error += readl(base + NvRegRxLenErr); 1601 np->estats.rx_unicast += readl(base + NvRegRxUnicast); 1602 np->estats.rx_multicast += readl(base + NvRegRxMulticast); 1603 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); 1604 np->estats.rx_packets = 1605 np->estats.rx_unicast + 1606 np->estats.rx_multicast + 1607 np->estats.rx_broadcast; 1608 np->estats.rx_errors_total = 1609 np->estats.rx_crc_errors + 1610 np->estats.rx_over_errors + 1611 np->estats.rx_frame_error + 1612 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + 1613 np->estats.rx_late_collision + 1614 np->estats.rx_runt + 1615 np->estats.rx_frame_too_long; 1616 np->estats.tx_errors_total = 1617 np->estats.tx_late_collision + 1618 np->estats.tx_fifo_errors + 1619 np->estats.tx_carrier_errors + 1620 np->estats.tx_excess_deferral + 1621 np->estats.tx_retry_error; 1622 1623 if (np->driver_data & DEV_HAS_STATISTICS_V2) { 1624 np->estats.tx_deferral += readl(base + NvRegTxDef); 1625 np->estats.tx_packets += readl(base + NvRegTxFrame); 1626 np->estats.rx_bytes += readl(base + NvRegRxCnt); 1627 np->estats.tx_pause += readl(base + NvRegTxPause); 1628 np->estats.rx_pause += readl(base + NvRegRxPause); 1629 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 1630 } 1631} 1632 1633/* 1634 * nv_get_stats: dev->get_stats function 1635 * Get latest stats value from the nic. 1636 * Called with read_lock(&dev_base_lock) held for read - 1637 * only synchronized against unregister_netdevice. 
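 *
 * (Editor's note, not original driver text: this is the dev->get_stats hook,
 * i.e. what ends up being called when user space reads /proc/net/dev or
 * queries interface statistics via ifconfig or "ip -s link".)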
1638 */ 1639static struct net_device_stats *nv_get_stats(struct net_device *dev) 1640{ 1641 struct fe_priv *np = netdev_priv(dev); 1642 1643 /* If the nic supports hw counters then retrieve latest values */ 1644 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) { 1645 nv_get_hw_stats(dev); 1646 1647 /* copy to net_device stats */ 1648 dev->stats.tx_bytes = np->estats.tx_bytes; 1649 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; 1650 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; 1651 dev->stats.rx_crc_errors = np->estats.rx_crc_errors; 1652 dev->stats.rx_over_errors = np->estats.rx_over_errors; 1653 dev->stats.rx_errors = np->estats.rx_errors_total; 1654 dev->stats.tx_errors = np->estats.tx_errors_total; 1655 } 1656 1657 return &dev->stats; 1658} 1659 1660/* 1661 * nv_alloc_rx: fill rx ring entries. 1662 * Return 1 if the allocations for the skbs failed and the 1663 * rx engine is without Available descriptors 1664 */ 1665static int nv_alloc_rx(struct net_device *dev) 1666{ 1667 struct fe_priv *np = netdev_priv(dev); 1668 struct ring_desc* less_rx; 1669 1670 less_rx = np->get_rx.orig; 1671 if (less_rx-- == np->first_rx.orig) 1672 less_rx = np->last_rx.orig; 1673 1674 while (np->put_rx.orig != less_rx) { 1675 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1676 if (skb) { 1677 np->put_rx_ctx->skb = skb; 1678 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, 1679 skb->data, 1680 skb_tailroom(skb), 1681 PCI_DMA_FROMDEVICE); 1682 np->put_rx_ctx->dma_len = skb_tailroom(skb); 1683 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); 1684 wmb(); 1685 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); 1686 if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) 1687 np->put_rx.orig = np->first_rx.orig; 1688 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1689 np->put_rx_ctx = np->first_rx_ctx; 1690 } else { 1691 return 1; 1692 } 1693 } 1694 return 0; 1695} 1696 1697static int nv_alloc_rx_optimized(struct net_device *dev) 1698{ 1699 struct fe_priv *np = netdev_priv(dev); 1700 struct ring_desc_ex* less_rx; 1701 1702 less_rx = np->get_rx.ex; 1703 if (less_rx-- == np->first_rx.ex) 1704 less_rx = np->last_rx.ex; 1705 1706 while (np->put_rx.ex != less_rx) { 1707 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1708 if (skb) { 1709 np->put_rx_ctx->skb = skb; 1710 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, 1711 skb->data, 1712 skb_tailroom(skb), 1713 PCI_DMA_FROMDEVICE); 1714 np->put_rx_ctx->dma_len = skb_tailroom(skb); 1715 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma)); 1716 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma)); 1717 wmb(); 1718 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); 1719 if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) 1720 np->put_rx.ex = np->first_rx.ex; 1721 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1722 np->put_rx_ctx = np->first_rx_ctx; 1723 } else { 1724 return 1; 1725 } 1726 } 1727 return 0; 1728} 1729 1730/* If rx bufs are exhausted called after 50ms to attempt to refresh */ 1731#ifdef CONFIG_FORCEDETH_NAPI 1732static void nv_do_rx_refill(unsigned long data) 1733{ 1734 struct net_device *dev = (struct net_device *) data; 1735 struct fe_priv *np = netdev_priv(dev); 1736 1737 /* Just reschedule NAPI rx processing */ 1738 netif_rx_schedule(dev, &np->napi); 1739} 1740#else 1741static void nv_do_rx_refill(unsigned long data) 1742{ 1743 struct net_device *dev = (struct net_device *) data; 1744 struct 
fe_priv *np = netdev_priv(dev); 1745 int retcode; 1746 1747 if (!using_multi_irqs(dev)) { 1748 if (np->msi_flags & NV_MSI_X_ENABLED) 1749 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1750 else 1751 disable_irq(np->pci_dev->irq); 1752 } else { 1753 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1754 } 1755 if (!nv_optimized(np)) 1756 retcode = nv_alloc_rx(dev); 1757 else 1758 retcode = nv_alloc_rx_optimized(dev); 1759 if (retcode) { 1760 spin_lock_irq(&np->lock); 1761 if (!np->in_shutdown) 1762 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 1763 spin_unlock_irq(&np->lock); 1764 } 1765 if (!using_multi_irqs(dev)) { 1766 if (np->msi_flags & NV_MSI_X_ENABLED) 1767 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1768 else 1769 enable_irq(np->pci_dev->irq); 1770 } else { 1771 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1772 } 1773} 1774#endif 1775 1776static void nv_init_rx(struct net_device *dev) 1777{ 1778 struct fe_priv *np = netdev_priv(dev); 1779 int i; 1780 1781 np->get_rx = np->put_rx = np->first_rx = np->rx_ring; 1782 1783 if (!nv_optimized(np)) 1784 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; 1785 else 1786 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; 1787 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb; 1788 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; 1789 1790 for (i = 0; i < np->rx_ring_size; i++) { 1791 if (!nv_optimized(np)) { 1792 np->rx_ring.orig[i].flaglen = 0; 1793 np->rx_ring.orig[i].buf = 0; 1794 } else { 1795 np->rx_ring.ex[i].flaglen = 0; 1796 np->rx_ring.ex[i].txvlan = 0; 1797 np->rx_ring.ex[i].bufhigh = 0; 1798 np->rx_ring.ex[i].buflow = 0; 1799 } 1800 np->rx_skb[i].skb = NULL; 1801 np->rx_skb[i].dma = 0; 1802 } 1803} 1804 1805static void nv_init_tx(struct net_device *dev) 1806{ 1807 struct fe_priv *np = netdev_priv(dev); 1808 int i; 1809 1810 np->get_tx = np->put_tx = np->first_tx = np->tx_ring; 1811 1812 if (!nv_optimized(np)) 1813 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; 1814 else 1815 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; 1816 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; 1817 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; 1818 np->tx_pkts_in_progress = 0; 1819 np->tx_change_owner = NULL; 1820 np->tx_end_flip = NULL; 1821 1822 for (i = 0; i < np->tx_ring_size; i++) { 1823 if (!nv_optimized(np)) { 1824 np->tx_ring.orig[i].flaglen = 0; 1825 np->tx_ring.orig[i].buf = 0; 1826 } else { 1827 np->tx_ring.ex[i].flaglen = 0; 1828 np->tx_ring.ex[i].txvlan = 0; 1829 np->tx_ring.ex[i].bufhigh = 0; 1830 np->tx_ring.ex[i].buflow = 0; 1831 } 1832 np->tx_skb[i].skb = NULL; 1833 np->tx_skb[i].dma = 0; 1834 np->tx_skb[i].dma_len = 0; 1835 np->tx_skb[i].first_tx_desc = NULL; 1836 np->tx_skb[i].next_tx_ctx = NULL; 1837 } 1838} 1839 1840static int nv_init_ring(struct net_device *dev) 1841{ 1842 struct fe_priv *np = netdev_priv(dev); 1843 1844 nv_init_tx(dev); 1845 nv_init_rx(dev); 1846 1847 if (!nv_optimized(np)) 1848 return nv_alloc_rx(dev); 1849 else 1850 return nv_alloc_rx_optimized(dev); 1851} 1852 1853static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb) 1854{ 1855 struct fe_priv *np = netdev_priv(dev); 1856 1857 if (tx_skb->dma) { 1858 pci_unmap_page(np->pci_dev, tx_skb->dma, 1859 tx_skb->dma_len, 1860 PCI_DMA_TODEVICE); 1861 tx_skb->dma = 0; 1862 } 1863 if (tx_skb->skb) { 1864 dev_kfree_skb_any(tx_skb->skb); 1865 tx_skb->skb = NULL; 1866 return 1; 1867 } else { 1868 return 0; 1869 } 1870} 1871 1872static void 
nv_drain_tx(struct net_device *dev) 1873{ 1874 struct fe_priv *np = netdev_priv(dev); 1875 unsigned int i; 1876 1877 for (i = 0; i < np->tx_ring_size; i++) { 1878 if (!nv_optimized(np)) { 1879 np->tx_ring.orig[i].flaglen = 0; 1880 np->tx_ring.orig[i].buf = 0; 1881 } else { 1882 np->tx_ring.ex[i].flaglen = 0; 1883 np->tx_ring.ex[i].txvlan = 0; 1884 np->tx_ring.ex[i].bufhigh = 0; 1885 np->tx_ring.ex[i].buflow = 0; 1886 } 1887 if (nv_release_txskb(dev, &np->tx_skb[i])) 1888 dev->stats.tx_dropped++; 1889 np->tx_skb[i].dma = 0; 1890 np->tx_skb[i].dma_len = 0; 1891 np->tx_skb[i].first_tx_desc = NULL; 1892 np->tx_skb[i].next_tx_ctx = NULL; 1893 } 1894 np->tx_pkts_in_progress = 0; 1895 np->tx_change_owner = NULL; 1896 np->tx_end_flip = NULL; 1897} 1898 1899static void nv_drain_rx(struct net_device *dev) 1900{ 1901 struct fe_priv *np = netdev_priv(dev); 1902 int i; 1903 1904 for (i = 0; i < np->rx_ring_size; i++) { 1905 if (!nv_optimized(np)) { 1906 np->rx_ring.orig[i].flaglen = 0; 1907 np->rx_ring.orig[i].buf = 0; 1908 } else { 1909 np->rx_ring.ex[i].flaglen = 0; 1910 np->rx_ring.ex[i].txvlan = 0; 1911 np->rx_ring.ex[i].bufhigh = 0; 1912 np->rx_ring.ex[i].buflow = 0; 1913 } 1914 wmb(); 1915 if (np->rx_skb[i].skb) { 1916 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, 1917 (skb_end_pointer(np->rx_skb[i].skb) - 1918 np->rx_skb[i].skb->data), 1919 PCI_DMA_FROMDEVICE); 1920 dev_kfree_skb(np->rx_skb[i].skb); 1921 np->rx_skb[i].skb = NULL; 1922 } 1923 } 1924} 1925 1926static void nv_drain_rxtx(struct net_device *dev) 1927{ 1928 nv_drain_tx(dev); 1929 nv_drain_rx(dev); 1930} 1931 1932static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) 1933{ 1934 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); 1935} 1936 1937static void nv_legacybackoff_reseed(struct net_device *dev) 1938{ 1939 u8 __iomem *base = get_hwbase(dev); 1940 u32 reg; 1941 u32 low; 1942 int tx_status = 0; 1943 1944 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK; 1945 get_random_bytes(&low, sizeof(low)); 1946 reg |= low & NVREG_SLOTTIME_MASK; 1947 1948 /* Need to stop tx before change takes effect. 1949 * Caller has already gained np->lock. 
1950 */ 1951 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START; 1952 if (tx_status) 1953 nv_stop_tx(dev); 1954 nv_stop_rx(dev); 1955 writel(reg, base + NvRegSlotTime); 1956 if (tx_status) 1957 nv_start_tx(dev); 1958 nv_start_rx(dev); 1959} 1960 1961/* Gear Backoff Seeds */ 1962#define BACKOFF_SEEDSET_ROWS 8 1963#define BACKOFF_SEEDSET_LFSRS 15 1964 1965/* Known Good seed sets */ 1966static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 1967 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 1968 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974}, 1969 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 1970 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974}, 1971 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984}, 1972 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984}, 1973 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84}, 1974 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}}; 1975 1976static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 1977 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 1978 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 1979 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397}, 1980 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 1981 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 1982 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 1983 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 1984 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}}; 1985 1986static void nv_gear_backoff_reseed(struct net_device *dev) 1987{ 1988 u8 __iomem *base = get_hwbase(dev); 1989 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed; 1990 u32 temp, seedset, combinedSeed; 1991 int i; 1992 1993 /* Setup seed for free running LFSR */ 1994 /* We are going to read the time stamp counter 3 times 1995 and swizzle bits around to increase randomness */ 1996 get_random_bytes(&miniseed1, sizeof(miniseed1)); 1997 miniseed1 &= 0x0fff; 1998 if (miniseed1 == 0) 1999 miniseed1 = 0xabc; 2000 2001 get_random_bytes(&miniseed2, sizeof(miniseed2)); 2002 miniseed2 &= 0x0fff; 2003 if (miniseed2 == 0) 2004 miniseed2 = 0xabc; 2005 miniseed2_reversed = 2006 ((miniseed2 & 0xF00) >> 8) | 2007 (miniseed2 & 0x0F0) | 2008 ((miniseed2 & 0x00F) << 8); 2009 2010 get_random_bytes(&miniseed3, sizeof(miniseed3)); 2011 miniseed3 &= 0x0fff; 2012 if (miniseed3 == 0) 2013 miniseed3 = 0xabc; 2014 miniseed3_reversed = 2015 ((miniseed3 & 0xF00) >> 8) | 2016 (miniseed3 & 0x0F0) | 2017 ((miniseed3 & 0x00F) << 8); 2018 2019 combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) | 2020 (miniseed2 ^ miniseed3_reversed); 2021 2022 /* Seeds can not be zero */ 2023 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0) 2024 combinedSeed |= 0x08; 2025 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0) 2026 combinedSeed |= 0x8000; 2027 2028 /* No need to disable tx here */ 2029 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT); 2030 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK; 2031 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR; 2032 writel(temp,base + NvRegBackOffControl); 2033 2034 /* 
Setup seeds for all gear LFSRs. */ 2035 get_random_bytes(&seedset, sizeof(seedset)); 2036 seedset = seedset % BACKOFF_SEEDSET_ROWS; 2037 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) 2038 { 2039 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT); 2040 temp |= main_seedset[seedset][i-1] & 0x3ff; 2041 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); 2042 writel(temp, base + NvRegBackOffControl); 2043 } 2044} 2045 2046/* 2047 * nv_start_xmit: dev->hard_start_xmit function 2048 * Called with netif_tx_lock held. 2049 */ 2050static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 2051{ 2052 struct fe_priv *np = netdev_priv(dev); 2053 u32 tx_flags = 0; 2054 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 2055 unsigned int fragments = skb_shinfo(skb)->nr_frags; 2056 unsigned int i; 2057 u32 offset = 0; 2058 u32 bcnt; 2059 u32 size = skb->len-skb->data_len; 2060 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2061 u32 empty_slots; 2062 struct ring_desc* put_tx; 2063 struct ring_desc* start_tx; 2064 struct ring_desc* prev_tx; 2065 struct nv_skb_map* prev_tx_ctx; 2066 unsigned long flags; 2067 2068 /* add fragments to entries count */ 2069 for (i = 0; i < fragments; i++) { 2070 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 2071 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2072 } 2073 2074 empty_slots = nv_get_empty_tx_slots(np); 2075 if (unlikely(empty_slots <= entries)) { 2076 spin_lock_irqsave(&np->lock, flags); 2077 netif_stop_queue(dev); 2078 np->tx_stop = 1; 2079 spin_unlock_irqrestore(&np->lock, flags); 2080 return NETDEV_TX_BUSY; 2081 } 2082 2083 start_tx = put_tx = np->put_tx.orig; 2084 2085 /* setup the header buffer */ 2086 do { 2087 prev_tx = put_tx; 2088 prev_tx_ctx = np->put_tx_ctx; 2089 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2090 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2091 PCI_DMA_TODEVICE); 2092 np->put_tx_ctx->dma_len = bcnt; 2093 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2094 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2095 2096 tx_flags = np->tx_flags; 2097 offset += bcnt; 2098 size -= bcnt; 2099 if (unlikely(put_tx++ == np->last_tx.orig)) 2100 put_tx = np->first_tx.orig; 2101 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2102 np->put_tx_ctx = np->first_tx_ctx; 2103 } while (size); 2104 2105 /* setup the fragments */ 2106 for (i = 0; i < fragments; i++) { 2107 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2108 u32 size = frag->size; 2109 offset = 0; 2110 2111 do { 2112 prev_tx = put_tx; 2113 prev_tx_ctx = np->put_tx_ctx; 2114 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; 2115 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 2116 PCI_DMA_TODEVICE); 2117 np->put_tx_ctx->dma_len = bcnt; 2118 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2119 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2120 2121 offset += bcnt; 2122 size -= bcnt; 2123 if (unlikely(put_tx++ == np->last_tx.orig)) 2124 put_tx = np->first_tx.orig; 2125 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2126 np->put_tx_ctx = np->first_tx_ctx; 2127 } while (size); 2128 } 2129 2130 /* set last fragment flag */ 2131 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); 2132 2133 /* save skb in this slot's context area */ 2134 prev_tx_ctx->skb = skb; 2135 2136 if (skb_is_gso(skb)) 2137 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 2138 else 2139 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 2140 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2141 2142 spin_lock_irqsave(&np->lock, flags); 2143 2144 /* set tx flags */ 2145 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2146 np->put_tx.orig = put_tx; 2147 2148 spin_unlock_irqrestore(&np->lock, flags); 2149 2150 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n", 2151 dev->name, entries, tx_flags_extra); 2152 { 2153 int j; 2154 for (j=0; j<64; j++) { 2155 if ((j%16) == 0) 2156 dprintk("\n%03x:", j); 2157 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2158 } 2159 dprintk("\n"); 2160 } 2161 2162 dev->trans_start = jiffies; 2163 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2164 return NETDEV_TX_OK; 2165} 2166 2167static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev) 2168{ 2169 struct fe_priv *np = netdev_priv(dev); 2170 u32 tx_flags = 0; 2171 u32 tx_flags_extra; 2172 unsigned int fragments = skb_shinfo(skb)->nr_frags; 2173 unsigned int i; 2174 u32 offset = 0; 2175 u32 bcnt; 2176 u32 size = skb->len-skb->data_len; 2177 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2178 u32 empty_slots; 2179 struct ring_desc_ex* put_tx; 2180 struct ring_desc_ex* start_tx; 2181 struct ring_desc_ex* prev_tx; 2182 struct nv_skb_map* prev_tx_ctx; 2183 struct nv_skb_map* start_tx_ctx; 2184 unsigned long flags; 2185 2186 /* add fragments to entries count */ 2187 for (i = 0; i < fragments; i++) { 2188 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 2189 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2190 } 2191 2192 empty_slots = nv_get_empty_tx_slots(np); 2193 if (unlikely(empty_slots <= entries)) { 2194 spin_lock_irqsave(&np->lock, flags); 2195 netif_stop_queue(dev); 2196 np->tx_stop = 1; 2197 spin_unlock_irqrestore(&np->lock, flags); 2198 return NETDEV_TX_BUSY; 2199 } 2200 2201 start_tx = put_tx = np->put_tx.ex; 2202 start_tx_ctx = np->put_tx_ctx; 2203 2204 /* setup the header buffer */ 2205 do { 2206 prev_tx = put_tx; 2207 prev_tx_ctx = np->put_tx_ctx; 2208 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; 2209 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2210 PCI_DMA_TODEVICE); 2211 np->put_tx_ctx->dma_len = bcnt; 2212 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2213 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2214 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2215 2216 tx_flags = NV_TX2_VALID; 2217 offset += bcnt; 2218 size -= bcnt; 2219 if (unlikely(put_tx++ == np->last_tx.ex)) 2220 put_tx = np->first_tx.ex; 2221 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2222 np->put_tx_ctx = np->first_tx_ctx; 2223 } while (size); 2224 2225 /* setup the fragments */ 2226 for (i = 0; i < fragments; i++) { 2227 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2228 u32 size = frag->size; 2229 offset = 0; 2230 2231 do { 2232 prev_tx = put_tx; 2233 prev_tx_ctx = np->put_tx_ctx; 2234 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2235 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 2236 PCI_DMA_TODEVICE); 2237 np->put_tx_ctx->dma_len = bcnt; 2238 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2239 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2240 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2241 2242 offset += bcnt; 2243 size -= bcnt; 2244 if (unlikely(put_tx++ == np->last_tx.ex)) 2245 put_tx = np->first_tx.ex; 2246 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2247 np->put_tx_ctx = np->first_tx_ctx; 2248 } while (size); 2249 } 2250 2251 /* set last fragment flag */ 2252 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); 2253 2254 /* save skb in this slot's context area */ 2255 prev_tx_ctx->skb = skb; 2256 2257 if (skb_is_gso(skb)) 2258 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 2259 else 2260 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 2261 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2262 2263 /* vlan tag */ 2264 if (likely(!np->vlangrp)) { 2265 start_tx->txvlan = 0; 2266 } else { 2267 if (vlan_tx_tag_present(skb)) 2268 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb)); 2269 else 2270 start_tx->txvlan = 0; 2271 } 2272 2273 spin_lock_irqsave(&np->lock, flags); 2274 2275 if (np->tx_limit) { 2276 /* Limit the number of outstanding tx. Setup all fragments, but 2277 * do not set the VALID bit on the first descriptor. Save a pointer 2278 * to that descriptor and also for next skb_map element. 2279 */ 2280 2281 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) { 2282 if (!np->tx_change_owner) 2283 np->tx_change_owner = start_tx_ctx; 2284 2285 /* remove VALID bit */ 2286 tx_flags &= ~NV_TX2_VALID; 2287 start_tx_ctx->first_tx_desc = start_tx; 2288 start_tx_ctx->next_tx_ctx = np->put_tx_ctx; 2289 np->tx_end_flip = np->put_tx_ctx; 2290 } else { 2291 np->tx_pkts_in_progress++; 2292 } 2293 } 2294 2295 /* set tx flags */ 2296 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2297 np->put_tx.ex = put_tx; 2298 2299 spin_unlock_irqrestore(&np->lock, flags); 2300 2301 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. 
tx_flags_extra: %x\n", 2302 dev->name, entries, tx_flags_extra); 2303 { 2304 int j; 2305 for (j=0; j<64; j++) { 2306 if ((j%16) == 0) 2307 dprintk("\n%03x:", j); 2308 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2309 } 2310 dprintk("\n"); 2311 } 2312 2313 dev->trans_start = jiffies; 2314 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2315 return NETDEV_TX_OK; 2316} 2317 2318static inline void nv_tx_flip_ownership(struct net_device *dev) 2319{ 2320 struct fe_priv *np = netdev_priv(dev); 2321 2322 np->tx_pkts_in_progress--; 2323 if (np->tx_change_owner) { 2324 np->tx_change_owner->first_tx_desc->flaglen |= 2325 cpu_to_le32(NV_TX2_VALID); 2326 np->tx_pkts_in_progress++; 2327 2328 np->tx_change_owner = np->tx_change_owner->next_tx_ctx; 2329 if (np->tx_change_owner == np->tx_end_flip) 2330 np->tx_change_owner = NULL; 2331 2332 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2333 } 2334} 2335 2336/* 2337 * nv_tx_done: check for completed packets, release the skbs. 2338 * 2339 * Caller must own np->lock. 2340 */ 2341static void nv_tx_done(struct net_device *dev) 2342{ 2343 struct fe_priv *np = netdev_priv(dev); 2344 u32 flags; 2345 struct ring_desc* orig_get_tx = np->get_tx.orig; 2346 2347 while ((np->get_tx.orig != np->put_tx.orig) && 2348 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) { 2349 2350 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n", 2351 dev->name, flags); 2352 2353 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 2354 np->get_tx_ctx->dma_len, 2355 PCI_DMA_TODEVICE); 2356 np->get_tx_ctx->dma = 0; 2357 2358 if (np->desc_ver == DESC_VER_1) { 2359 if (flags & NV_TX_LASTPACKET) { 2360 if (flags & NV_TX_ERROR) { 2361 if (flags & NV_TX_UNDERFLOW) 2362 dev->stats.tx_fifo_errors++; 2363 if (flags & NV_TX_CARRIERLOST) 2364 dev->stats.tx_carrier_errors++; 2365 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) 2366 nv_legacybackoff_reseed(dev); 2367 dev->stats.tx_errors++; 2368 } else { 2369 dev->stats.tx_packets++; 2370 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2371 } 2372 dev_kfree_skb_any(np->get_tx_ctx->skb); 2373 np->get_tx_ctx->skb = NULL; 2374 } 2375 } else { 2376 if (flags & NV_TX2_LASTPACKET) { 2377 if (flags & NV_TX2_ERROR) { 2378 if (flags & NV_TX2_UNDERFLOW) 2379 dev->stats.tx_fifo_errors++; 2380 if (flags & NV_TX2_CARRIERLOST) 2381 dev->stats.tx_carrier_errors++; 2382 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2383 nv_legacybackoff_reseed(dev); 2384 dev->stats.tx_errors++; 2385 } else { 2386 dev->stats.tx_packets++; 2387 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2388 } 2389 dev_kfree_skb_any(np->get_tx_ctx->skb); 2390 np->get_tx_ctx->skb = NULL; 2391 } 2392 } 2393 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) 2394 np->get_tx.orig = np->first_tx.orig; 2395 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2396 np->get_tx_ctx = np->first_tx_ctx; 2397 } 2398 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { 2399 np->tx_stop = 0; 2400 netif_wake_queue(dev); 2401 } 2402} 2403 2404static void nv_tx_done_optimized(struct net_device *dev, int limit) 2405{ 2406 struct fe_priv *np = netdev_priv(dev); 2407 u32 flags; 2408 struct ring_desc_ex* orig_get_tx = np->get_tx.ex; 2409 2410 while ((np->get_tx.ex != np->put_tx.ex) && 2411 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) && 2412 (limit-- > 0)) { 2413 2414 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", 2415 dev->name, flags); 
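/* The descriptor has been handed back by the nic (its valid bit is clear):
 * unmap the DMA buffer below; on the last descriptor of a frame, free the
 * skb, count the packet on success, reseed the backoff LFSRs on retry
 * errors and, when tx_limit is active, release a deferred frame through
 * nv_tx_flip_ownership().
 */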
2416 2417 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 2418 np->get_tx_ctx->dma_len, 2419 PCI_DMA_TODEVICE); 2420 np->get_tx_ctx->dma = 0; 2421 2422 if (flags & NV_TX2_LASTPACKET) { 2423 if (!(flags & NV_TX2_ERROR)) 2424 dev->stats.tx_packets++; 2425 else { 2426 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { 2427 if (np->driver_data & DEV_HAS_GEAR_MODE) 2428 nv_gear_backoff_reseed(dev); 2429 else 2430 nv_legacybackoff_reseed(dev); 2431 } 2432 } 2433 2434 dev_kfree_skb_any(np->get_tx_ctx->skb); 2435 np->get_tx_ctx->skb = NULL; 2436 2437 if (np->tx_limit) { 2438 nv_tx_flip_ownership(dev); 2439 } 2440 } 2441 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) 2442 np->get_tx.ex = np->first_tx.ex; 2443 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2444 np->get_tx_ctx = np->first_tx_ctx; 2445 } 2446 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { 2447 np->tx_stop = 0; 2448 netif_wake_queue(dev); 2449 } 2450} 2451 2452/* 2453 * nv_tx_timeout: dev->tx_timeout function 2454 * Called with netif_tx_lock held. 2455 */ 2456static void nv_tx_timeout(struct net_device *dev) 2457{ 2458 struct fe_priv *np = netdev_priv(dev); 2459 u8 __iomem *base = get_hwbase(dev); 2460 u32 status; 2461 2462 if (np->msi_flags & NV_MSI_X_ENABLED) 2463 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2464 else 2465 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2466 2467 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); 2468 2469 { 2470 int i; 2471 2472 printk(KERN_INFO "%s: Ring at %lx\n", 2473 dev->name, (unsigned long)np->ring_addr); 2474 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 2475 for (i=0;i<=np->register_size;i+= 32) { 2476 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 2477 i, 2478 readl(base + i + 0), readl(base + i + 4), 2479 readl(base + i + 8), readl(base + i + 12), 2480 readl(base + i + 16), readl(base + i + 20), 2481 readl(base + i + 24), readl(base + i + 28)); 2482 } 2483 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 2484 for (i=0;i<np->tx_ring_size;i+= 4) { 2485 if (!nv_optimized(np)) { 2486 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 2487 i, 2488 le32_to_cpu(np->tx_ring.orig[i].buf), 2489 le32_to_cpu(np->tx_ring.orig[i].flaglen), 2490 le32_to_cpu(np->tx_ring.orig[i+1].buf), 2491 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), 2492 le32_to_cpu(np->tx_ring.orig[i+2].buf), 2493 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), 2494 le32_to_cpu(np->tx_ring.orig[i+3].buf), 2495 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); 2496 } else { 2497 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", 2498 i, 2499 le32_to_cpu(np->tx_ring.ex[i].bufhigh), 2500 le32_to_cpu(np->tx_ring.ex[i].buflow), 2501 le32_to_cpu(np->tx_ring.ex[i].flaglen), 2502 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), 2503 le32_to_cpu(np->tx_ring.ex[i+1].buflow), 2504 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), 2505 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), 2506 le32_to_cpu(np->tx_ring.ex[i+2].buflow), 2507 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), 2508 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), 2509 le32_to_cpu(np->tx_ring.ex[i+3].buflow), 2510 le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); 2511 } 2512 } 2513 } 2514 2515 spin_lock_irq(&np->lock); 2516 2517 /* 1) stop tx engine */ 2518 nv_stop_tx(dev); 2519 2520 /* 2) check that the packets were not sent already: */ 2521 if (!nv_optimized(np)) 2522 nv_tx_done(dev); 2523 else 2524 
nv_tx_done_optimized(dev, np->tx_ring_size); 2525 2526 /* 3) if there are dead entries: clear everything */ 2527 if (np->get_tx_ctx != np->put_tx_ctx) { 2528 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 2529 nv_drain_tx(dev); 2530 nv_init_tx(dev); 2531 setup_hw_rings(dev, NV_SETUP_TX_RING); 2532 } 2533 2534 netif_wake_queue(dev); 2535 2536 /* 4) restart tx engine */ 2537 nv_start_tx(dev); 2538 spin_unlock_irq(&np->lock); 2539} 2540 2541/* 2542 * Called when the nic notices a mismatch between the actual data len on the 2543 * wire and the len indicated in the 802 header 2544 */ 2545static int nv_getlen(struct net_device *dev, void *packet, int datalen) 2546{ 2547 int hdrlen; /* length of the 802 header */ 2548 int protolen; /* length as stored in the proto field */ 2549 2550 /* 1) calculate len according to header */ 2551 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { 2552 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); 2553 hdrlen = VLAN_HLEN; 2554 } else { 2555 protolen = ntohs( ((struct ethhdr *)packet)->h_proto); 2556 hdrlen = ETH_HLEN; 2557 } 2558 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n", 2559 dev->name, datalen, protolen, hdrlen); 2560 if (protolen > ETH_DATA_LEN) 2561 return datalen; /* Value in proto field not a len, no checks possible */ 2562 2563 protolen += hdrlen; 2564 /* consistency checks: */ 2565 if (datalen > ETH_ZLEN) { 2566 if (datalen >= protolen) { 2567 /* more data on wire than in 802 header, trim off 2568 * additional data. 2569 */ 2570 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2571 dev->name, protolen); 2572 return protolen; 2573 } else { 2574 /* less data on wire than mentioned in header. 2575 * Discard the packet. 2576 */ 2577 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n", 2578 dev->name); 2579 return -1; 2580 } 2581 } else { 2582 /* short packet. Accept only if 802 values are also short */ 2583 if (protolen > ETH_ZLEN) { 2584 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n", 2585 dev->name); 2586 return -1; 2587 } 2588 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2589 dev->name, datalen); 2590 return datalen; 2591 } 2592} 2593 2594static int nv_rx_process(struct net_device *dev, int limit) 2595{ 2596 struct fe_priv *np = netdev_priv(dev); 2597 u32 flags; 2598 int rx_work = 0; 2599 struct sk_buff *skb; 2600 int len; 2601 2602 while((np->get_rx.orig != np->put_rx.orig) && 2603 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && 2604 (rx_work < limit)) { 2605 2606 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n", 2607 dev->name, flags); 2608 2609 /* 2610 * the packet is for us - immediately tear down the pci mapping. 2611 * TODO: check if a prefetch of the first cacheline improves 2612 * the performance.
2613 */ 2614 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2615 np->get_rx_ctx->dma_len, 2616 PCI_DMA_FROMDEVICE); 2617 skb = np->get_rx_ctx->skb; 2618 np->get_rx_ctx->skb = NULL; 2619 2620 { 2621 int j; 2622 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2623 for (j=0; j<64; j++) { 2624 if ((j%16) == 0) 2625 dprintk("\n%03x:", j); 2626 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2627 } 2628 dprintk("\n"); 2629 } 2630 /* look at what we actually got: */ 2631 if (np->desc_ver == DESC_VER_1) { 2632 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2633 len = flags & LEN_MASK_V1; 2634 if (unlikely(flags & NV_RX_ERROR)) { 2635 if (flags & NV_RX_ERROR4) { 2636 len = nv_getlen(dev, skb->data, len); 2637 if (len < 0) { 2638 dev->stats.rx_errors++; 2639 dev_kfree_skb(skb); 2640 goto next_pkt; 2641 } 2642 } 2643 /* framing errors are soft errors */ 2644 else if (flags & NV_RX_FRAMINGERR) { 2645 if (flags & NV_RX_SUBSTRACT1) { 2646 len--; 2647 } 2648 } 2649 /* the rest are hard errors */ 2650 else { 2651 if (flags & NV_RX_MISSEDFRAME) 2652 dev->stats.rx_missed_errors++; 2653 if (flags & NV_RX_CRCERR) 2654 dev->stats.rx_crc_errors++; 2655 if (flags & NV_RX_OVERFLOW) 2656 dev->stats.rx_over_errors++; 2657 dev->stats.rx_errors++; 2658 dev_kfree_skb(skb); 2659 goto next_pkt; 2660 } 2661 } 2662 } else { 2663 dev_kfree_skb(skb); 2664 goto next_pkt; 2665 } 2666 } else { 2667 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2668 len = flags & LEN_MASK_V2; 2669 if (unlikely(flags & NV_RX2_ERROR)) { 2670 if (flags & NV_RX2_ERROR4) { 2671 len = nv_getlen(dev, skb->data, len); 2672 if (len < 0) { 2673 dev->stats.rx_errors++; 2674 dev_kfree_skb(skb); 2675 goto next_pkt; 2676 } 2677 } 2678 /* framing errors are soft errors */ 2679 else if (flags & NV_RX2_FRAMINGERR) { 2680 if (flags & NV_RX2_SUBSTRACT1) { 2681 len--; 2682 } 2683 } 2684 /* the rest are hard errors */ 2685 else { 2686 if (flags & NV_RX2_CRCERR) 2687 dev->stats.rx_crc_errors++; 2688 if (flags & NV_RX2_OVERFLOW) 2689 dev->stats.rx_over_errors++; 2690 dev->stats.rx_errors++; 2691 dev_kfree_skb(skb); 2692 goto next_pkt; 2693 } 2694 } 2695 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2696 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2697 skb->ip_summed = CHECKSUM_UNNECESSARY; 2698 } else { 2699 dev_kfree_skb(skb); 2700 goto next_pkt; 2701 } 2702 } 2703 /* got a valid packet - forward it to the network core */ 2704 skb_put(skb, len); 2705 skb->protocol = eth_type_trans(skb, dev); 2706 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n", 2707 dev->name, len, skb->protocol); 2708#ifdef CONFIG_FORCEDETH_NAPI 2709 netif_receive_skb(skb); 2710#else 2711 netif_rx(skb); 2712#endif 2713 dev->last_rx = jiffies; 2714 dev->stats.rx_packets++; 2715 dev->stats.rx_bytes += len; 2716next_pkt: 2717 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2718 np->get_rx.orig = np->first_rx.orig; 2719 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2720 np->get_rx_ctx = np->first_rx_ctx; 2721 2722 rx_work++; 2723 } 2724 2725 return rx_work; 2726} 2727 2728static int nv_rx_process_optimized(struct net_device *dev, int limit) 2729{ 2730 struct fe_priv *np = netdev_priv(dev); 2731 u32 flags; 2732 u32 vlanflags = 0; 2733 int rx_work = 0; 2734 struct sk_buff *skb; 2735 int len; 2736 2737 while((np->get_rx.ex != np->put_rx.ex) && 2738 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && 2739 (rx_work < limit)) { 2740 2741 dprintk(KERN_DEBUG "%s: 
nv_rx_process_optimized: flags 0x%x.\n", 2742 dev->name, flags); 2743 2744 /* 2745 * the packet is for us - immediately tear down the pci mapping. 2746 * TODO: check if a prefetch of the first cacheline improves 2747 * the performance. 2748 */ 2749 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2750 np->get_rx_ctx->dma_len, 2751 PCI_DMA_FROMDEVICE); 2752 skb = np->get_rx_ctx->skb; 2753 np->get_rx_ctx->skb = NULL; 2754 2755 { 2756 int j; 2757 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2758 for (j=0; j<64; j++) { 2759 if ((j%16) == 0) 2760 dprintk("\n%03x:", j); 2761 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2762 } 2763 dprintk("\n"); 2764 } 2765 /* look at what we actually got: */ 2766 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2767 len = flags & LEN_MASK_V2; 2768 if (unlikely(flags & NV_RX2_ERROR)) { 2769 if (flags & NV_RX2_ERROR4) { 2770 len = nv_getlen(dev, skb->data, len); 2771 if (len < 0) { 2772 dev_kfree_skb(skb); 2773 goto next_pkt; 2774 } 2775 } 2776 /* framing errors are soft errors */ 2777 else if (flags & NV_RX2_FRAMINGERR) { 2778 if (flags & NV_RX2_SUBSTRACT1) { 2779 len--; 2780 } 2781 } 2782 /* the rest are hard errors */ 2783 else { 2784 dev_kfree_skb(skb); 2785 goto next_pkt; 2786 } 2787 } 2788 2789 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2790 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2791 skb->ip_summed = CHECKSUM_UNNECESSARY; 2792 2793 /* got a valid packet - forward it to the network core */ 2794 skb_put(skb, len); 2795 skb->protocol = eth_type_trans(skb, dev); 2796 prefetch(skb->data); 2797 2798 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n", 2799 dev->name, len, skb->protocol); 2800 2801 if (likely(!np->vlangrp)) { 2802#ifdef CONFIG_FORCEDETH_NAPI 2803 netif_receive_skb(skb); 2804#else 2805 netif_rx(skb); 2806#endif 2807 } else { 2808 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); 2809 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { 2810#ifdef CONFIG_FORCEDETH_NAPI 2811 vlan_hwaccel_receive_skb(skb, np->vlangrp, 2812 vlanflags & NV_RX3_VLAN_TAG_MASK); 2813#else 2814 vlan_hwaccel_rx(skb, np->vlangrp, 2815 vlanflags & NV_RX3_VLAN_TAG_MASK); 2816#endif 2817 } else { 2818#ifdef CONFIG_FORCEDETH_NAPI 2819 netif_receive_skb(skb); 2820#else 2821 netif_rx(skb); 2822#endif 2823 } 2824 } 2825 2826 dev->last_rx = jiffies; 2827 dev->stats.rx_packets++; 2828 dev->stats.rx_bytes += len; 2829 } else { 2830 dev_kfree_skb(skb); 2831 } 2832next_pkt: 2833 if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) 2834 np->get_rx.ex = np->first_rx.ex; 2835 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2836 np->get_rx_ctx = np->first_rx_ctx; 2837 2838 rx_work++; 2839 } 2840 2841 return rx_work; 2842} 2843 2844static void set_bufsize(struct net_device *dev) 2845{ 2846 struct fe_priv *np = netdev_priv(dev); 2847 2848 if (dev->mtu <= ETH_DATA_LEN) 2849 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; 2850 else 2851 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; 2852} 2853 2854/* 2855 * nv_change_mtu: dev->change_mtu function 2856 * Called with dev_base_lock held for read. 
2857 */ 2858static int nv_change_mtu(struct net_device *dev, int new_mtu) 2859{ 2860 struct fe_priv *np = netdev_priv(dev); 2861 int old_mtu; 2862 2863 if (new_mtu < 64 || new_mtu > np->pkt_limit) 2864 return -EINVAL; 2865 2866 old_mtu = dev->mtu; 2867 dev->mtu = new_mtu; 2868 2869 /* return early if the buffer sizes will not change */ 2870 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) 2871 return 0; 2872 if (old_mtu == new_mtu) 2873 return 0; 2874 2875 /* synchronized against open : rtnl_lock() held by caller */ 2876 if (netif_running(dev)) { 2877 u8 __iomem *base = get_hwbase(dev); 2878 /* 2879 * It seems that the nic preloads valid ring entries into an 2880 * internal buffer. The procedure for flushing everything is 2881 * guessed, there is probably a simpler approach. 2882 * Changing the MTU is a rare event, it shouldn't matter. 2883 */ 2884 nv_disable_irq(dev); 2885 netif_tx_lock_bh(dev); 2886 netif_addr_lock(dev); 2887 spin_lock(&np->lock); 2888 /* stop engines */ 2889 nv_stop_rxtx(dev); 2890 nv_txrx_reset(dev); 2891 /* drain rx queue */ 2892 nv_drain_rxtx(dev); 2893 /* reinit driver view of the rx queue */ 2894 set_bufsize(dev); 2895 if (nv_init_ring(dev)) { 2896 if (!np->in_shutdown) 2897 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2898 } 2899 /* reinit nic view of the rx queue */ 2900 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2901 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2902 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 2903 base + NvRegRingSizes); 2904 pci_push(base); 2905 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2906 pci_push(base); 2907 2908 /* restart rx engine */ 2909 nv_start_rxtx(dev); 2910 spin_unlock(&np->lock); 2911 netif_addr_unlock(dev); 2912 netif_tx_unlock_bh(dev); 2913 nv_enable_irq(dev); 2914 } 2915 return 0; 2916} 2917 2918static void nv_copy_mac_to_hw(struct net_device *dev) 2919{ 2920 u8 __iomem *base = get_hwbase(dev); 2921 u32 mac[2]; 2922 2923 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 2924 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 2925 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 2926 2927 writel(mac[0], base + NvRegMacAddrA); 2928 writel(mac[1], base + NvRegMacAddrB); 2929} 2930 2931/* 2932 * nv_set_mac_address: dev->set_mac_address function 2933 * Called with rtnl_lock() held. 2934 */ 2935static int nv_set_mac_address(struct net_device *dev, void *addr) 2936{ 2937 struct fe_priv *np = netdev_priv(dev); 2938 struct sockaddr *macaddr = (struct sockaddr*)addr; 2939 2940 if (!is_valid_ether_addr(macaddr->sa_data)) 2941 return -EADDRNOTAVAIL; 2942 2943 /* synchronized against open : rtnl_lock() held by caller */ 2944 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); 2945 2946 if (netif_running(dev)) { 2947 netif_tx_lock_bh(dev); 2948 netif_addr_lock(dev); 2949 spin_lock_irq(&np->lock); 2950 2951 /* stop rx engine */ 2952 nv_stop_rx(dev); 2953 2954 /* set mac address */ 2955 nv_copy_mac_to_hw(dev); 2956 2957 /* restart rx engine */ 2958 nv_start_rx(dev); 2959 spin_unlock_irq(&np->lock); 2960 netif_addr_unlock(dev); 2961 netif_tx_unlock_bh(dev); 2962 } else { 2963 nv_copy_mac_to_hw(dev); 2964 } 2965 return 0; 2966} 2967 2968/* 2969 * nv_set_multicast: dev->set_multicast function 2970 * Called with netif_tx_lock held. 
2971 */ 2972static void nv_set_multicast(struct net_device *dev) 2973{ 2974 struct fe_priv *np = netdev_priv(dev); 2975 u8 __iomem *base = get_hwbase(dev); 2976 u32 addr[2]; 2977 u32 mask[2]; 2978 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; 2979 2980 memset(addr, 0, sizeof(addr)); 2981 memset(mask, 0, sizeof(mask)); 2982 2983 if (dev->flags & IFF_PROMISC) { 2984 pff |= NVREG_PFF_PROMISC; 2985 } else { 2986 pff |= NVREG_PFF_MYADDR; 2987 2988 if (dev->flags & IFF_ALLMULTI || dev->mc_list) { 2989 u32 alwaysOff[2]; 2990 u32 alwaysOn[2]; 2991 2992 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; 2993 if (dev->flags & IFF_ALLMULTI) { 2994 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 2995 } else { 2996 struct dev_mc_list *walk; 2997 2998 walk = dev->mc_list; 2999 while (walk != NULL) { 3000 u32 a, b; 3001 a = le32_to_cpu(*(__le32 *) walk->dmi_addr); 3002 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4])); 3003 alwaysOn[0] &= a; 3004 alwaysOff[0] &= ~a; 3005 alwaysOn[1] &= b; 3006 alwaysOff[1] &= ~b; 3007 walk = walk->next; 3008 } 3009 } 3010 addr[0] = alwaysOn[0]; 3011 addr[1] = alwaysOn[1]; 3012 mask[0] = alwaysOn[0] | alwaysOff[0]; 3013 mask[1] = alwaysOn[1] | alwaysOff[1]; 3014 } else { 3015 mask[0] = NVREG_MCASTMASKA_NONE; 3016 mask[1] = NVREG_MCASTMASKB_NONE; 3017 } 3018 } 3019 addr[0] |= NVREG_MCASTADDRA_FORCE; 3020 pff |= NVREG_PFF_ALWAYS; 3021 spin_lock_irq(&np->lock); 3022 nv_stop_rx(dev); 3023 writel(addr[0], base + NvRegMulticastAddrA); 3024 writel(addr[1], base + NvRegMulticastAddrB); 3025 writel(mask[0], base + NvRegMulticastMaskA); 3026 writel(mask[1], base + NvRegMulticastMaskB); 3027 writel(pff, base + NvRegPacketFilterFlags); 3028 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", 3029 dev->name); 3030 nv_start_rx(dev); 3031 spin_unlock_irq(&np->lock); 3032} 3033 3034static void nv_update_pause(struct net_device *dev, u32 pause_flags) 3035{ 3036 struct fe_priv *np = netdev_priv(dev); 3037 u8 __iomem *base = get_hwbase(dev); 3038 3039 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); 3040 3041 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { 3042 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; 3043 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { 3044 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); 3045 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3046 } else { 3047 writel(pff, base + NvRegPacketFilterFlags); 3048 } 3049 } 3050 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 3051 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 3052 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 3053 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 3054 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 3055 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 3056 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) 3057 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 3058 writel(pause_enable, base + NvRegTxPauseFrame); 3059 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 3060 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3061 } else { 3062 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 3063 writel(regmisc, base + NvRegMisc1); 3064 } 3065 } 3066} 3067 3068/** 3069 * nv_update_linkspeed: Setup the MAC according to the link partner 3070 * @dev: Network device to be configured 3071 * 3072 * The function queries the PHY and checks if there is a link partner. 3073 * If yes, then it sets up the MAC accordingly. 
Otherwise, the MAC is 3074 * set to 10 MBit HD. 3075 * 3076 * The function returns 0 if there is no link partner and 1 if there is 3077 * a good link partner. 3078 */ 3079static int nv_update_linkspeed(struct net_device *dev) 3080{ 3081 struct fe_priv *np = netdev_priv(dev); 3082 u8 __iomem *base = get_hwbase(dev); 3083 int adv = 0; 3084 int lpa = 0; 3085 int adv_lpa, adv_pause, lpa_pause; 3086 int newls = np->linkspeed; 3087 int newdup = np->duplex; 3088 int mii_status; 3089 int retval = 0; 3090 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 3091 u32 txrxFlags = 0; 3092 u32 phy_exp; 3093 3094 /* BMSR_LSTATUS is latched, read it twice: 3095 * we want the current value. 3096 */ 3097 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3098 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3099 3100 if (!(mii_status & BMSR_LSTATUS)) { 3101 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", 3102 dev->name); 3103 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3104 newdup = 0; 3105 retval = 0; 3106 goto set_speed; 3107 } 3108 3109 if (np->autoneg == 0) { 3110 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", 3111 dev->name, np->fixed_mode); 3112 if (np->fixed_mode & LPA_100FULL) { 3113 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3114 newdup = 1; 3115 } else if (np->fixed_mode & LPA_100HALF) { 3116 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3117 newdup = 0; 3118 } else if (np->fixed_mode & LPA_10FULL) { 3119 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3120 newdup = 1; 3121 } else { 3122 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3123 newdup = 0; 3124 } 3125 retval = 1; 3126 goto set_speed; 3127 } 3128 /* check auto negotiation is complete */ 3129 if (!(mii_status & BMSR_ANEGCOMPLETE)) { 3130 /* still in autonegotiation - configure nic for 10 MBit HD and wait. 
*/ 3131 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3132 newdup = 0; 3133 retval = 0; 3134 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); 3135 goto set_speed; 3136 } 3137 3138 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3139 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 3140 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", 3141 dev->name, adv, lpa); 3142 3143 retval = 1; 3144 if (np->gigabit == PHY_GIGABIT) { 3145 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3146 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); 3147 3148 if ((control_1000 & ADVERTISE_1000FULL) && 3149 (status_1000 & LPA_1000FULL)) { 3150 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", 3151 dev->name); 3152 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 3153 newdup = 1; 3154 goto set_speed; 3155 } 3156 } 3157 3158 /* FIXME: handle parallel detection properly */ 3159 adv_lpa = lpa & adv; 3160 if (adv_lpa & LPA_100FULL) { 3161 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3162 newdup = 1; 3163 } else if (adv_lpa & LPA_100HALF) { 3164 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3165 newdup = 0; 3166 } else if (adv_lpa & LPA_10FULL) { 3167 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3168 newdup = 1; 3169 } else if (adv_lpa & LPA_10HALF) { 3170 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3171 newdup = 0; 3172 } else { 3173 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); 3174 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3175 newdup = 0; 3176 } 3177 3178set_speed: 3179 if (np->duplex == newdup && np->linkspeed == newls) 3180 return retval; 3181 3182 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", 3183 dev->name, np->linkspeed, np->duplex, newls, newdup); 3184 3185 np->duplex = newdup; 3186 np->linkspeed = newls; 3187 3188 /* The transmitter and receiver must be restarted for safe update */ 3189 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) { 3190 txrxFlags |= NV_RESTART_TX; 3191 nv_stop_tx(dev); 3192 } 3193 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { 3194 txrxFlags |= NV_RESTART_RX; 3195 nv_stop_rx(dev); 3196 } 3197 3198 if (np->gigabit == PHY_GIGABIT) { 3199 phyreg = readl(base + NvRegSlotTime); 3200 phyreg &= ~(0x3FF00); 3201 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) || 3202 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)) 3203 phyreg |= NVREG_SLOTTIME_10_100_FULL; 3204 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 3205 phyreg |= NVREG_SLOTTIME_1000_FULL; 3206 writel(phyreg, base + NvRegSlotTime); 3207 } 3208 3209 phyreg = readl(base + NvRegPhyInterface); 3210 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 3211 if (np->duplex == 0) 3212 phyreg |= PHY_HALF; 3213 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 3214 phyreg |= PHY_100; 3215 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 3216 phyreg |= PHY_1000; 3217 writel(phyreg, base + NvRegPhyInterface); 3218 3219 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ 3220 if (phyreg & PHY_RGMII) { 3221 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { 3222 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 3223 } else { 3224 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { 3225 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) 3226 txreg 
= NVREG_TX_DEFERRAL_RGMII_STRETCH_10; 3227 else 3228 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100; 3229 } else { 3230 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 3231 } 3232 } 3233 } else { 3234 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) 3235 txreg = NVREG_TX_DEFERRAL_MII_STRETCH; 3236 else 3237 txreg = NVREG_TX_DEFERRAL_DEFAULT; 3238 } 3239 writel(txreg, base + NvRegTxDeferral); 3240 3241 if (np->desc_ver == DESC_VER_1) { 3242 txreg = NVREG_TX_WM_DESC1_DEFAULT; 3243 } else { 3244 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 3245 txreg = NVREG_TX_WM_DESC2_3_1000; 3246 else 3247 txreg = NVREG_TX_WM_DESC2_3_DEFAULT; 3248 } 3249 writel(txreg, base + NvRegTxWatermark); 3250 3251 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 3252 base + NvRegMisc1); 3253 pci_push(base); 3254 writel(np->linkspeed, base + NvRegLinkSpeed); 3255 pci_push(base); 3256 3257 pause_flags = 0; 3258 /* setup pause frame */ 3259 if (np->duplex != 0) { 3260 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 3261 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); 3262 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 3263 3264 switch (adv_pause) { 3265 case ADVERTISE_PAUSE_CAP: 3266 if (lpa_pause & LPA_PAUSE_CAP) { 3267 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3268 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3269 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3270 } 3271 break; 3272 case ADVERTISE_PAUSE_ASYM: 3273 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 3274 { 3275 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3276 } 3277 break; 3278 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: 3279 if (lpa_pause & LPA_PAUSE_CAP) 3280 { 3281 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3282 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3283 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3284 } 3285 if (lpa_pause == LPA_PAUSE_ASYM) 3286 { 3287 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3288 } 3289 break; 3290 } 3291 } else { 3292 pause_flags = np->pause_flags; 3293 } 3294 } 3295 nv_update_pause(dev, pause_flags); 3296 3297 if (txrxFlags & NV_RESTART_TX) 3298 nv_start_tx(dev); 3299 if (txrxFlags & NV_RESTART_RX) 3300 nv_start_rx(dev); 3301 3302 return retval; 3303} 3304 3305static void nv_linkchange(struct net_device *dev) 3306{ 3307 if (nv_update_linkspeed(dev)) { 3308 if (!netif_carrier_ok(dev)) { 3309 netif_carrier_on(dev); 3310 printk(KERN_INFO "%s: link up.\n", dev->name); 3311 nv_start_rx(dev); 3312 } 3313 } else { 3314 if (netif_carrier_ok(dev)) { 3315 netif_carrier_off(dev); 3316 printk(KERN_INFO "%s: link down.\n", dev->name); 3317 nv_stop_rx(dev); 3318 } 3319 } 3320} 3321 3322static void nv_link_irq(struct net_device *dev) 3323{ 3324 u8 __iomem *base = get_hwbase(dev); 3325 u32 miistat; 3326 3327 miistat = readl(base + NvRegMIIStatus); 3328 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus); 3329 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); 3330 3331 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 3332 nv_linkchange(dev); 3333 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); 3334} 3335 3336static void nv_msi_workaround(struct fe_priv *np) 3337{ 3338 3339 /* Need to toggle the msi irq mask within the ethernet device, 3340 * otherwise, future interrupts will not be detected. 
3341 */ 3342 if (np->msi_flags & NV_MSI_ENABLED) { 3343 u8 __iomem *base = np->base; 3344 3345 writel(0, base + NvRegMSIIrqMask); 3346 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3347 } 3348} 3349 3350static irqreturn_t nv_nic_irq(int foo, void *data) 3351{ 3352 struct net_device *dev = (struct net_device *) data; 3353 struct fe_priv *np = netdev_priv(dev); 3354 u8 __iomem *base = get_hwbase(dev); 3355 u32 events; 3356 int i; 3357 3358 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 3359 3360 for (i=0; ; i++) { 3361 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3362 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3363 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3364 } else { 3365 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3366 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 3367 } 3368 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3369 if (!(events & np->irqmask)) 3370 break; 3371 3372 nv_msi_workaround(np); 3373 3374 spin_lock(&np->lock); 3375 nv_tx_done(dev); 3376 spin_unlock(&np->lock); 3377 3378#ifdef CONFIG_FORCEDETH_NAPI 3379 if (events & NVREG_IRQ_RX_ALL) { 3380 netif_rx_schedule(dev, &np->napi); 3381 3382 /* Disable further receive irqs */ 3383 spin_lock(&np->lock); 3384 np->irqmask &= ~NVREG_IRQ_RX_ALL; 3385 3386 if (np->msi_flags & NV_MSI_X_ENABLED) 3387 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3388 else 3389 writel(np->irqmask, base + NvRegIrqMask); 3390 spin_unlock(&np->lock); 3391 } 3392#else 3393 if (nv_rx_process(dev, RX_WORK_PER_LOOP)) { 3394 if (unlikely(nv_alloc_rx(dev))) { 3395 spin_lock(&np->lock); 3396 if (!np->in_shutdown) 3397 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3398 spin_unlock(&np->lock); 3399 } 3400 } 3401#endif 3402 if (unlikely(events & NVREG_IRQ_LINK)) { 3403 spin_lock(&np->lock); 3404 nv_link_irq(dev); 3405 spin_unlock(&np->lock); 3406 } 3407 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3408 spin_lock(&np->lock); 3409 nv_linkchange(dev); 3410 spin_unlock(&np->lock); 3411 np->link_timeout = jiffies + LINK_TIMEOUT; 3412 } 3413 if (unlikely(events & (NVREG_IRQ_TX_ERR))) { 3414 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 3415 dev->name, events); 3416 } 3417 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) { 3418 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x.
Please report\n", 3419 dev->name, events); 3420 } 3421 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { 3422 spin_lock(&np->lock); 3423 /* disable interrupts on the nic */ 3424 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3425 writel(0, base + NvRegIrqMask); 3426 else 3427 writel(np->irqmask, base + NvRegIrqMask); 3428 pci_push(base); 3429 3430 if (!np->in_shutdown) { 3431 np->nic_poll_irq = np->irqmask; 3432 np->recover_error = 1; 3433 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3434 } 3435 spin_unlock(&np->lock); 3436 break; 3437 } 3438 if (unlikely(i > max_interrupt_work)) { 3439 spin_lock(&np->lock); 3440 /* disable interrupts on the nic */ 3441 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3442 writel(0, base + NvRegIrqMask); 3443 else 3444 writel(np->irqmask, base + NvRegIrqMask); 3445 pci_push(base); 3446 3447 if (!np->in_shutdown) { 3448 np->nic_poll_irq = np->irqmask; 3449 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3450 } 3451 spin_unlock(&np->lock); 3452 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); 3453 break; 3454 } 3455 3456 } 3457 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 3458 3459 return IRQ_RETVAL(i); 3460} 3461 3462/** 3463 * All _optimized functions are used to help increase performance 3464 * (reduce CPU and increase throughput). They use descriptor version 3, 3465 * compiler directives, and reduce memory accesses. 3466 */ 3467static irqreturn_t nv_nic_irq_optimized(int foo, void *data) 3468{ 3469 struct net_device *dev = (struct net_device *) data; 3470 struct fe_priv *np = netdev_priv(dev); 3471 u8 __iomem *base = get_hwbase(dev); 3472 u32 events; 3473 int i; 3474 3475 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name); 3476 3477 for (i=0; ; i++) { 3478 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3479 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3480 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3481 } else { 3482 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3483 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 3484 } 3485 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3486 if (!(events & np->irqmask)) 3487 break; 3488 3489 nv_msi_workaround(np); 3490 3491 spin_lock(&np->lock); 3492 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3493 spin_unlock(&np->lock); 3494 3495#ifdef CONFIG_FORCEDETH_NAPI 3496 if (events & NVREG_IRQ_RX_ALL) { 3497 netif_rx_schedule(dev, &np->napi); 3498 3499 /* Disable further receive irqs */ 3500 spin_lock(&np->lock); 3501 np->irqmask &= ~NVREG_IRQ_RX_ALL; 3502 3503 if (np->msi_flags & NV_MSI_X_ENABLED) 3504 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3505 else 3506 writel(np->irqmask, base + NvRegIrqMask); 3507 spin_unlock(&np->lock); 3508 } 3509#else 3510 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { 3511 if (unlikely(nv_alloc_rx_optimized(dev))) { 3512 spin_lock(&np->lock); 3513 if (!np->in_shutdown) 3514 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3515 spin_unlock(&np->lock); 3516 } 3517 } 3518#endif 3519 if (unlikely(events & NVREG_IRQ_LINK)) { 3520 spin_lock(&np->lock); 3521 nv_link_irq(dev); 3522 spin_unlock(&np->lock); 3523 } 3524 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3525 spin_lock(&np->lock); 3526 nv_linkchange(dev); 3527 spin_unlock(&np->lock); 3528 np->link_timeout = jiffies + LINK_TIMEOUT; 3529 } 3530 if (unlikely(events & (NVREG_IRQ_TX_ERR))) { 3531 dprintk(KERN_DEBUG "%s: received irq with events 0x%x.
Probably TX fail.\n", 3532 dev->name, events); 3533 } 3534 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) { 3535 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 3536 dev->name, events); 3537 } 3538 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { 3539 spin_lock(&np->lock); 3540 /* disable interrupts on the nic */ 3541 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3542 writel(0, base + NvRegIrqMask); 3543 else 3544 writel(np->irqmask, base + NvRegIrqMask); 3545 pci_push(base); 3546 3547 if (!np->in_shutdown) { 3548 np->nic_poll_irq = np->irqmask; 3549 np->recover_error = 1; 3550 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3551 } 3552 spin_unlock(&np->lock); 3553 break; 3554 } 3555 3556 if (unlikely(i > max_interrupt_work)) { 3557 spin_lock(&np->lock); 3558 /* disable interrupts on the nic */ 3559 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3560 writel(0, base + NvRegIrqMask); 3561 else 3562 writel(np->irqmask, base + NvRegIrqMask); 3563 pci_push(base); 3564 3565 if (!np->in_shutdown) { 3566 np->nic_poll_irq = np->irqmask; 3567 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3568 } 3569 spin_unlock(&np->lock); 3570 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); 3571 break; 3572 } 3573 3574 } 3575 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name); 3576 3577 return IRQ_RETVAL(i); 3578} 3579 3580static irqreturn_t nv_nic_irq_tx(int foo, void *data) 3581{ 3582 struct net_device *dev = (struct net_device *) data; 3583 struct fe_priv *np = netdev_priv(dev); 3584 u8 __iomem *base = get_hwbase(dev); 3585 u32 events; 3586 int i; 3587 unsigned long flags; 3588 3589 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 3590 3591 for (i=0; ; i++) { 3592 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3593 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3594 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 3595 if (!(events & np->irqmask)) 3596 break; 3597 3598 spin_lock_irqsave(&np->lock, flags); 3599 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3600 spin_unlock_irqrestore(&np->lock, flags); 3601 3602 if (unlikely(events & (NVREG_IRQ_TX_ERR))) { 3603 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. 
Probably TX fail.\n", 3604 dev->name, events); 3605 } 3606 if (unlikely(i > max_interrupt_work)) { 3607 spin_lock_irqsave(&np->lock, flags); 3608 /* disable interrupts on the nic */ 3609 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3610 pci_push(base); 3611 3612 if (!np->in_shutdown) { 3613 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 3614 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3615 } 3616 spin_unlock_irqrestore(&np->lock, flags); 3617 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 3618 break; 3619 } 3620 3621 } 3622 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); 3623 3624 return IRQ_RETVAL(i); 3625} 3626 3627#ifdef CONFIG_FORCEDETH_NAPI 3628static int nv_napi_poll(struct napi_struct *napi, int budget) 3629{ 3630 struct fe_priv *np = container_of(napi, struct fe_priv, napi); 3631 struct net_device *dev = np->dev; 3632 u8 __iomem *base = get_hwbase(dev); 3633 unsigned long flags; 3634 int pkts, retcode; 3635 3636 if (!nv_optimized(np)) { 3637 pkts = nv_rx_process(dev, budget); 3638 retcode = nv_alloc_rx(dev); 3639 } else { 3640 pkts = nv_rx_process_optimized(dev, budget); 3641 retcode = nv_alloc_rx_optimized(dev); 3642 } 3643 3644 if (retcode) { 3645 spin_lock_irqsave(&np->lock, flags); 3646 if (!np->in_shutdown) 3647 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3648 spin_unlock_irqrestore(&np->lock, flags); 3649 } 3650 3651 if (pkts < budget) { 3652 /* re-enable receive interrupts */ 3653 spin_lock_irqsave(&np->lock, flags); 3654 3655 __netif_rx_complete(dev, napi); 3656 3657 np->irqmask |= NVREG_IRQ_RX_ALL; 3658 if (np->msi_flags & NV_MSI_X_ENABLED) 3659 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3660 else 3661 writel(np->irqmask, base + NvRegIrqMask); 3662 3663 spin_unlock_irqrestore(&np->lock, flags); 3664 } 3665 return pkts; 3666} 3667#endif 3668 3669#ifdef CONFIG_FORCEDETH_NAPI 3670static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3671{ 3672 struct net_device *dev = (struct net_device *) data; 3673 struct fe_priv *np = netdev_priv(dev); 3674 u8 __iomem *base = get_hwbase(dev); 3675 u32 events; 3676 3677 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3678 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3679 3680 if (events) { 3681 netif_rx_schedule(dev, &np->napi); 3682 /* disable receive interrupts on the nic */ 3683 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3684 pci_push(base); 3685 } 3686 return IRQ_HANDLED; 3687} 3688#else 3689static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3690{ 3691 struct net_device *dev = (struct net_device *) data; 3692 struct fe_priv *np = netdev_priv(dev); 3693 u8 __iomem *base = get_hwbase(dev); 3694 u32 events; 3695 int i; 3696 unsigned long flags; 3697 3698 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 3699 3700 for (i=0; ; i++) { 3701 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3702 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3703 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 3704 if (!(events & np->irqmask)) 3705 break; 3706 3707 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { 3708 if (unlikely(nv_alloc_rx_optimized(dev))) { 3709 spin_lock_irqsave(&np->lock, flags); 3710 if (!np->in_shutdown) 3711 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3712 spin_unlock_irqrestore(&np->lock, flags); 3713 } 3714 } 3715 3716 if (unlikely(i > max_interrupt_work)) { 3717 spin_lock_irqsave(&np->lock, flags); 3718 /* disable interrupts on the nic */ 3719 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3720 
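/* Too much rx work in one invocation: leave the rx interrupts masked and let the nic_poll timer finish servicing the ring from timer context (see nv_do_nic_poll). */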
pci_push(base); 3721 3722 if (!np->in_shutdown) { 3723 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 3724 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3725 } 3726 spin_unlock_irqrestore(&np->lock, flags); 3727 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 3728 break; 3729 } 3730 } 3731 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); 3732 3733 return IRQ_RETVAL(i); 3734} 3735#endif 3736 3737static irqreturn_t nv_nic_irq_other(int foo, void *data) 3738{ 3739 struct net_device *dev = (struct net_device *) data; 3740 struct fe_priv *np = netdev_priv(dev); 3741 u8 __iomem *base = get_hwbase(dev); 3742 u32 events; 3743 int i; 3744 unsigned long flags; 3745 3746 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 3747 3748 for (i=0; ; i++) { 3749 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3750 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3751 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3752 if (!(events & np->irqmask)) 3753 break; 3754 3755 /* check tx in case we reached max loop limit in tx isr */ 3756 spin_lock_irqsave(&np->lock, flags); 3757 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3758 spin_unlock_irqrestore(&np->lock, flags); 3759 3760 if (events & NVREG_IRQ_LINK) { 3761 spin_lock_irqsave(&np->lock, flags); 3762 nv_link_irq(dev); 3763 spin_unlock_irqrestore(&np->lock, flags); 3764 } 3765 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 3766 spin_lock_irqsave(&np->lock, flags); 3767 nv_linkchange(dev); 3768 spin_unlock_irqrestore(&np->lock, flags); 3769 np->link_timeout = jiffies + LINK_TIMEOUT; 3770 } 3771 if (events & NVREG_IRQ_RECOVER_ERROR) { 3772 spin_lock_irq(&np->lock); 3773 /* disable interrupts on the nic */ 3774 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3775 pci_push(base); 3776 3777 if (!np->in_shutdown) { 3778 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3779 np->recover_error = 1; 3780 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3781 } 3782 spin_unlock_irq(&np->lock); 3783 break; 3784 } 3785 if (events & (NVREG_IRQ_UNKNOWN)) { 3786 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. 
Please report\n", 3787 dev->name, events); 3788 } 3789 if (unlikely(i > max_interrupt_work)) { 3790 spin_lock_irqsave(&np->lock, flags); 3791 /* disable interrupts on the nic */ 3792 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3793 pci_push(base); 3794 3795 if (!np->in_shutdown) { 3796 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3797 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3798 } 3799 spin_unlock_irqrestore(&np->lock, flags); 3800 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 3801 break; 3802 } 3803 3804 } 3805 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); 3806 3807 return IRQ_RETVAL(i); 3808} 3809 3810static irqreturn_t nv_nic_irq_test(int foo, void *data) 3811{ 3812 struct net_device *dev = (struct net_device *) data; 3813 struct fe_priv *np = netdev_priv(dev); 3814 u8 __iomem *base = get_hwbase(dev); 3815 u32 events; 3816 3817 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name); 3818 3819 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3820 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3821 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3822 } else { 3823 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3824 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3825 } 3826 pci_push(base); 3827 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3828 if (!(events & NVREG_IRQ_TIMER)) 3829 return IRQ_RETVAL(0); 3830 3831 nv_msi_workaround(np); 3832 3833 spin_lock(&np->lock); 3834 np->intr_test = 1; 3835 spin_unlock(&np->lock); 3836 3837 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); 3838 3839 return IRQ_RETVAL(1); 3840} 3841 3842static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) 3843{ 3844 u8 __iomem *base = get_hwbase(dev); 3845 int i; 3846 u32 msixmap = 0; 3847 3848 /* Each interrupt bit can be mapped to a MSIX vector (4 bits). 3849 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents 3850 * the remaining 8 interrupts. 
3851 */ 3852 for (i = 0; i < 8; i++) { 3853 if ((irqmask >> i) & 0x1) { 3854 msixmap |= vector << (i << 2); 3855 } 3856 } 3857 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3858 3859 msixmap = 0; 3860 for (i = 0; i < 8; i++) { 3861 if ((irqmask >> (i + 8)) & 0x1) { 3862 msixmap |= vector << (i << 2); 3863 } 3864 } 3865 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3866} 3867 3868static int nv_request_irq(struct net_device *dev, int intr_test) 3869{ 3870 struct fe_priv *np = get_nvpriv(dev); 3871 u8 __iomem *base = get_hwbase(dev); 3872 int ret = 1; 3873 int i; 3874 irqreturn_t (*handler)(int foo, void *data); 3875 3876 if (intr_test) { 3877 handler = nv_nic_irq_test; 3878 } else { 3879 if (nv_optimized(np)) 3880 handler = nv_nic_irq_optimized; 3881 else 3882 handler = nv_nic_irq; 3883 } 3884 3885 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3886 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3887 np->msi_x_entry[i].entry = i; 3888 } 3889 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 3890 np->msi_flags |= NV_MSI_X_ENABLED; 3891 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3892 /* Request irq for rx handling */ 3893 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) { 3894 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 3895 pci_disable_msix(np->pci_dev); 3896 np->msi_flags &= ~NV_MSI_X_ENABLED; 3897 goto out_err; 3898 } 3899 /* Request irq for tx handling */ 3900 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) { 3901 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 3902 pci_disable_msix(np->pci_dev); 3903 np->msi_flags &= ~NV_MSI_X_ENABLED; 3904 goto out_free_rx; 3905 } 3906 /* Request irq for link and timer handling */ 3907 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) { 3908 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 3909 pci_disable_msix(np->pci_dev); 3910 np->msi_flags &= ~NV_MSI_X_ENABLED; 3911 goto out_free_tx; 3912 } 3913 /* map interrupts to their respective vector */ 3914 writel(0, base + NvRegMSIXMap0); 3915 writel(0, base + NvRegMSIXMap1); 3916 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 3917 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 3918 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 3919 } else { 3920 /* Request irq for all interrupts */ 3921 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 3922 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3923 pci_disable_msix(np->pci_dev); 3924 np->msi_flags &= ~NV_MSI_X_ENABLED; 3925 goto out_err; 3926 } 3927 3928 /* map interrupts to vector 0 */ 3929 writel(0, base + NvRegMSIXMap0); 3930 writel(0, base + NvRegMSIXMap1); 3931 } 3932 } 3933 } 3934 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3935 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3936 np->msi_flags |= NV_MSI_ENABLED; 3937 dev->irq = np->pci_dev->irq; 3938 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3939 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3940 pci_disable_msi(np->pci_dev); 3941 np->msi_flags &= ~NV_MSI_ENABLED; 3942 dev->irq = np->pci_dev->irq; 3943 goto out_err; 3944 } 3945 3946 
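/* Plain MSI provides only a single vector, so all interrupt sources are routed to vector 0 and that vector is unmasked via NvRegMSIIrqMask below. */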
/* map interrupts to vector 0 */ 3947 writel(0, base + NvRegMSIMap0); 3948 writel(0, base + NvRegMSIMap1); 3949 /* enable msi vector 0 */ 3950 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3951 } 3952 } 3953 if (ret != 0) { 3954 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) 3955 goto out_err; 3956 3957 } 3958 3959 return 0; 3960out_free_tx: 3961 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); 3962out_free_rx: 3963 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); 3964out_err: 3965 return 1; 3966} 3967 3968static void nv_free_irq(struct net_device *dev) 3969{ 3970 struct fe_priv *np = get_nvpriv(dev); 3971 int i; 3972 3973 if (np->msi_flags & NV_MSI_X_ENABLED) { 3974 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3975 free_irq(np->msi_x_entry[i].vector, dev); 3976 } 3977 pci_disable_msix(np->pci_dev); 3978 np->msi_flags &= ~NV_MSI_X_ENABLED; 3979 } else { 3980 free_irq(np->pci_dev->irq, dev); 3981 if (np->msi_flags & NV_MSI_ENABLED) { 3982 pci_disable_msi(np->pci_dev); 3983 np->msi_flags &= ~NV_MSI_ENABLED; 3984 } 3985 } 3986} 3987 3988static void nv_do_nic_poll(unsigned long data) 3989{ 3990 struct net_device *dev = (struct net_device *) data; 3991 struct fe_priv *np = netdev_priv(dev); 3992 u8 __iomem *base = get_hwbase(dev); 3993 u32 mask = 0; 3994 3995 /* 3996 * First disable irq(s) and then 3997 * reenable interrupts on the nic, we have to do this before calling 3998 * nv_nic_irq because that may decide to do otherwise 3999 */ 4000 4001 if (!using_multi_irqs(dev)) { 4002 if (np->msi_flags & NV_MSI_X_ENABLED) 4003 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 4004 else 4005 disable_irq_lockdep(np->pci_dev->irq); 4006 mask = np->irqmask; 4007 } else { 4008 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4009 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 4010 mask |= NVREG_IRQ_RX_ALL; 4011 } 4012 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4013 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 4014 mask |= NVREG_IRQ_TX_ALL; 4015 } 4016 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4017 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 4018 mask |= NVREG_IRQ_OTHER; 4019 } 4020 } 4021 np->nic_poll_irq = 0; 4022 4023 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ 4024 4025 if (np->recover_error) { 4026 np->recover_error = 0; 4027 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n"); 4028 if (netif_running(dev)) { 4029 netif_tx_lock_bh(dev); 4030 netif_addr_lock(dev); 4031 spin_lock(&np->lock); 4032 /* stop engines */ 4033 nv_stop_rxtx(dev); 4034 nv_txrx_reset(dev); 4035 /* drain rx queue */ 4036 nv_drain_rxtx(dev); 4037 /* reinit driver view of the rx queue */ 4038 set_bufsize(dev); 4039 if (nv_init_ring(dev)) { 4040 if (!np->in_shutdown) 4041 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4042 } 4043 /* reinit nic view of the rx queue */ 4044 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4045 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4046 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4047 base + NvRegRingSizes); 4048 pci_push(base); 4049 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4050 pci_push(base); 4051 4052 /* restart rx engine */ 4053 nv_start_rxtx(dev); 4054 spin_unlock(&np->lock); 4055 netif_addr_unlock(dev); 4056 netif_tx_unlock_bh(dev); 4057 } 4058 } 4059 4060 4061 writel(mask, base 
+ NvRegIrqMask); 4062 pci_push(base); 4063 4064 if (!using_multi_irqs(dev)) { 4065 if (nv_optimized(np)) 4066 nv_nic_irq_optimized(0, dev); 4067 else 4068 nv_nic_irq(0, dev); 4069 if (np->msi_flags & NV_MSI_X_ENABLED) 4070 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 4071 else 4072 enable_irq_lockdep(np->pci_dev->irq); 4073 } else { 4074 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4075 nv_nic_irq_rx(0, dev); 4076 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 4077 } 4078 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4079 nv_nic_irq_tx(0, dev); 4080 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 4081 } 4082 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4083 nv_nic_irq_other(0, dev); 4084 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 4085 } 4086 } 4087} 4088 4089#ifdef CONFIG_NET_POLL_CONTROLLER 4090static void nv_poll_controller(struct net_device *dev) 4091{ 4092 nv_do_nic_poll((unsigned long) dev); 4093} 4094#endif 4095 4096static void nv_do_stats_poll(unsigned long data) 4097{ 4098 struct net_device *dev = (struct net_device *) data; 4099 struct fe_priv *np = netdev_priv(dev); 4100 4101 nv_get_hw_stats(dev); 4102 4103 if (!np->in_shutdown) 4104 mod_timer(&np->stats_poll, 4105 round_jiffies(jiffies + STATS_INTERVAL)); 4106} 4107 4108static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 4109{ 4110 struct fe_priv *np = netdev_priv(dev); 4111 strcpy(info->driver, DRV_NAME); 4112 strcpy(info->version, FORCEDETH_VERSION); 4113 strcpy(info->bus_info, pci_name(np->pci_dev)); 4114} 4115 4116static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4117{ 4118 struct fe_priv *np = netdev_priv(dev); 4119 wolinfo->supported = WAKE_MAGIC; 4120 4121 spin_lock_irq(&np->lock); 4122 if (np->wolenabled) 4123 wolinfo->wolopts = WAKE_MAGIC; 4124 spin_unlock_irq(&np->lock); 4125} 4126 4127static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4128{ 4129 struct fe_priv *np = netdev_priv(dev); 4130 u8 __iomem *base = get_hwbase(dev); 4131 u32 flags = 0; 4132 4133 if (wolinfo->wolopts == 0) { 4134 np->wolenabled = 0; 4135 } else if (wolinfo->wolopts & WAKE_MAGIC) { 4136 np->wolenabled = 1; 4137 flags = NVREG_WAKEUPFLAGS_ENABLE; 4138 } 4139 if (netif_running(dev)) { 4140 spin_lock_irq(&np->lock); 4141 writel(flags, base + NvRegWakeUpFlags); 4142 spin_unlock_irq(&np->lock); 4143 } 4144 return 0; 4145} 4146 4147static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 4148{ 4149 struct fe_priv *np = netdev_priv(dev); 4150 int adv; 4151 4152 spin_lock_irq(&np->lock); 4153 ecmd->port = PORT_MII; 4154 if (!netif_running(dev)) { 4155 /* We do not track link speed / duplex setting if the 4156 * interface is disabled. 
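 * (nv_update_linkspeed() returns nonzero when the PHY reports an active link, and the carrier state is updated to match.)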
Force a link check */ 4157 if (nv_update_linkspeed(dev)) { 4158 if (!netif_carrier_ok(dev)) 4159 netif_carrier_on(dev); 4160 } else { 4161 if (netif_carrier_ok(dev)) 4162 netif_carrier_off(dev); 4163 } 4164 } 4165 4166 if (netif_carrier_ok(dev)) { 4167 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 4168 case NVREG_LINKSPEED_10: 4169 ecmd->speed = SPEED_10; 4170 break; 4171 case NVREG_LINKSPEED_100: 4172 ecmd->speed = SPEED_100; 4173 break; 4174 case NVREG_LINKSPEED_1000: 4175 ecmd->speed = SPEED_1000; 4176 break; 4177 } 4178 ecmd->duplex = DUPLEX_HALF; 4179 if (np->duplex) 4180 ecmd->duplex = DUPLEX_FULL; 4181 } else { 4182 ecmd->speed = -1; 4183 ecmd->duplex = -1; 4184 } 4185 4186 ecmd->autoneg = np->autoneg; 4187 4188 ecmd->advertising = ADVERTISED_MII; 4189 if (np->autoneg) { 4190 ecmd->advertising |= ADVERTISED_Autoneg; 4191 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4192 if (adv & ADVERTISE_10HALF) 4193 ecmd->advertising |= ADVERTISED_10baseT_Half; 4194 if (adv & ADVERTISE_10FULL) 4195 ecmd->advertising |= ADVERTISED_10baseT_Full; 4196 if (adv & ADVERTISE_100HALF) 4197 ecmd->advertising |= ADVERTISED_100baseT_Half; 4198 if (adv & ADVERTISE_100FULL) 4199 ecmd->advertising |= ADVERTISED_100baseT_Full; 4200 if (np->gigabit == PHY_GIGABIT) { 4201 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4202 if (adv & ADVERTISE_1000FULL) 4203 ecmd->advertising |= ADVERTISED_1000baseT_Full; 4204 } 4205 } 4206 ecmd->supported = (SUPPORTED_Autoneg | 4207 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 4208 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 4209 SUPPORTED_MII); 4210 if (np->gigabit == PHY_GIGABIT) 4211 ecmd->supported |= SUPPORTED_1000baseT_Full; 4212 4213 ecmd->phy_address = np->phyaddr; 4214 ecmd->transceiver = XCVR_EXTERNAL; 4215 4216 /* ignore maxtxpkt, maxrxpkt for now */ 4217 spin_unlock_irq(&np->lock); 4218 return 0; 4219} 4220 4221static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 4222{ 4223 struct fe_priv *np = netdev_priv(dev); 4224 4225 if (ecmd->port != PORT_MII) 4226 return -EINVAL; 4227 if (ecmd->transceiver != XCVR_EXTERNAL) 4228 return -EINVAL; 4229 if (ecmd->phy_address != np->phyaddr) { 4230 /* TODO: support switching between multiple phys. Should be 4231 * trivial, but not enabled due to lack of test hardware. */ 4232 return -EINVAL; 4233 } 4234 if (ecmd->autoneg == AUTONEG_ENABLE) { 4235 u32 mask; 4236 4237 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 4238 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 4239 if (np->gigabit == PHY_GIGABIT) 4240 mask |= ADVERTISED_1000baseT_Full; 4241 4242 if ((ecmd->advertising & mask) == 0) 4243 return -EINVAL; 4244 4245 } else if (ecmd->autoneg == AUTONEG_DISABLE) { 4246 /* Note: autonegotiation disable, speed 1000 intentionally 4247 * forbidden - noone should need that. */ 4248 4249 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) 4250 return -EINVAL; 4251 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 4252 return -EINVAL; 4253 } else { 4254 return -EINVAL; 4255 } 4256 4257 netif_carrier_off(dev); 4258 if (netif_running(dev)) { 4259 unsigned long flags; 4260 4261 nv_disable_irq(dev); 4262 netif_tx_lock_bh(dev); 4263 netif_addr_lock(dev); 4264 /* with plain spinlock lockdep complains */ 4265 spin_lock_irqsave(&np->lock, flags); 4266 /* stop engines */ 4267 /* FIXME: 4268 * this can take some time, and interrupts are disabled 4269 * due to spin_lock_irqsave, but let's hope no daemon 4270 * is going to change the settings very often... 
4271 * Worst case: 4272 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX 4273 * + some minor delays, which is up to a second approximately 4274 */ 4275 nv_stop_rxtx(dev); 4276 spin_unlock_irqrestore(&np->lock, flags); 4277 netif_addr_unlock(dev); 4278 netif_tx_unlock_bh(dev); 4279 } 4280 4281 if (ecmd->autoneg == AUTONEG_ENABLE) { 4282 int adv, bmcr; 4283 4284 np->autoneg = 1; 4285 4286 /* advertise only what has been requested */ 4287 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4288 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4289 if (ecmd->advertising & ADVERTISED_10baseT_Half) 4290 adv |= ADVERTISE_10HALF; 4291 if (ecmd->advertising & ADVERTISED_10baseT_Full) 4292 adv |= ADVERTISE_10FULL; 4293 if (ecmd->advertising & ADVERTISED_100baseT_Half) 4294 adv |= ADVERTISE_100HALF; 4295 if (ecmd->advertising & ADVERTISED_100baseT_Full) 4296 adv |= ADVERTISE_100FULL; 4297 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */ 4298 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4299 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4300 adv |= ADVERTISE_PAUSE_ASYM; 4301 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4302 4303 if (np->gigabit == PHY_GIGABIT) { 4304 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4305 adv &= ~ADVERTISE_1000FULL; 4306 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 4307 adv |= ADVERTISE_1000FULL; 4308 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4309 } 4310 4311 if (netif_running(dev)) 4312 printk(KERN_INFO "%s: link down.\n", dev->name); 4313 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4314 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4315 bmcr |= BMCR_ANENABLE; 4316 /* reset the phy in order for settings to stick, 4317 * and cause autoneg to start */ 4318 if (phy_reset(dev, bmcr)) { 4319 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4320 return -EINVAL; 4321 } 4322 } else { 4323 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4324 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4325 } 4326 } else { 4327 int adv, bmcr; 4328 4329 np->autoneg = 0; 4330 4331 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4332 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4333 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) 4334 adv |= ADVERTISE_10HALF; 4335 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) 4336 adv |= ADVERTISE_10FULL; 4337 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) 4338 adv |= ADVERTISE_100HALF; 4339 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) 4340 adv |= ADVERTISE_100FULL; 4341 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4342 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */ 4343 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4344 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4345 } 4346 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { 4347 adv |= ADVERTISE_PAUSE_ASYM; 4348 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4349 } 4350 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4351 np->fixed_mode = adv; 4352 4353 if (np->gigabit == PHY_GIGABIT) { 4354 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4355 adv &= ~ADVERTISE_1000FULL; 4356 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4357 } 4358 4359 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4360 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); 4361 if 
(np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) 4362 bmcr |= BMCR_FULLDPLX; 4363 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) 4364 bmcr |= BMCR_SPEED100; 4365 if (np->phy_oui == PHY_OUI_MARVELL) { 4366 /* reset the phy in order for forced mode settings to stick */ 4367 if (phy_reset(dev, bmcr)) { 4368 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4369 return -EINVAL; 4370 } 4371 } else { 4372 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4373 if (netif_running(dev)) { 4374 /* Wait a bit and then reconfigure the nic. */ 4375 udelay(10); 4376 nv_linkchange(dev); 4377 } 4378 } 4379 } 4380 4381 if (netif_running(dev)) { 4382 nv_start_rxtx(dev); 4383 nv_enable_irq(dev); 4384 } 4385 4386 return 0; 4387} 4388 4389#define FORCEDETH_REGS_VER 1 4390 4391static int nv_get_regs_len(struct net_device *dev) 4392{ 4393 struct fe_priv *np = netdev_priv(dev); 4394 return np->register_size; 4395} 4396 4397static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 4398{ 4399 struct fe_priv *np = netdev_priv(dev); 4400 u8 __iomem *base = get_hwbase(dev); 4401 u32 *rbuf = buf; 4402 int i; 4403 4404 regs->version = FORCEDETH_REGS_VER; 4405 spin_lock_irq(&np->lock); 4406 for (i = 0;i <= np->register_size/sizeof(u32); i++) 4407 rbuf[i] = readl(base + i*sizeof(u32)); 4408 spin_unlock_irq(&np->lock); 4409} 4410 4411static int nv_nway_reset(struct net_device *dev) 4412{ 4413 struct fe_priv *np = netdev_priv(dev); 4414 int ret; 4415 4416 if (np->autoneg) { 4417 int bmcr; 4418 4419 netif_carrier_off(dev); 4420 if (netif_running(dev)) { 4421 nv_disable_irq(dev); 4422 netif_tx_lock_bh(dev); 4423 netif_addr_lock(dev); 4424 spin_lock(&np->lock); 4425 /* stop engines */ 4426 nv_stop_rxtx(dev); 4427 spin_unlock(&np->lock); 4428 netif_addr_unlock(dev); 4429 netif_tx_unlock_bh(dev); 4430 printk(KERN_INFO "%s: link down.\n", dev->name); 4431 } 4432 4433 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4434 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4435 bmcr |= BMCR_ANENABLE; 4436 /* reset the phy in order for settings to stick*/ 4437 if (phy_reset(dev, bmcr)) { 4438 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4439 return -EINVAL; 4440 } 4441 } else { 4442 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4443 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4444 } 4445 4446 if (netif_running(dev)) { 4447 nv_start_rxtx(dev); 4448 nv_enable_irq(dev); 4449 } 4450 ret = 0; 4451 } else { 4452 ret = -EINVAL; 4453 } 4454 4455 return ret; 4456} 4457 4458static int nv_set_tso(struct net_device *dev, u32 value) 4459{ 4460 struct fe_priv *np = netdev_priv(dev); 4461 4462 if ((np->driver_data & DEV_HAS_CHECKSUM)) 4463 return ethtool_op_set_tso(dev, value); 4464 else 4465 return -EOPNOTSUPP; 4466} 4467 4468static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4469{ 4470 struct fe_priv *np = netdev_priv(dev); 4471 4472 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4473 ring->rx_mini_max_pending = 0; 4474 ring->rx_jumbo_max_pending = 0; 4475 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? 
RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4476 4477 ring->rx_pending = np->rx_ring_size; 4478 ring->rx_mini_pending = 0; 4479 ring->rx_jumbo_pending = 0; 4480 ring->tx_pending = np->tx_ring_size; 4481} 4482 4483static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4484{ 4485 struct fe_priv *np = netdev_priv(dev); 4486 u8 __iomem *base = get_hwbase(dev); 4487 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; 4488 dma_addr_t ring_addr; 4489 4490 if (ring->rx_pending < RX_RING_MIN || 4491 ring->tx_pending < TX_RING_MIN || 4492 ring->rx_mini_pending != 0 || 4493 ring->rx_jumbo_pending != 0 || 4494 (np->desc_ver == DESC_VER_1 && 4495 (ring->rx_pending > RING_MAX_DESC_VER_1 || 4496 ring->tx_pending > RING_MAX_DESC_VER_1)) || 4497 (np->desc_ver != DESC_VER_1 && 4498 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 4499 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { 4500 return -EINVAL; 4501 } 4502 4503 /* allocate new rings */ 4504 if (!nv_optimized(np)) { 4505 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4506 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4507 &ring_addr); 4508 } else { 4509 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4510 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4511 &ring_addr); 4512 } 4513 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); 4514 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); 4515 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { 4516 /* fall back to old rings */ 4517 if (!nv_optimized(np)) { 4518 if (rxtx_ring) 4519 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4520 rxtx_ring, ring_addr); 4521 } else { 4522 if (rxtx_ring) 4523 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4524 rxtx_ring, ring_addr); 4525 } 4526 if (rx_skbuff) 4527 kfree(rx_skbuff); 4528 if (tx_skbuff) 4529 kfree(tx_skbuff); 4530 goto exit; 4531 } 4532 4533 if (netif_running(dev)) { 4534 nv_disable_irq(dev); 4535 netif_tx_lock_bh(dev); 4536 netif_addr_lock(dev); 4537 spin_lock(&np->lock); 4538 /* stop engines */ 4539 nv_stop_rxtx(dev); 4540 nv_txrx_reset(dev); 4541 /* drain queues */ 4542 nv_drain_rxtx(dev); 4543 /* delete queues */ 4544 free_rings(dev); 4545 } 4546 4547 /* set new values */ 4548 np->rx_ring_size = ring->rx_pending; 4549 np->tx_ring_size = ring->tx_pending; 4550 4551 if (!nv_optimized(np)) { 4552 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4553 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4554 } else { 4555 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4556 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4557 } 4558 np->rx_skb = (struct nv_skb_map*)rx_skbuff; 4559 np->tx_skb = (struct nv_skb_map*)tx_skbuff; 4560 np->ring_addr = ring_addr; 4561 4562 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4563 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); 4564 4565 if (netif_running(dev)) { 4566 /* reinit driver view of the queues */ 4567 set_bufsize(dev); 4568 if (nv_init_ring(dev)) { 4569 if (!np->in_shutdown) 4570 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4571 } 4572 4573 /* reinit nic view of the queues */ 4574 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4575 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4576 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4577 base + NvRegRingSizes); 4578 pci_push(base); 4579 
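/* The nic now sees the new ring addresses and sizes; kick the tx/rx control logic below before restarting the engines. */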
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4580 pci_push(base); 4581 4582 /* restart engines */ 4583 nv_start_rxtx(dev); 4584 spin_unlock(&np->lock); 4585 netif_addr_unlock(dev); 4586 netif_tx_unlock_bh(dev); 4587 nv_enable_irq(dev); 4588 } 4589 return 0; 4590exit: 4591 return -ENOMEM; 4592} 4593 4594static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4595{ 4596 struct fe_priv *np = netdev_priv(dev); 4597 4598 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; 4599 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; 4600 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; 4601} 4602 4603static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4604{ 4605 struct fe_priv *np = netdev_priv(dev); 4606 int adv, bmcr; 4607 4608 if ((!np->autoneg && np->duplex == 0) || 4609 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4610 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", 4611 dev->name); 4612 return -EINVAL; 4613 } 4614 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4615 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 4616 return -EINVAL; 4617 } 4618 4619 netif_carrier_off(dev); 4620 if (netif_running(dev)) { 4621 nv_disable_irq(dev); 4622 netif_tx_lock_bh(dev); 4623 netif_addr_lock(dev); 4624 spin_lock(&np->lock); 4625 /* stop engines */ 4626 nv_stop_rxtx(dev); 4627 spin_unlock(&np->lock); 4628 netif_addr_unlock(dev); 4629 netif_tx_unlock_bh(dev); 4630 } 4631 4632 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); 4633 if (pause->rx_pause) 4634 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; 4635 if (pause->tx_pause) 4636 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; 4637 4638 if (np->autoneg && pause->autoneg) { 4639 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; 4640 4641 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4642 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4643 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */ 4644 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4645 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4646 adv |= ADVERTISE_PAUSE_ASYM; 4647 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4648 4649 if (netif_running(dev)) 4650 printk(KERN_INFO "%s: link down.\n", dev->name); 4651 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4652 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4653 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4654 } else { 4655 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4656 if (pause->rx_pause) 4657 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4658 if (pause->tx_pause) 4659 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4660 4661 if (!netif_running(dev)) 4662 nv_update_linkspeed(dev); 4663 else 4664 nv_update_pause(dev, np->pause_flags); 4665 } 4666 4667 if (netif_running(dev)) { 4668 nv_start_rxtx(dev); 4669 nv_enable_irq(dev); 4670 } 4671 return 0; 4672} 4673 4674static u32 nv_get_rx_csum(struct net_device *dev) 4675{ 4676 struct fe_priv *np = netdev_priv(dev); 4677 return (np->rx_csum) != 0; 4678} 4679 4680static int nv_set_rx_csum(struct net_device *dev, u32 data) 4681{ 4682 struct fe_priv *np = netdev_priv(dev); 4683 u8 __iomem *base = get_hwbase(dev); 4684 int retcode = 0; 4685 4686 if (np->driver_data & DEV_HAS_CHECKSUM) { 4687 if (data) { 4688 np->rx_csum = 1; 4689 np->txrxctl_bits |= 
NVREG_TXRXCTL_RXCHECK; 4690 } else { 4691 np->rx_csum = 0; 4692 /* vlan is dependent on rx checksum offload */ 4693 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) 4694 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; 4695 } 4696 if (netif_running(dev)) { 4697 spin_lock_irq(&np->lock); 4698 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4699 spin_unlock_irq(&np->lock); 4700 } 4701 } else { 4702 return -EINVAL; 4703 } 4704 4705 return retcode; 4706} 4707 4708static int nv_set_tx_csum(struct net_device *dev, u32 data) 4709{ 4710 struct fe_priv *np = netdev_priv(dev); 4711 4712 if (np->driver_data & DEV_HAS_CHECKSUM) 4713 return ethtool_op_set_tx_hw_csum(dev, data); 4714 else 4715 return -EOPNOTSUPP; 4716} 4717 4718static int nv_set_sg(struct net_device *dev, u32 data) 4719{ 4720 struct fe_priv *np = netdev_priv(dev); 4721 4722 if (np->driver_data & DEV_HAS_CHECKSUM) 4723 return ethtool_op_set_sg(dev, data); 4724 else 4725 return -EOPNOTSUPP; 4726} 4727 4728static int nv_get_sset_count(struct net_device *dev, int sset) 4729{ 4730 struct fe_priv *np = netdev_priv(dev); 4731 4732 switch (sset) { 4733 case ETH_SS_TEST: 4734 if (np->driver_data & DEV_HAS_TEST_EXTENDED) 4735 return NV_TEST_COUNT_EXTENDED; 4736 else 4737 return NV_TEST_COUNT_BASE; 4738 case ETH_SS_STATS: 4739 if (np->driver_data & DEV_HAS_STATISTICS_V1) 4740 return NV_DEV_STATISTICS_V1_COUNT; 4741 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4742 return NV_DEV_STATISTICS_V2_COUNT; 4743 else 4744 return 0; 4745 default: 4746 return -EOPNOTSUPP; 4747 } 4748} 4749 4750static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) 4751{ 4752 struct fe_priv *np = netdev_priv(dev); 4753 4754 /* update stats */ 4755 nv_do_stats_poll((unsigned long)dev); 4756 4757 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); 4758} 4759 4760static int nv_link_test(struct net_device *dev) 4761{ 4762 struct fe_priv *np = netdev_priv(dev); 4763 int mii_status; 4764 4765 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4766 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4767 4768 /* check phy link status */ 4769 if (!(mii_status & BMSR_LSTATUS)) 4770 return 0; 4771 else 4772 return 1; 4773} 4774 4775static int nv_register_test(struct net_device *dev) 4776{ 4777 u8 __iomem *base = get_hwbase(dev); 4778 int i = 0; 4779 u32 orig_read, new_read; 4780 4781 do { 4782 orig_read = readl(base + nv_registers_test[i].reg); 4783 4784 /* xor with mask to toggle bits */ 4785 orig_read ^= nv_registers_test[i].mask; 4786 4787 writel(orig_read, base + nv_registers_test[i].reg); 4788 4789 new_read = readl(base + nv_registers_test[i].reg); 4790 4791 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) 4792 return 0; 4793 4794 /* restore original value */ 4795 orig_read ^= nv_registers_test[i].mask; 4796 writel(orig_read, base + nv_registers_test[i].reg); 4797 4798 } while (nv_registers_test[++i].reg != 0); 4799 4800 return 1; 4801} 4802 4803static int nv_interrupt_test(struct net_device *dev) 4804{ 4805 struct fe_priv *np = netdev_priv(dev); 4806 u8 __iomem *base = get_hwbase(dev); 4807 int ret = 1; 4808 int testcnt; 4809 u32 save_msi_flags, save_poll_interval = 0; 4810 4811 if (netif_running(dev)) { 4812 /* free current irq */ 4813 nv_free_irq(dev); 4814 save_poll_interval = readl(base+NvRegPollingInterval); 4815 } 4816 4817 /* flag to test interrupt handler */ 4818 np->intr_test = 0; 4819 4820 /* setup test irq */ 4821 save_msi_flags = np->msi_flags; 4822 
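/* Run the test with a single vector so the timer interrupt is delivered through nv_nic_irq_test; the saved msi_flags are restored once the test completes. */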
np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; 4823 np->msi_flags |= 0x001; /* setup 1 vector */ 4824 if (nv_request_irq(dev, 1)) 4825 return 0; 4826 4827 /* setup timer interrupt */ 4828 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 4829 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4830 4831 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4832 4833 /* wait for at least one interrupt */ 4834 msleep(100); 4835 4836 spin_lock_irq(&np->lock); 4837 4838 /* flag should be set within ISR */ 4839 testcnt = np->intr_test; 4840 if (!testcnt) 4841 ret = 2; 4842 4843 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4844 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 4845 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4846 else 4847 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4848 4849 spin_unlock_irq(&np->lock); 4850 4851 nv_free_irq(dev); 4852 4853 np->msi_flags = save_msi_flags; 4854 4855 if (netif_running(dev)) { 4856 writel(save_poll_interval, base + NvRegPollingInterval); 4857 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4858 /* restore original irq */ 4859 if (nv_request_irq(dev, 0)) 4860 return 0; 4861 } 4862 4863 return ret; 4864} 4865 4866static int nv_loopback_test(struct net_device *dev) 4867{ 4868 struct fe_priv *np = netdev_priv(dev); 4869 u8 __iomem *base = get_hwbase(dev); 4870 struct sk_buff *tx_skb, *rx_skb; 4871 dma_addr_t test_dma_addr; 4872 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 4873 u32 flags; 4874 int len, i, pkt_len; 4875 u8 *pkt_data; 4876 u32 filter_flags = 0; 4877 u32 misc1_flags = 0; 4878 int ret = 1; 4879 4880 if (netif_running(dev)) { 4881 nv_disable_irq(dev); 4882 filter_flags = readl(base + NvRegPacketFilterFlags); 4883 misc1_flags = readl(base + NvRegMisc1); 4884 } else { 4885 nv_txrx_reset(dev); 4886 } 4887 4888 /* reinit driver view of the rx queue */ 4889 set_bufsize(dev); 4890 nv_init_ring(dev); 4891 4892 /* setup hardware for loopback */ 4893 writel(NVREG_MISC1_FORCE, base + NvRegMisc1); 4894 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); 4895 4896 /* reinit nic view of the rx queue */ 4897 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4898 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4899 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4900 base + NvRegRingSizes); 4901 pci_push(base); 4902 4903 /* restart rx engine */ 4904 nv_start_rxtx(dev); 4905 4906 /* setup packet for tx */ 4907 pkt_len = ETH_DATA_LEN; 4908 tx_skb = dev_alloc_skb(pkt_len); 4909 if (!tx_skb) { 4910 printk(KERN_ERR "dev_alloc_skb() failed during loopback test" 4911 " of %s\n", dev->name); 4912 ret = 0; 4913 goto out; 4914 } 4915 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, 4916 skb_tailroom(tx_skb), 4917 PCI_DMA_FROMDEVICE); 4918 pkt_data = skb_put(tx_skb, pkt_len); 4919 for (i = 0; i < pkt_len; i++) 4920 pkt_data[i] = (u8)(i & 0xff); 4921 4922 if (!nv_optimized(np)) { 4923 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 4924 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 4925 } else { 4926 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); 4927 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); 4928 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 4929 } 4930 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4931 pci_push(get_hwbase(dev)); 4932 
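/* The descriptor has been handed to the hardware; with the packet filter in loopback mode the frame should show up in rx descriptor 0, so allow some time before checking. */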
4933 msleep(500); 4934 4935 /* check for rx of the packet */ 4936 if (!nv_optimized(np)) { 4937 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 4938 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 4939 4940 } else { 4941 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); 4942 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 4943 } 4944 4945 if (flags & NV_RX_AVAIL) { 4946 ret = 0; 4947 } else if (np->desc_ver == DESC_VER_1) { 4948 if (flags & NV_RX_ERROR) 4949 ret = 0; 4950 } else { 4951 if (flags & NV_RX2_ERROR) { 4952 ret = 0; 4953 } 4954 } 4955 4956 if (ret) { 4957 if (len != pkt_len) { 4958 ret = 0; 4959 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 4960 dev->name, len, pkt_len); 4961 } else { 4962 rx_skb = np->rx_skb[0].skb; 4963 for (i = 0; i < pkt_len; i++) { 4964 if (rx_skb->data[i] != (u8)(i & 0xff)) { 4965 ret = 0; 4966 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", 4967 dev->name, i); 4968 break; 4969 } 4970 } 4971 } 4972 } else { 4973 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); 4974 } 4975 4976 pci_unmap_page(np->pci_dev, test_dma_addr, 4977 (skb_end_pointer(tx_skb) - tx_skb->data), 4978 PCI_DMA_TODEVICE); 4979 dev_kfree_skb_any(tx_skb); 4980 out: 4981 /* stop engines */ 4982 nv_stop_rxtx(dev); 4983 nv_txrx_reset(dev); 4984 /* drain rx queue */ 4985 nv_drain_rxtx(dev); 4986 4987 if (netif_running(dev)) { 4988 writel(misc1_flags, base + NvRegMisc1); 4989 writel(filter_flags, base + NvRegPacketFilterFlags); 4990 nv_enable_irq(dev); 4991 } 4992 4993 return ret; 4994} 4995 4996static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) 4997{ 4998 struct fe_priv *np = netdev_priv(dev); 4999 u8 __iomem *base = get_hwbase(dev); 5000 int result; 5001 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); 5002 5003 if (!nv_link_test(dev)) { 5004 test->flags |= ETH_TEST_FL_FAILED; 5005 buffer[0] = 1; 5006 } 5007 5008 if (test->flags & ETH_TEST_FL_OFFLINE) { 5009 if (netif_running(dev)) { 5010 netif_stop_queue(dev); 5011#ifdef CONFIG_FORCEDETH_NAPI 5012 napi_disable(&np->napi); 5013#endif 5014 netif_tx_lock_bh(dev); 5015 netif_addr_lock(dev); 5016 spin_lock_irq(&np->lock); 5017 nv_disable_hw_interrupts(dev, np->irqmask); 5018 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 5019 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5020 } else { 5021 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 5022 } 5023 /* stop engines */ 5024 nv_stop_rxtx(dev); 5025 nv_txrx_reset(dev); 5026 /* drain rx queue */ 5027 nv_drain_rxtx(dev); 5028 spin_unlock_irq(&np->lock); 5029 netif_addr_unlock(dev); 5030 netif_tx_unlock_bh(dev); 5031 } 5032 5033 if (!nv_register_test(dev)) { 5034 test->flags |= ETH_TEST_FL_FAILED; 5035 buffer[1] = 1; 5036 } 5037 5038 result = nv_interrupt_test(dev); 5039 if (result != 1) { 5040 test->flags |= ETH_TEST_FL_FAILED; 5041 buffer[2] = 1; 5042 } 5043 if (result == 0) { 5044 /* bail out */ 5045 return; 5046 } 5047 5048 if (!nv_loopback_test(dev)) { 5049 test->flags |= ETH_TEST_FL_FAILED; 5050 buffer[3] = 1; 5051 } 5052 5053 if (netif_running(dev)) { 5054 /* reinit driver view of the rx queue */ 5055 set_bufsize(dev); 5056 if (nv_init_ring(dev)) { 5057 if (!np->in_shutdown) 5058 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5059 } 5060 /* reinit nic view of the rx queue */ 5061 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5062 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5063 writel( ((np->rx_ring_size-1) << 
NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5064 base + NvRegRingSizes); 5065 pci_push(base); 5066 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5067 pci_push(base); 5068 /* restart rx engine */ 5069 nv_start_rxtx(dev); 5070 netif_start_queue(dev); 5071#ifdef CONFIG_FORCEDETH_NAPI 5072 napi_enable(&np->napi); 5073#endif 5074 nv_enable_hw_interrupts(dev, np->irqmask); 5075 } 5076 } 5077} 5078 5079static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) 5080{ 5081 switch (stringset) { 5082 case ETH_SS_STATS: 5083 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); 5084 break; 5085 case ETH_SS_TEST: 5086 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); 5087 break; 5088 } 5089} 5090 5091static const struct ethtool_ops ops = { 5092 .get_drvinfo = nv_get_drvinfo, 5093 .get_link = ethtool_op_get_link, 5094 .get_wol = nv_get_wol, 5095 .set_wol = nv_set_wol, 5096 .get_settings = nv_get_settings, 5097 .set_settings = nv_set_settings, 5098 .get_regs_len = nv_get_regs_len, 5099 .get_regs = nv_get_regs, 5100 .nway_reset = nv_nway_reset, 5101 .set_tso = nv_set_tso, 5102 .get_ringparam = nv_get_ringparam, 5103 .set_ringparam = nv_set_ringparam, 5104 .get_pauseparam = nv_get_pauseparam, 5105 .set_pauseparam = nv_set_pauseparam, 5106 .get_rx_csum = nv_get_rx_csum, 5107 .set_rx_csum = nv_set_rx_csum, 5108 .set_tx_csum = nv_set_tx_csum, 5109 .set_sg = nv_set_sg, 5110 .get_strings = nv_get_strings, 5111 .get_ethtool_stats = nv_get_ethtool_stats, 5112 .get_sset_count = nv_get_sset_count, 5113 .self_test = nv_self_test, 5114}; 5115 5116static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 5117{ 5118 struct fe_priv *np = get_nvpriv(dev); 5119 5120 spin_lock_irq(&np->lock); 5121 5122 /* save vlan group */ 5123 np->vlangrp = grp; 5124 5125 if (grp) { 5126 /* enable vlan on MAC */ 5127 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; 5128 } else { 5129 /* disable vlan on MAC */ 5130 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 5131 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 5132 } 5133 5134 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5135 5136 spin_unlock_irq(&np->lock); 5137} 5138 5139/* The mgmt unit and driver use a semaphore to access the phy during init */ 5140static int nv_mgmt_acquire_sema(struct net_device *dev) 5141{ 5142 u8 __iomem *base = get_hwbase(dev); 5143 int i; 5144 u32 tx_ctrl, mgmt_sema; 5145 5146 for (i = 0; i < 10; i++) { 5147 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; 5148 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) 5149 break; 5150 msleep(500); 5151 } 5152 5153 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) 5154 return 0; 5155 5156 for (i = 0; i < 2; i++) { 5157 tx_ctrl = readl(base + NvRegTransmitterControl); 5158 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; 5159 writel(tx_ctrl, base + NvRegTransmitterControl); 5160 5161 /* verify that semaphore was acquired */ 5162 tx_ctrl = readl(base + NvRegTransmitterControl); 5163 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && 5164 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) 5165 return 1; 5166 else 5167 udelay(50); 5168 } 5169 5170 return 0; 5171} 5172 5173static int nv_open(struct net_device *dev) 5174{ 5175 struct fe_priv *np = netdev_priv(dev); 5176 u8 __iomem *base = get_hwbase(dev); 5177 int ret = 1; 
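/* Bring-up sequence: clear stale hardware state, set up and publish the descriptor rings, program the MAC/PHY related registers, request the interrupt(s), then enable interrupts and start the rx/tx engines with a forced link re-evaluation. */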
5178 int oom, i; 5179 u32 low; 5180 5181 dprintk(KERN_DEBUG "nv_open: begin\n"); 5182 5183 /* erase previous misconfiguration */ 5184 if (np->driver_data & DEV_HAS_POWER_CNTRL) 5185 nv_mac_reset(dev); 5186 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 5187 writel(0, base + NvRegMulticastAddrB); 5188 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 5189 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 5190 writel(0, base + NvRegPacketFilterFlags); 5191 5192 writel(0, base + NvRegTransmitterControl); 5193 writel(0, base + NvRegReceiverControl); 5194 5195 writel(0, base + NvRegAdapterControl); 5196 5197 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) 5198 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 5199 5200 /* initialize descriptor rings */ 5201 set_bufsize(dev); 5202 oom = nv_init_ring(dev); 5203 5204 writel(0, base + NvRegLinkSpeed); 5205 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5206 nv_txrx_reset(dev); 5207 writel(0, base + NvRegUnknownSetupReg6); 5208 5209 np->in_shutdown = 0; 5210 5211 /* give hw rings */ 5212 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5213 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5214 base + NvRegRingSizes); 5215 5216 writel(np->linkspeed, base + NvRegLinkSpeed); 5217 if (np->desc_ver == DESC_VER_1) 5218 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); 5219 else 5220 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); 5221 writel(np->txrxctl_bits, base + NvRegTxRxControl); 5222 writel(np->vlanctl_bits, base + NvRegVlanControl); 5223 pci_push(base); 5224 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 5225 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 5226 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 5227 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 5228 5229 writel(0, base + NvRegMIIMask); 5230 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5231 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5232 5233 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 5234 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 5235 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 5236 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5237 5238 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 5239 5240 get_random_bytes(&low, sizeof(low)); 5241 low &= NVREG_SLOTTIME_MASK; 5242 if (np->desc_ver == DESC_VER_1) { 5243 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime); 5244 } else { 5245 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) { 5246 /* setup legacy backoff */ 5247 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime); 5248 } else { 5249 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime); 5250 nv_gear_backoff_reseed(dev); 5251 } 5252 } 5253 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); 5254 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); 5255 if (poll_interval == -1) { 5256 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 5257 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 5258 else 5259 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 5260 } 5261 else 5262 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 5263 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5264 writel((np->phyaddr << 
NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 5265 base + NvRegAdapterControl); 5266 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); 5267 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); 5268 if (np->wolenabled) 5269 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 5270 5271 i = readl(base + NvRegPowerState); 5272 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 5273 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 5274 5275 pci_push(base); 5276 udelay(10); 5277 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); 5278 5279 nv_disable_hw_interrupts(dev, np->irqmask); 5280 pci_push(base); 5281 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5282 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5283 pci_push(base); 5284 5285 if (nv_request_irq(dev, 0)) { 5286 goto out_drain; 5287 } 5288 5289 /* ask for interrupts */ 5290 nv_enable_hw_interrupts(dev, np->irqmask); 5291 5292 spin_lock_irq(&np->lock); 5293 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 5294 writel(0, base + NvRegMulticastAddrB); 5295 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 5296 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 5297 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5298 /* One manual link speed update: Interrupts are enabled, future link 5299 * speed changes cause interrupts and are handled by nv_link_irq(). 5300 */ 5301 { 5302 u32 miistat; 5303 miistat = readl(base + NvRegMIIStatus); 5304 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5305 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); 5306 } 5307 /* set linkspeed to invalid value, thus force nv_update_linkspeed 5308 * to init hw */ 5309 np->linkspeed = 0; 5310 ret = nv_update_linkspeed(dev); 5311 nv_start_rxtx(dev); 5312 netif_start_queue(dev); 5313#ifdef CONFIG_FORCEDETH_NAPI 5314 napi_enable(&np->napi); 5315#endif 5316 5317 if (ret) { 5318 netif_carrier_on(dev); 5319 } else { 5320 printk(KERN_INFO "%s: no link during initialization.\n", dev->name); 5321 netif_carrier_off(dev); 5322 } 5323 if (oom) 5324 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5325 5326 /* start statistics timer */ 5327 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) 5328 mod_timer(&np->stats_poll, 5329 round_jiffies(jiffies + STATS_INTERVAL)); 5330 5331 spin_unlock_irq(&np->lock); 5332 5333 return 0; 5334out_drain: 5335 nv_drain_rxtx(dev); 5336 return ret; 5337} 5338 5339static int nv_close(struct net_device *dev) 5340{ 5341 struct fe_priv *np = netdev_priv(dev); 5342 u8 __iomem *base; 5343 5344 spin_lock_irq(&np->lock); 5345 np->in_shutdown = 1; 5346 spin_unlock_irq(&np->lock); 5347#ifdef CONFIG_FORCEDETH_NAPI 5348 napi_disable(&np->napi); 5349#endif 5350 synchronize_irq(np->pci_dev->irq); 5351 5352 del_timer_sync(&np->oom_kick); 5353 del_timer_sync(&np->nic_poll); 5354 del_timer_sync(&np->stats_poll); 5355 5356 netif_stop_queue(dev); 5357 spin_lock_irq(&np->lock); 5358 nv_stop_rxtx(dev); 5359 nv_txrx_reset(dev); 5360 5361 /* disable interrupts on the nic or we will lock up */ 5362 base = get_hwbase(dev); 5363 nv_disable_hw_interrupts(dev, np->irqmask); 5364 pci_push(base); 5365 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); 5366 5367 spin_unlock_irq(&np->lock); 5368 5369 nv_free_irq(dev); 5370 5371 nv_drain_rxtx(dev); 5372 5373 if (np->wolenabled) { 5374 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5375 nv_start_rx(dev); 5376 } 5377 
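/* When Wake-on-LAN is enabled the receiver is deliberately left running so the hardware can still receive the magic packet while the interface is down. */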
5378 /* FIXME: power down nic */ 5379 5380 return 0; 5381} 5382 5383static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 5384{ 5385 struct net_device *dev; 5386 struct fe_priv *np; 5387 unsigned long addr; 5388 u8 __iomem *base; 5389 int err, i; 5390 u32 powerstate, txreg; 5391 u32 phystate_orig = 0, phystate; 5392 int phyinitialized = 0; 5393 DECLARE_MAC_BUF(mac); 5394 static int printed_version; 5395 5396 if (!printed_version++) 5397 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet" 5398 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION); 5399 5400 dev = alloc_etherdev(sizeof(struct fe_priv)); 5401 err = -ENOMEM; 5402 if (!dev) 5403 goto out; 5404 5405 np = netdev_priv(dev); 5406 np->dev = dev; 5407 np->pci_dev = pci_dev; 5408 spin_lock_init(&np->lock); 5409 SET_NETDEV_DEV(dev, &pci_dev->dev); 5410 5411 init_timer(&np->oom_kick); 5412 np->oom_kick.data = (unsigned long) dev; 5413 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ 5414 init_timer(&np->nic_poll); 5415 np->nic_poll.data = (unsigned long) dev; 5416 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ 5417 init_timer(&np->stats_poll); 5418 np->stats_poll.data = (unsigned long) dev; 5419 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ 5420 5421 err = pci_enable_device(pci_dev); 5422 if (err) 5423 goto out_free; 5424 5425 pci_set_master(pci_dev); 5426 5427 err = pci_request_regions(pci_dev, DRV_NAME); 5428 if (err < 0) 5429 goto out_disable; 5430 5431 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) 5432 np->register_size = NV_PCI_REGSZ_VER3; 5433 else if (id->driver_data & DEV_HAS_STATISTICS_V1) 5434 np->register_size = NV_PCI_REGSZ_VER2; 5435 else 5436 np->register_size = NV_PCI_REGSZ_VER1; 5437 5438 err = -EINVAL; 5439 addr = 0; 5440 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5441 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", 5442 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), 5443 pci_resource_len(pci_dev, i), 5444 pci_resource_flags(pci_dev, i)); 5445 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 5446 pci_resource_len(pci_dev, i) >= np->register_size) { 5447 addr = pci_resource_start(pci_dev, i); 5448 break; 5449 } 5450 } 5451 if (i == DEVICE_COUNT_RESOURCE) { 5452 dev_printk(KERN_INFO, &pci_dev->dev, 5453 "Couldn't find register window\n"); 5454 goto out_relreg; 5455 } 5456 5457 /* copy of driver data */ 5458 np->driver_data = id->driver_data; 5459 /* copy of device id */ 5460 np->device_id = id->device; 5461 5462 /* handle different descriptor versions */ 5463 if (id->driver_data & DEV_HAS_HIGH_DMA) { 5464 /* packet format 3: supports 40-bit addressing */ 5465 np->desc_ver = DESC_VER_3; 5466 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 5467 if (dma_64bit) { 5468 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) 5469 dev_printk(KERN_INFO, &pci_dev->dev, 5470 "64-bit DMA failed, using 32-bit addressing\n"); 5471 else 5472 dev->features |= NETIF_F_HIGHDMA; 5473 if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) { 5474 dev_printk(KERN_INFO, &pci_dev->dev, 5475 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); 5476 } 5477 } 5478 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 5479 /* packet format 2: supports jumbo frames */ 5480 np->desc_ver = DESC_VER_2; 5481 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; 5482 } else { 5483 /* original packet format */ 5484 np->desc_ver = DESC_VER_1; 5485 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; 5486 } 
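/* DESC_VER_1 is the original descriptor layout, DESC_VER_2 adds jumbo frame support (DEV_HAS_LARGEDESC), and DESC_VER_3 adds 40-bit addressing (DEV_HAS_HIGH_DMA); the _optimized fast paths apply to version 3 descriptors (see nv_optimized()). */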
5487 5488 np->pkt_limit = NV_PKTLIMIT_1; 5489 if (id->driver_data & DEV_HAS_LARGEDESC) 5490 np->pkt_limit = NV_PKTLIMIT_2; 5491 5492 if (id->driver_data & DEV_HAS_CHECKSUM) { 5493 np->rx_csum = 1; 5494 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 5495 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 5496 dev->features |= NETIF_F_TSO; 5497 } 5498 5499 np->vlanctl_bits = 0; 5500 if (id->driver_data & DEV_HAS_VLAN) { 5501 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; 5502 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; 5503 dev->vlan_rx_register = nv_vlan_rx_register; 5504 } 5505 5506 np->msi_flags = 0; 5507 if ((id->driver_data & DEV_HAS_MSI) && msi) { 5508 np->msi_flags |= NV_MSI_CAPABLE; 5509 } 5510 if ((id->driver_data & DEV_HAS_MSI_X) && msix) { 5511 np->msi_flags |= NV_MSI_X_CAPABLE; 5512 } 5513 5514 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; 5515 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) || 5516 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) || 5517 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) { 5518 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; 5519 } 5520 5521 5522 err = -ENOMEM; 5523 np->base = ioremap(addr, np->register_size); 5524 if (!np->base) 5525 goto out_relreg; 5526 dev->base_addr = (unsigned long)np->base; 5527 5528 dev->irq = pci_dev->irq; 5529 5530 np->rx_ring_size = RX_RING_DEFAULT; 5531 np->tx_ring_size = TX_RING_DEFAULT; 5532 5533 if (!nv_optimized(np)) { 5534 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 5535 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 5536 &np->ring_addr); 5537 if (!np->rx_ring.orig) 5538 goto out_unmap; 5539 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 5540 } else { 5541 np->rx_ring.ex = pci_alloc_consistent(pci_dev, 5542 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 5543 &np->ring_addr); 5544 if (!np->rx_ring.ex) 5545 goto out_unmap; 5546 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 5547 } 5548 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); 5549 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); 5550 if (!np->rx_skb || !np->tx_skb) 5551 goto out_freering; 5552 5553 dev->open = nv_open; 5554 dev->stop = nv_close; 5555 5556 if (!nv_optimized(np)) 5557 dev->hard_start_xmit = nv_start_xmit; 5558 else 5559 dev->hard_start_xmit = nv_start_xmit_optimized; 5560 dev->get_stats = nv_get_stats; 5561 dev->change_mtu = nv_change_mtu; 5562 dev->set_mac_address = nv_set_mac_address; 5563 dev->set_multicast_list = nv_set_multicast; 5564#ifdef CONFIG_NET_POLL_CONTROLLER 5565 dev->poll_controller = nv_poll_controller; 5566#endif 5567#ifdef CONFIG_FORCEDETH_NAPI 5568 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); 5569#endif 5570 SET_ETHTOOL_OPS(dev, &ops); 5571 dev->tx_timeout = nv_tx_timeout; 5572 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 5573 5574 pci_set_drvdata(pci_dev, dev); 5575 5576 /* read the mac address */ 5577 base = get_hwbase(dev); 5578 np->orig_mac[0] = readl(base + NvRegMacAddrA); 5579 np->orig_mac[1] = readl(base + NvRegMacAddrB); 5580 5581 /* check the workaround bit for correct mac address order */ 5582 txreg = readl(base + NvRegTransmitPoll); 5583 if (id->driver_data & DEV_HAS_CORRECT_MACADDR) { 5584 /* mac address is already in correct order */ 5585 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; 5586 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; 5587 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; 5588 dev->dev_addr[3] 
= (np->orig_mac[0] >> 24) & 0xff; 5589 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; 5590 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; 5591 } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { 5592 /* mac address is already in correct order */ 5593 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; 5594 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; 5595 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; 5596 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; 5597 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; 5598 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; 5599 /* 5600 * Set orig mac address back to the reversed version. 5601 * This flag will be cleared during low power transition. 5602 * Therefore, we should always put back the reversed address. 5603 */ 5604 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) + 5605 (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24); 5606 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8); 5607 } else { 5608 /* need to reverse mac address to correct order */ 5609 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; 5610 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; 5611 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; 5612 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; 5613 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 5614 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 5615 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5616 } 5617 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 5618 5619 if (!is_valid_ether_addr(dev->perm_addr)) { 5620 /* 5621 * Bad mac address. At least one bios sets the mac address 5622 * to 01:23:45:67:89:ab 5623 */ 5624 dev_printk(KERN_ERR, &pci_dev->dev, 5625 "Invalid Mac address detected: %s\n", 5626 print_mac(mac, dev->dev_addr)); 5627 dev_printk(KERN_ERR, &pci_dev->dev, 5628 "Please complain to your hardware vendor. Switching to a random MAC.\n"); 5629 dev->dev_addr[0] = 0x00; 5630 dev->dev_addr[1] = 0x00; 5631 dev->dev_addr[2] = 0x6c; 5632 get_random_bytes(&dev->dev_addr[3], 3); 5633 } 5634 5635 dprintk(KERN_DEBUG "%s: MAC Address %s\n", 5636 pci_name(pci_dev), print_mac(mac, dev->dev_addr)); 5637 5638 /* set mac address */ 5639 nv_copy_mac_to_hw(dev); 5640 5641 /* Workaround current PCI init glitch: wakeup bits aren't 5642 * being set from PCI PM capability. 
5643 */ 5644 device_init_wakeup(&pci_dev->dev, 1); 5645 5646 /* disable WOL */ 5647 writel(0, base + NvRegWakeUpFlags); 5648 np->wolenabled = 0; 5649 5650 if (id->driver_data & DEV_HAS_POWER_CNTRL) { 5651 5652 /* take phy and nic out of low power mode */ 5653 powerstate = readl(base + NvRegPowerState2); 5654 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; 5655 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || 5656 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) && 5657 pci_dev->revision >= 0xA3) 5658 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; 5659 writel(powerstate, base + NvRegPowerState2); 5660 } 5661 5662 if (np->desc_ver == DESC_VER_1) { 5663 np->tx_flags = NV_TX_VALID; 5664 } else { 5665 np->tx_flags = NV_TX2_VALID; 5666 } 5667 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { 5668 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 5669 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 5670 np->msi_flags |= 0x0003; 5671 } else { 5672 np->irqmask = NVREG_IRQMASK_CPU; 5673 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ 5674 np->msi_flags |= 0x0001; 5675 } 5676 5677 if (id->driver_data & DEV_NEED_TIMERIRQ) 5678 np->irqmask |= NVREG_IRQ_TIMER; 5679 if (id->driver_data & DEV_NEED_LINKTIMER) { 5680 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev)); 5681 np->need_linktimer = 1; 5682 np->link_timeout = jiffies + LINK_TIMEOUT; 5683 } else { 5684 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev)); 5685 np->need_linktimer = 0; 5686 } 5687 5688 /* Limit the number of tx's outstanding for hw bug */ 5689 if (id->driver_data & DEV_NEED_TX_LIMIT) { 5690 np->tx_limit = 1; 5691 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 || 5692 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 || 5693 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 || 5694 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 || 5695 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 || 5696 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 || 5697 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 || 5698 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) && 5699 pci_dev->revision >= 0xA2) 5700 np->tx_limit = 0; 5701 } 5702 5703 /* clear phy state and temporarily halt phy interrupts */ 5704 writel(0, base + NvRegMIIMask); 5705 phystate = readl(base + NvRegAdapterControl); 5706 if (phystate & NVREG_ADAPTCTL_RUNNING) { 5707 phystate_orig = 1; 5708 phystate &= ~NVREG_ADAPTCTL_RUNNING; 5709 writel(phystate, base + NvRegAdapterControl); 5710 } 5711 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5712 5713 if (id->driver_data & DEV_HAS_MGMT_UNIT) { 5714 /* management unit running on the mac? */ 5715 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) { 5716 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; 5717 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use); 5718 if (nv_mgmt_acquire_sema(dev)) { 5719 /* management unit setup the phy already? 
*/ 5720 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == 5721 NVREG_XMITCTL_SYNC_PHY_INIT) { 5722 /* phy is inited by mgmt unit */ 5723 phyinitialized = 1; 5724 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); 5725 } else { 5726 /* we need to init the phy */ 5727 } 5728 } 5729 } 5730 } 5731 5732 /* find a suitable phy */ 5733 for (i = 1; i <= 32; i++) { 5734 int id1, id2; 5735 int phyaddr = i & 0x1F; 5736 5737 spin_lock_irq(&np->lock); 5738 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ); 5739 spin_unlock_irq(&np->lock); 5740 if (id1 < 0 || id1 == 0xffff) 5741 continue; 5742 spin_lock_irq(&np->lock); 5743 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ); 5744 spin_unlock_irq(&np->lock); 5745 if (id2 < 0 || id2 == 0xffff) 5746 continue; 5747 5748 np->phy_model = id2 & PHYID2_MODEL_MASK; 5749 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; 5750 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; 5751 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", 5752 pci_name(pci_dev), id1, id2, phyaddr); 5753 np->phyaddr = phyaddr; 5754 np->phy_oui = id1 | id2; 5755 5756 /* Realtek hardcoded phy id1 to all zero's on certain phys */ 5757 if (np->phy_oui == PHY_OUI_REALTEK2) 5758 np->phy_oui = PHY_OUI_REALTEK; 5759 /* Setup phy revision for Realtek */ 5760 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211) 5761 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK; 5762 5763 break; 5764 } 5765 if (i == 33) { 5766 dev_printk(KERN_INFO, &pci_dev->dev, 5767 "open: Could not find a valid PHY.\n"); 5768 goto out_error; 5769 } 5770 5771 if (!phyinitialized) { 5772 /* reset it */ 5773 phy_init(dev); 5774 } else { 5775 /* see if it is a gigabit phy */ 5776 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 5777 if (mii_status & PHY_GIGABIT) { 5778 np->gigabit = PHY_GIGABIT; 5779 } 5780 } 5781 5782 /* set default link speed settings */ 5783 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 5784 np->duplex = 0; 5785 np->autoneg = 1; 5786 5787 err = register_netdev(dev); 5788 if (err) { 5789 dev_printk(KERN_INFO, &pci_dev->dev, 5790 "unable to register netdev: %d\n", err); 5791 goto out_error; 5792 } 5793 5794 dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, " 5795 "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", 5796 dev->name, 5797 np->phy_oui, 5798 np->phyaddr, 5799 dev->dev_addr[0], 5800 dev->dev_addr[1], 5801 dev->dev_addr[2], 5802 dev->dev_addr[3], 5803 dev->dev_addr[4], 5804 dev->dev_addr[5]); 5805 5806 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", 5807 dev->features & NETIF_F_HIGHDMA ? "highdma " : "", 5808 dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ? 5809 "csum " : "", 5810 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? 5811 "vlan " : "", 5812 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "", 5813 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "", 5814 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "", 5815 np->gigabit == PHY_GIGABIT ? "gbit " : "", 5816 np->need_linktimer ? "lnktim " : "", 5817 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", 5818 np->msi_flags & NV_MSI_X_CAPABLE ? 
"msi-x " : "", 5819 np->desc_ver); 5820 5821 return 0; 5822 5823out_error: 5824 if (phystate_orig) 5825 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); 5826 pci_set_drvdata(pci_dev, NULL); 5827out_freering: 5828 free_rings(dev); 5829out_unmap: 5830 iounmap(get_hwbase(dev)); 5831out_relreg: 5832 pci_release_regions(pci_dev); 5833out_disable: 5834 pci_disable_device(pci_dev); 5835out_free: 5836 free_netdev(dev); 5837out: 5838 return err; 5839} 5840 5841static void nv_restore_phy(struct net_device *dev) 5842{ 5843 struct fe_priv *np = netdev_priv(dev); 5844 u16 phy_reserved, mii_control; 5845 5846 if (np->phy_oui == PHY_OUI_REALTEK && 5847 np->phy_model == PHY_MODEL_REALTEK_8201 && 5848 phy_cross == NV_CROSSOVER_DETECTION_DISABLED) { 5849 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3); 5850 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); 5851 phy_reserved &= ~PHY_REALTEK_INIT_MSK1; 5852 phy_reserved |= PHY_REALTEK_INIT8; 5853 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved); 5854 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1); 5855 5856 /* restart auto negotiation */ 5857 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 5858 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 5859 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control); 5860 } 5861} 5862 5863static void __devexit nv_remove(struct pci_dev *pci_dev) 5864{ 5865 struct net_device *dev = pci_get_drvdata(pci_dev); 5866 struct fe_priv *np = netdev_priv(dev); 5867 u8 __iomem *base = get_hwbase(dev); 5868 5869 unregister_netdev(dev); 5870 5871 /* special op: write back the misordered MAC address - otherwise 5872 * the next nv_probe would see a wrong address. 5873 */ 5874 writel(np->orig_mac[0], base + NvRegMacAddrA); 5875 writel(np->orig_mac[1], base + NvRegMacAddrB); 5876 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV, 5877 base + NvRegTransmitPoll); 5878 5879 /* restore any phy related changes */ 5880 nv_restore_phy(dev); 5881 5882 /* free all structures */ 5883 free_rings(dev); 5884 iounmap(get_hwbase(dev)); 5885 pci_release_regions(pci_dev); 5886 pci_disable_device(pci_dev); 5887 free_netdev(dev); 5888 pci_set_drvdata(pci_dev, NULL); 5889} 5890 5891#ifdef CONFIG_PM 5892static int nv_suspend(struct pci_dev *pdev, pm_message_t state) 5893{ 5894 struct net_device *dev = pci_get_drvdata(pdev); 5895 struct fe_priv *np = netdev_priv(dev); 5896 u8 __iomem *base = get_hwbase(dev); 5897 int i; 5898 5899 if (netif_running(dev)) { 5900 // Gross. 
5901 nv_close(dev); 5902 } 5903 netif_device_detach(dev); 5904 5905 /* save non-pci configuration space */ 5906 for (i = 0;i <= np->register_size/sizeof(u32); i++) 5907 np->saved_config_space[i] = readl(base + i*sizeof(u32)); 5908 5909 pci_save_state(pdev); 5910 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled); 5911 pci_disable_device(pdev); 5912 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 5913 return 0; 5914} 5915 5916static int nv_resume(struct pci_dev *pdev) 5917{ 5918 struct net_device *dev = pci_get_drvdata(pdev); 5919 struct fe_priv *np = netdev_priv(dev); 5920 u8 __iomem *base = get_hwbase(dev); 5921 int i, rc = 0; 5922 5923 pci_set_power_state(pdev, PCI_D0); 5924 pci_restore_state(pdev); 5925 /* ack any pending wake events, disable PME */ 5926 pci_enable_wake(pdev, PCI_D0, 0); 5927 5928 /* restore non-pci configuration space */ 5929 for (i = 0;i <= np->register_size/sizeof(u32); i++) 5930 writel(np->saved_config_space[i], base+i*sizeof(u32)); 5931 5932 netif_device_attach(dev); 5933 if (netif_running(dev)) { 5934 rc = nv_open(dev); 5935 nv_set_multicast(dev); 5936 } 5937 return rc; 5938} 5939 5940static void nv_shutdown(struct pci_dev *pdev) 5941{ 5942 struct net_device *dev = pci_get_drvdata(pdev); 5943 struct fe_priv *np = netdev_priv(dev); 5944 5945 if (netif_running(dev)) 5946 nv_close(dev); 5947 5948 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled); 5949 pci_enable_wake(pdev, PCI_D3cold, np->wolenabled); 5950 pci_disable_device(pdev); 5951 pci_set_power_state(pdev, PCI_D3hot); 5952} 5953#else 5954#define nv_suspend NULL 5955#define nv_shutdown NULL 5956#define nv_resume NULL 5957#endif /* CONFIG_PM */ 5958 5959static struct pci_device_id pci_tbl[] = { 5960 { /* nForce Ethernet Controller */ 5961 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1), 5962 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 5963 }, 5964 { /* nForce2 Ethernet Controller */ 5965 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2), 5966 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 5967 }, 5968 { /* nForce3 Ethernet Controller */ 5969 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3), 5970 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 5971 }, 5972 { /* nForce3 Ethernet Controller */ 5973 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4), 5974 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 5975 }, 5976 { /* nForce3 Ethernet Controller */ 5977 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5), 5978 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 5979 }, 5980 { /* nForce3 Ethernet Controller */ 5981 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6), 5982 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 5983 }, 5984 { /* nForce3 Ethernet Controller */ 5985 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7), 5986 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 5987 }, 5988 { /* CK804 Ethernet Controller */ 5989 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), 5990 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 5991 }, 5992 { /* CK804 Ethernet Controller */ 5993 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), 5994 .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 5995 }, 5996 { /* MCP04 Ethernet Controller */ 5997 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), 5998 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 5999 }, 6000 { /* MCP04 Ethernet Controller */ 6001 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), 6002 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6003 }, 6004 { /* MCP51 Ethernet Controller */ 6005 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), 6006 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1, 6007 }, 6008 { /* MCP51 Ethernet Controller */ 6009 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), 6010 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1, 6011 }, 6012 { /* MCP55 Ethernet Controller */ 6013 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 6014 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT, 6015 }, 6016 { /* MCP55 Ethernet Controller */ 6017 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 6018 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT, 6019 }, 6020 { /* MCP61 Ethernet Controller */ 6021 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), 6022 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 6023 }, 6024 { /* MCP61 Ethernet Controller */ 6025 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), 6026 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 6027 }, 6028 { /* MCP61 Ethernet Controller */ 6029 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), 6030 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 6031 }, 6032 { /* MCP61 Ethernet Controller */ 6033 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), 6034 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 6035 }, 6036 { /* MCP65 Ethernet Controller */ 6037 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), 6038 .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6039 }, 6040 { /* MCP65 Ethernet Controller */ 6041 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), 6042 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6043 }, 6044 { /* MCP65 Ethernet Controller */ 6045 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), 6046 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6047 }, 6048 { /* MCP65 Ethernet Controller */ 6049 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), 6050 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6051 }, 6052 { /* MCP67 Ethernet Controller */ 6053 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), 6054 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE, 6055 }, 6056 { /* MCP67 Ethernet Controller */ 6057 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), 6058 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE, 6059 }, 6060 { /* MCP67 Ethernet Controller */ 6061 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), 6062 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE, 6063 }, 6064 { /* MCP67 Ethernet Controller */ 6065 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), 6066 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE, 6067 }, 6068 { /* MCP73 Ethernet Controller */ 6069 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28), 6070 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE, 6071 }, 6072 { /* MCP73 Ethernet Controller */ 6073 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29), 6074 .driver_data =
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE, 6075 }, 6076 { /* MCP73 Ethernet Controller */ 6077 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30), 6078 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE, 6079 }, 6080 { /* MCP73 Ethernet Controller */ 6081 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31), 6082 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE, 6083 }, 6084 { /* MCP77 Ethernet Controller */ 6085 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), 6086 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6087 }, 6088 { /* MCP77 Ethernet Controller */ 6089 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), 6090 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6091 }, 6092 { /* MCP77 Ethernet Controller */ 6093 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), 6094 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6095 }, 6096 { /* MCP77 Ethernet Controller */ 6097 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), 6098 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6099 }, 6100 { /* MCP79 Ethernet Controller */ 6101 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), 6102 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6103 }, 6104 { /* MCP79 Ethernet Controller */ 6105 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), 6106 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6107 }, 6108 { /* MCP79 Ethernet Controller */ 6109 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 
PCI_DEVICE_ID_NVIDIA_NVENET_38), 6110 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6111 }, 6112 { /* MCP79 Ethernet Controller */ 6113 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), 6114 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6115 }, 6116 {0,}, 6117}; 6118 6119static struct pci_driver driver = { 6120 .name = DRV_NAME, 6121 .id_table = pci_tbl, 6122 .probe = nv_probe, 6123 .remove = __devexit_p(nv_remove), 6124 .suspend = nv_suspend, 6125 .resume = nv_resume, 6126 .shutdown = nv_shutdown, 6127}; 6128 6129static int __init init_nic(void) 6130{ 6131 return pci_register_driver(&driver); 6132} 6133 6134static void __exit exit_nic(void) 6135{ 6136 pci_unregister_driver(&driver); 6137} 6138 6139module_param(max_interrupt_work, int, 0); 6140MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt"); 6141module_param(optimization_mode, int, 0); 6142MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer."); 6143module_param(poll_interval, int, 0); 6144MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated; set it to (time_in_micro_secs * 100) / (2^10). Min is 0 and Max is 65535."); 6145module_param(msi, int, 0); 6146MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0."); 6147module_param(msix, int, 0); 6148MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0."); 6149module_param(dma_64bit, int, 0); 6150MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); 6151module_param(phy_cross, int, 0); 6152MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0."); 6153 6154MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); 6155MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); 6156MODULE_LICENSE("GPL"); 6157 6158MODULE_DEVICE_TABLE(pci, pci_tbl); 6159 6160module_init(init_nic); 6161module_exit(exit_nic);
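/*
 * Usage sketch (illustrative, not part of the driver): the module parameters
 * declared above can be given at load time, e.g.
 *
 *   modprobe forcedeth optimization_mode=1 msi=1 dma_64bit=1
 *
 * where optimization_mode=1 selects the timer-driven (CPU) interrupt mode
 * and msi/dma_64bit enable the corresponding features when the hardware
 * supports them.
 */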