Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
forcedeth.c at v2.6.30-rc5 (6378 lines, 197 kB)
/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION	"0.64"
#define DRV_NAME	"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk	printk
#else
#define dprintk(x...)	do { } while (0)
#endif
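/*
 * Illustrative only: with the "#if 0" above flipped to "#if 1", a call
 * such as
 *	dprintk(KERN_DEBUG "%s: irq handler entered\n", dev->name);
 * prints via printk; in the default build it compiles away to nothing.
 */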

#define TX_WORK_PER_LOOP	64
#define RX_WORK_PER_LOOP	64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x000001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x000002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x000004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x000008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x000010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x000020	/* device supports vlan tagging and striping */
#define DEV_HAS_MSI		0x000040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x000080	/* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x000100	/* device supports power savings */
#define DEV_HAS_STATISTICS_V1	0x000200	/* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x000600	/* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3	0x000e00	/* device supports hw statistics version 3 */
#define DEV_HAS_TEST_EXTENDED	0x001000	/* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x002000	/* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR	0x004000	/* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX	0x008000	/* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1	0x010000	/* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2	0x020000	/* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3	0x040000	/* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT	0x080000	/* device needs to limit tx */
#define DEV_HAS_GEAR_MODE	0x100000	/* device supports gear mode */
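/*
 * These flags are OR'd together into the driver_data field of each PCI
 * device table entry (see the "Known bugs" note above about removing
 * DEV_NEED_TIMERIRQ). E.g., a board needing the timer irq workaround
 * that also takes jumbo frames would carry, as an illustrative
 * combination, DEV_NEED_TIMERIRQ | DEV_HAS_LARGEDESC.
 */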

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR	0x0001
#define NVREG_IRQ_RX		0x0002
#define NVREG_IRQ_RX_NOBUF	0x0004
#define NVREG_IRQ_TX_ERR	0x0008
#define NVREG_IRQ_TX_OK		0x0010
#define NVREG_IRQ_TIMER		0x0020
#define NVREG_IRQ_LINK		0x0040
#define NVREG_IRQ_RX_FORCED	0x0080
#define NVREG_IRQ_TX_FORCED	0x0100
#define NVREG_IRQ_RECOVER_ERROR	0x8200
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU	0x0060
#define NVREG_IRQ_TX_ALL	(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL	(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER		(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	65535	/* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED	0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
#define NVREG_XMITCTL_DATA_START	0x00100000
#define NVREG_XMITCTL_DATA_READY	0x00010000
#define NVREG_XMITCTL_DATA_ERROR	0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE	0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE	0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT		0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK	0x000003ff
#define NVREG_BKOFFCTRL_SELECT		24
#define NVREG_BKOFFCTRL_GEAR		12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT	0
#define NVREG_RINGSZ_RXSHIFT	16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION	0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION		0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE	0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
};
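/*
 * All offsets above are relative to the mapped register window, so a
 * typical access looks like (illustrative):
 *	u8 __iomem *base = get_hwbase(dev);
 *	u32 events = readl(base + NvRegIrqStatus);
 */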

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};

#define FLAG_MASK_V1	0xffff0000
#define FLAG_MASK_V2	0xffffc000
#define LEN_MASK_V1	(0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2	(0xffffffff ^ FLAG_MASK_V2)
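/*
 * DESC_VER_1/DESC_VER_2 rings use the two-word ring_desc layout, while
 * DESC_VER_3 uses the four-word ring_desc_ex layout (see nv_optimized()
 * below). The XOR above simply inverts the flag mask, i.e.
 *	LEN_MASK_V1 == 0x0000ffff and LEN_MASK_V2 == 0x00003fff,
 * so flaglen packs the buffer length in the low bits and status flags
 * in the high bits of one word.
 */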

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT	(1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604
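/*
 * Sketch of how the rx path is expected to test these bits on a v2/v3
 * descriptor (illustrative only, not the full receive logic):
 *	u32 flags = le32_to_cpu(np->get_rx.ex->flaglen);
 *	if (!(flags & NV_RX2_AVAIL)) {
 *		// descriptor still owned by the nic
 *	} else if (flags & NV_RX2_ERROR_MASK) {
 *		// drop the frame and bump the error statistics
 *	}
 */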

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		512
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)
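/*
 * These intervals are in jiffies; the "+1" guards against HZ/20 or
 * HZ/100 rounding down to 0. Worked example, assuming HZ=1000 for
 * illustration: OOM_REFILL is 51 jiffies, i.e. roughly the 50 ms
 * refill retry mentioned above nv_do_rx_refill() below.
 */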

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3

/* PHY defines */
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK		0x03ff
#define PHYID1_OUI_SHFT		6
#define PHYID2_OUI_MASK		0xfc00
#define PHYID2_OUI_SHFT		10
#define PHYID2_MODEL_MASK	0x03f0
#define PHY_MODEL_REALTEK_8211	0x0110
#define PHY_REV_MASK		0x0001
#define PHY_REV_REALTEK_8211B	0x0000
#define PHY_REV_REALTEK_8211C	0x0001
#define PHY_MODEL_REALTEK_8201	0x0200
#define PHY_MODEL_MARVELL_E3016	0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE	0x0001
#define NV_PAUSEFRAME_TX_CAPABLE	0x0002
#define NV_PAUSEFRAME_RX_ENABLE		0x0004
#define NV_PAUSEFRAME_TX_ENABLE		0x0008
#define NV_PAUSEFRAME_RX_REQ		0x0010
#define NV_PAUSEFRAME_TX_REQ		0x0020
#define NV_PAUSEFRAME_AUTONEG		0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

#define NV_MSI_PRIV_OFFSET	0x68
#define NV_MSI_PRIV_VALUE	0xffffffff

#define NV_RESTART_TX	0x1
#define NV_RESTART_RX	0x2

#define NV_TX_LIMIT_COUNT	16

#define NV_DYNAMIC_THRESHOLD		4
#define NV_DYNAMIC_MAX_QUIET_COUNT	2048

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
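/*
 * Worked out: nv_ethtool_stats holds 24 base + 6 version-2 + 3 version-3
 * u64 counters, so NV_DEV_STATISTICS_V3_COUNT = 33,
 * NV_DEV_STATISTICS_V2_COUNT = 30 and NV_DEV_STATISTICS_V1_COUNT = 24,
 * matching the 33 names in nv_estats_str[] above.
 */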

/* diagnostics */
#define NV_TEST_COUNT_BASE	3
#define NV_TEST_COUNT_EXTENDED	4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];	/* -rx */
	char name_tx[IFNAMSIZ + 3];	/* -tx */
	char name_other[IFNAMSIZ + 6];	/* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is computed as [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
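/*
 * Worked example (using the formula above): a register value v covers
 * roughly v * 1024 / 100 microseconds, so NVREG_POLL_DEFAULT_CPU (13)
 * gives about 133 us per timer interrupt, and the value 97 quoted at
 * NvRegPollingInterval above gives about 993 us, i.e. the documented 1 ms.
 */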

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk("%s", msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING	0x01
#define NV_SETUP_TX_RING	0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
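/*
 * The double shift above sidesteps C's undefined behaviour for shifting
 * a value by its full width: when dma_addr_t is 32 bits wide, ">> 32"
 * in one step would be undefined, while ">>31>>1" reliably yields 0.
 */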

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
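/*
 * In other words: multiple irqs are in use only when MSI-X is enabled
 * with more than the single shared vector; legacy INTx, plain MSI and
 * one-vector MSI-X all funnel through a single handler.
 */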

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

static void nv_napi_enable(struct net_device *dev)
{
#ifdef CONFIG_FORCEDETH_NAPI
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
#endif
}

static void nv_napi_disable(struct net_device *dev)
{
#ifdef CONFIG_FORCEDETH_NAPI
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
#endif
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
			dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
			dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
			dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
			dev->name, miireg, addr, retval);
	}

	return retval;
}
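/*
 * Typical usage, as seen throughout phy_init() below: pass MII_READ as
 * the value to read a register, or a real value to write one; a
 * negative return signals a timeout or read error, 0 a completed write:
 *	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 *	mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
 */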
phy init failed.\n", pci_name(np->pci_dev)); 1198 return PHY_ERROR; 1199 } 1200 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) { 1201 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1202 return PHY_ERROR; 1203 } 1204 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) { 1205 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1206 return PHY_ERROR; 1207 } 1208 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1209 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1210 return PHY_ERROR; 1211 } 1212 } 1213 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1214 np->phy_rev == PHY_REV_REALTEK_8211C) { 1215 u32 powerstate = readl(base + NvRegPowerState2); 1216 1217 /* need to perform hw phy reset */ 1218 powerstate |= NVREG_POWERSTATE2_PHY_RESET; 1219 writel(powerstate, base + NvRegPowerState2); 1220 msleep(25); 1221 1222 powerstate &= ~NVREG_POWERSTATE2_PHY_RESET; 1223 writel(powerstate, base + NvRegPowerState2); 1224 msleep(25); 1225 1226 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); 1227 reg |= PHY_REALTEK_INIT9; 1228 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) { 1229 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1230 return PHY_ERROR; 1231 } 1232 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) { 1233 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1234 return PHY_ERROR; 1235 } 1236 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ); 1237 if (!(reg & PHY_REALTEK_INIT11)) { 1238 reg |= PHY_REALTEK_INIT11; 1239 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) { 1240 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1241 return PHY_ERROR; 1242 } 1243 } 1244 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1245 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1246 return PHY_ERROR; 1247 } 1248 } 1249 if (np->phy_model == PHY_MODEL_REALTEK_8201) { 1250 if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 || 1251 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 || 1252 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 || 1253 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 || 1254 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 || 1255 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 || 1256 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 || 1257 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) { 1258 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); 1259 phy_reserved |= PHY_REALTEK_INIT7; 1260 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) { 1261 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1262 return PHY_ERROR; 1263 } 1264 } 1265 } 1266 } 1267 1268 /* set advertise register */ 1269 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 1270 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); 1271 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { 1272 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); 1273 return PHY_ERROR; 1274 } 1275 1276 /* get phy interface type */ 1277 phyinterface = readl(base + NvRegPhyInterface); 1278 1279 /* see if gigabit phy */ 1280 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 1281 if (mii_status & PHY_GIGABIT) { 1282 np->gigabit = PHY_GIGABIT; 1283 mii_control_1000 = mii_rw(dev, np->phyaddr, 
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
phy init failed.\n", pci_name(np->pci_dev)); 1371 return PHY_ERROR; 1372 } 1373 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1374 phy_reserved &= ~PHY_VITESSE_INIT_MSK1; 1375 phy_reserved |= PHY_VITESSE_INIT3; 1376 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1377 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1378 return PHY_ERROR; 1379 } 1380 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1381 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1382 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1383 return PHY_ERROR; 1384 } 1385 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) { 1386 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1387 return PHY_ERROR; 1388 } 1389 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) { 1390 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1391 return PHY_ERROR; 1392 } 1393 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1394 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1395 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1396 return PHY_ERROR; 1397 } 1398 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1399 phy_reserved &= ~PHY_VITESSE_INIT_MSK2; 1400 phy_reserved |= PHY_VITESSE_INIT8; 1401 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1402 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1403 return PHY_ERROR; 1404 } 1405 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) { 1406 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1407 return PHY_ERROR; 1408 } 1409 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) { 1410 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1411 return PHY_ERROR; 1412 } 1413 } 1414 if (np->phy_oui == PHY_OUI_REALTEK) { 1415 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1416 np->phy_rev == PHY_REV_REALTEK_8211B) { 1417 /* reset could have cleared these out, set them back */ 1418 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1419 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1420 return PHY_ERROR; 1421 } 1422 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { 1423 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1424 return PHY_ERROR; 1425 } 1426 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1427 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1428 return PHY_ERROR; 1429 } 1430 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { 1431 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1432 return PHY_ERROR; 1433 } 1434 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) { 1435 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1436 return PHY_ERROR; 1437 } 1438 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) { 1439 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1440 return PHY_ERROR; 1441 } 1442 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1443 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1444 return PHY_ERROR; 1445 } 1446 } 1447 if (np->phy_model == PHY_MODEL_REALTEK_8201) { 1448 if (np->device_id == 
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
				phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
				phy_reserved |= PHY_REALTEK_INIT3;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE | BMCR_PDOWN);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}
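/*
 * Note on the refill loops below: put_rx is advanced only while it is
 * not equal to "one descriptor before get_rx", i.e. the ring is kept
 * one entry short so that a completely full ring remains
 * distinguishable from a completely empty one.
 */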
1696 * nv_get_stats: dev->get_stats function 1697 * Get latest stats value from the nic. 1698 * Called with read_lock(&dev_base_lock) held for read - 1699 * only synchronized against unregister_netdevice. 1700 */ 1701static struct net_device_stats *nv_get_stats(struct net_device *dev) 1702{ 1703 struct fe_priv *np = netdev_priv(dev); 1704 1705 /* If the nic supports hw counters then retrieve latest values */ 1706 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) { 1707 nv_get_hw_stats(dev); 1708 1709 /* copy to net_device stats */ 1710 dev->stats.tx_bytes = np->estats.tx_bytes; 1711 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; 1712 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; 1713 dev->stats.rx_crc_errors = np->estats.rx_crc_errors; 1714 dev->stats.rx_over_errors = np->estats.rx_over_errors; 1715 dev->stats.rx_errors = np->estats.rx_errors_total; 1716 dev->stats.tx_errors = np->estats.tx_errors_total; 1717 } 1718 1719 return &dev->stats; 1720} 1721 1722/* 1723 * nv_alloc_rx: fill rx ring entries. 1724 * Return 1 if the allocations for the skbs failed and the 1725 * rx engine is left without available descriptors 1726 */ 1727static int nv_alloc_rx(struct net_device *dev) 1728{ 1729 struct fe_priv *np = netdev_priv(dev); 1730 struct ring_desc* less_rx; 1731 1732 less_rx = np->get_rx.orig; 1733 if (less_rx-- == np->first_rx.orig) 1734 less_rx = np->last_rx.orig; 1735 1736 while (np->put_rx.orig != less_rx) { 1737 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1738 if (skb) { 1739 np->put_rx_ctx->skb = skb; 1740 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, 1741 skb->data, 1742 skb_tailroom(skb), 1743 PCI_DMA_FROMDEVICE); 1744 np->put_rx_ctx->dma_len = skb_tailroom(skb); 1745 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); 1746 wmb(); 1747 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); 1748 if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) 1749 np->put_rx.orig = np->first_rx.orig; 1750 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1751 np->put_rx_ctx = np->first_rx_ctx; 1752 } else { 1753 return 1; 1754 } 1755 } 1756 return 0; 1757} 1758 1759static int nv_alloc_rx_optimized(struct net_device *dev) 1760{ 1761 struct fe_priv *np = netdev_priv(dev); 1762 struct ring_desc_ex* less_rx; 1763 1764 less_rx = np->get_rx.ex; 1765 if (less_rx-- == np->first_rx.ex) 1766 less_rx = np->last_rx.ex; 1767 1768 while (np->put_rx.ex != less_rx) { 1769 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1770 if (skb) { 1771 np->put_rx_ctx->skb = skb; 1772 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, 1773 skb->data, 1774 skb_tailroom(skb), 1775 PCI_DMA_FROMDEVICE); 1776 np->put_rx_ctx->dma_len = skb_tailroom(skb); 1777 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma)); 1778 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma)); 1779 wmb(); 1780 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); 1781 if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) 1782 np->put_rx.ex = np->first_rx.ex; 1783 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1784 np->put_rx_ctx = np->first_rx_ctx; 1785 } else { 1786 return 1; 1787 } 1788 } 1789 return 0; 1790} 1791 1792/* If rx bufs are exhausted, called after 50ms to attempt to refresh */ 1793#ifdef CONFIG_FORCEDETH_NAPI 1794static void nv_do_rx_refill(unsigned long data) 1795{ 1796 struct net_device *dev = (struct net_device *) data; 1797 struct fe_priv *np = netdev_priv(dev); 1798 1799 
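 /* Under NAPI the poll handler owns the ring refill, so this timer only needs to reschedule the softirq. The non-NAPI variant below allocates directly and masks the device interrupt first, presumably so nv_alloc_rx never races the irq handler. */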
/* Just reschedule NAPI rx processing */ 1800 napi_schedule(&np->napi); 1801} 1802#else 1803static void nv_do_rx_refill(unsigned long data) 1804{ 1805 struct net_device *dev = (struct net_device *) data; 1806 struct fe_priv *np = netdev_priv(dev); 1807 int retcode; 1808 1809 if (!using_multi_irqs(dev)) { 1810 if (np->msi_flags & NV_MSI_X_ENABLED) 1811 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1812 else 1813 disable_irq(np->pci_dev->irq); 1814 } else { 1815 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1816 } 1817 if (!nv_optimized(np)) 1818 retcode = nv_alloc_rx(dev); 1819 else 1820 retcode = nv_alloc_rx_optimized(dev); 1821 if (retcode) { 1822 spin_lock_irq(&np->lock); 1823 if (!np->in_shutdown) 1824 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 1825 spin_unlock_irq(&np->lock); 1826 } 1827 if (!using_multi_irqs(dev)) { 1828 if (np->msi_flags & NV_MSI_X_ENABLED) 1829 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 1830 else 1831 enable_irq(np->pci_dev->irq); 1832 } else { 1833 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1834 } 1835} 1836#endif 1837 1838static void nv_init_rx(struct net_device *dev) 1839{ 1840 struct fe_priv *np = netdev_priv(dev); 1841 int i; 1842 1843 np->get_rx = np->put_rx = np->first_rx = np->rx_ring; 1844 1845 if (!nv_optimized(np)) 1846 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; 1847 else 1848 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; 1849 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb; 1850 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; 1851 1852 for (i = 0; i < np->rx_ring_size; i++) { 1853 if (!nv_optimized(np)) { 1854 np->rx_ring.orig[i].flaglen = 0; 1855 np->rx_ring.orig[i].buf = 0; 1856 } else { 1857 np->rx_ring.ex[i].flaglen = 0; 1858 np->rx_ring.ex[i].txvlan = 0; 1859 np->rx_ring.ex[i].bufhigh = 0; 1860 np->rx_ring.ex[i].buflow = 0; 1861 } 1862 np->rx_skb[i].skb = NULL; 1863 np->rx_skb[i].dma = 0; 1864 } 1865} 1866 1867static void nv_init_tx(struct net_device *dev) 1868{ 1869 struct fe_priv *np = netdev_priv(dev); 1870 int i; 1871 1872 np->get_tx = np->put_tx = np->first_tx = np->tx_ring; 1873 1874 if (!nv_optimized(np)) 1875 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; 1876 else 1877 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; 1878 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; 1879 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; 1880 np->tx_pkts_in_progress = 0; 1881 np->tx_change_owner = NULL; 1882 np->tx_end_flip = NULL; 1883 np->tx_stop = 0; 1884 1885 for (i = 0; i < np->tx_ring_size; i++) { 1886 if (!nv_optimized(np)) { 1887 np->tx_ring.orig[i].flaglen = 0; 1888 np->tx_ring.orig[i].buf = 0; 1889 } else { 1890 np->tx_ring.ex[i].flaglen = 0; 1891 np->tx_ring.ex[i].txvlan = 0; 1892 np->tx_ring.ex[i].bufhigh = 0; 1893 np->tx_ring.ex[i].buflow = 0; 1894 } 1895 np->tx_skb[i].skb = NULL; 1896 np->tx_skb[i].dma = 0; 1897 np->tx_skb[i].dma_len = 0; 1898 np->tx_skb[i].first_tx_desc = NULL; 1899 np->tx_skb[i].next_tx_ctx = NULL; 1900 } 1901} 1902 1903static int nv_init_ring(struct net_device *dev) 1904{ 1905 struct fe_priv *np = netdev_priv(dev); 1906 1907 nv_init_tx(dev); 1908 nv_init_rx(dev); 1909 1910 if (!nv_optimized(np)) 1911 return nv_alloc_rx(dev); 1912 else 1913 return nv_alloc_rx_optimized(dev); 1914} 1915 1916static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb) 1917{ 1918 struct fe_priv *np = netdev_priv(dev); 1919 1920 if (tx_skb->dma) { 1921 pci_unmap_page(np->pci_dev, tx_skb->dma, 1922 
tx_skb->dma_len, 1923 PCI_DMA_TODEVICE); 1924 tx_skb->dma = 0; 1925 } 1926 if (tx_skb->skb) { 1927 dev_kfree_skb_any(tx_skb->skb); 1928 tx_skb->skb = NULL; 1929 return 1; 1930 } else { 1931 return 0; 1932 } 1933} 1934 1935static void nv_drain_tx(struct net_device *dev) 1936{ 1937 struct fe_priv *np = netdev_priv(dev); 1938 unsigned int i; 1939 1940 for (i = 0; i < np->tx_ring_size; i++) { 1941 if (!nv_optimized(np)) { 1942 np->tx_ring.orig[i].flaglen = 0; 1943 np->tx_ring.orig[i].buf = 0; 1944 } else { 1945 np->tx_ring.ex[i].flaglen = 0; 1946 np->tx_ring.ex[i].txvlan = 0; 1947 np->tx_ring.ex[i].bufhigh = 0; 1948 np->tx_ring.ex[i].buflow = 0; 1949 } 1950 if (nv_release_txskb(dev, &np->tx_skb[i])) 1951 dev->stats.tx_dropped++; 1952 np->tx_skb[i].dma = 0; 1953 np->tx_skb[i].dma_len = 0; 1954 np->tx_skb[i].first_tx_desc = NULL; 1955 np->tx_skb[i].next_tx_ctx = NULL; 1956 } 1957 np->tx_pkts_in_progress = 0; 1958 np->tx_change_owner = NULL; 1959 np->tx_end_flip = NULL; 1960} 1961 1962static void nv_drain_rx(struct net_device *dev) 1963{ 1964 struct fe_priv *np = netdev_priv(dev); 1965 int i; 1966 1967 for (i = 0; i < np->rx_ring_size; i++) { 1968 if (!nv_optimized(np)) { 1969 np->rx_ring.orig[i].flaglen = 0; 1970 np->rx_ring.orig[i].buf = 0; 1971 } else { 1972 np->rx_ring.ex[i].flaglen = 0; 1973 np->rx_ring.ex[i].txvlan = 0; 1974 np->rx_ring.ex[i].bufhigh = 0; 1975 np->rx_ring.ex[i].buflow = 0; 1976 } 1977 wmb(); 1978 if (np->rx_skb[i].skb) { 1979 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, 1980 (skb_end_pointer(np->rx_skb[i].skb) - 1981 np->rx_skb[i].skb->data), 1982 PCI_DMA_FROMDEVICE); 1983 dev_kfree_skb(np->rx_skb[i].skb); 1984 np->rx_skb[i].skb = NULL; 1985 } 1986 } 1987} 1988 1989static void nv_drain_rxtx(struct net_device *dev) 1990{ 1991 nv_drain_tx(dev); 1992 nv_drain_rx(dev); 1993} 1994 1995static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) 1996{ 1997 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); 1998} 1999 2000static void nv_legacybackoff_reseed(struct net_device *dev) 2001{ 2002 u8 __iomem *base = get_hwbase(dev); 2003 u32 reg; 2004 u32 low; 2005 int tx_status = 0; 2006 2007 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK; 2008 get_random_bytes(&low, sizeof(low)); 2009 reg |= low & NVREG_SLOTTIME_MASK; 2010 2011 /* Need to stop tx before change takes effect. 2012 * Caller has already gained np->lock. 
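 * The random low bits written to NvRegSlotTime appear to reseed the hardware's legacy collision-backoff LFSR; rx is bounced along with tx, presumably because the register update needs both engines idle.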
2013 */ 2014 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START; 2015 if (tx_status) 2016 nv_stop_tx(dev); 2017 nv_stop_rx(dev); 2018 writel(reg, base + NvRegSlotTime); 2019 if (tx_status) 2020 nv_start_tx(dev); 2021 nv_start_rx(dev); 2022} 2023 2024/* Gear Backoff Seeds */ 2025#define BACKOFF_SEEDSET_ROWS 8 2026#define BACKOFF_SEEDSET_LFSRS 15 2027 2028/* Known Good seed sets */ 2029static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2030 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2031 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974}, 2032 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2033 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974}, 2034 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984}, 2035 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984}, 2036 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84}, 2037 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}}; 2038 2039static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2040 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2041 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2042 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397}, 2043 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2044 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2045 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2046 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2047 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}}; 2048 2049static void nv_gear_backoff_reseed(struct net_device *dev) 2050{ 2051 u8 __iomem *base = get_hwbase(dev); 2052 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed; 2053 u32 temp, seedset, combinedSeed; 2054 int i; 2055 2056 /* Setup seed for free running LFSR */ 2057 /* We are going to read the time stamp counter 3 times 2058 and swizzle bits around to increase randomness */ 2059 get_random_bytes(&miniseed1, sizeof(miniseed1)); 2060 miniseed1 &= 0x0fff; 2061 if (miniseed1 == 0) 2062 miniseed1 = 0xabc; 2063 2064 get_random_bytes(&miniseed2, sizeof(miniseed2)); 2065 miniseed2 &= 0x0fff; 2066 if (miniseed2 == 0) 2067 miniseed2 = 0xabc; 2068 miniseed2_reversed = 2069 ((miniseed2 & 0xF00) >> 8) | 2070 (miniseed2 & 0x0F0) | 2071 ((miniseed2 & 0x00F) << 8); 2072 2073 get_random_bytes(&miniseed3, sizeof(miniseed3)); 2074 miniseed3 &= 0x0fff; 2075 if (miniseed3 == 0) 2076 miniseed3 = 0xabc; 2077 miniseed3_reversed = 2078 ((miniseed3 & 0xF00) >> 8) | 2079 (miniseed3 & 0x0F0) | 2080 ((miniseed3 & 0x00F) << 8); 2081 2082 combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) | 2083 (miniseed2 ^ miniseed3_reversed); 2084 2085 /* Seeds can not be zero */ 2086 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0) 2087 combinedSeed |= 0x08; 2088 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0) 2089 combinedSeed |= 0x8000; 2090 2091 /* No need to disable tx here */ 2092 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT); 2093 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK; 2094 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR; 2095 writel(temp,base + NvRegBackOffControl); 2096 2097 /* 
Setup seeds for all gear LFSRs. */ 2098 get_random_bytes(&seedset, sizeof(seedset)); 2099 seedset = seedset % BACKOFF_SEEDSET_ROWS; 2100 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) 2101 { 2102 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT); 2103 temp |= main_seedset[seedset][i-1] & 0x3ff; 2104 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); 2105 writel(temp, base + NvRegBackOffControl); 2106 } 2107} 2108 2109/* 2110 * nv_start_xmit: dev->hard_start_xmit function 2111 * Called with netif_tx_lock held. 2112 */ 2113static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 2114{ 2115 struct fe_priv *np = netdev_priv(dev); 2116 u32 tx_flags = 0; 2117 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 2118 unsigned int fragments = skb_shinfo(skb)->nr_frags; 2119 unsigned int i; 2120 u32 offset = 0; 2121 u32 bcnt; 2122 u32 size = skb->len-skb->data_len; 2123 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2124 u32 empty_slots; 2125 struct ring_desc* put_tx; 2126 struct ring_desc* start_tx; 2127 struct ring_desc* prev_tx; 2128 struct nv_skb_map* prev_tx_ctx; 2129 unsigned long flags; 2130 2131 /* add fragments to entries count */ 2132 for (i = 0; i < fragments; i++) { 2133 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 2134 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2135 } 2136 2137 spin_lock_irqsave(&np->lock, flags); 2138 empty_slots = nv_get_empty_tx_slots(np); 2139 if (unlikely(empty_slots <= entries)) { 2140 netif_stop_queue(dev); 2141 np->tx_stop = 1; 2142 spin_unlock_irqrestore(&np->lock, flags); 2143 return NETDEV_TX_BUSY; 2144 } 2145 spin_unlock_irqrestore(&np->lock, flags); 2146 2147 start_tx = put_tx = np->put_tx.orig; 2148 2149 /* setup the header buffer */ 2150 do { 2151 prev_tx = put_tx; 2152 prev_tx_ctx = np->put_tx_ctx; 2153 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2154 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2155 PCI_DMA_TODEVICE); 2156 np->put_tx_ctx->dma_len = bcnt; 2157 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2158 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2159 2160 tx_flags = np->tx_flags; 2161 offset += bcnt; 2162 size -= bcnt; 2163 if (unlikely(put_tx++ == np->last_tx.orig)) 2164 put_tx = np->first_tx.orig; 2165 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2166 np->put_tx_ctx = np->first_tx_ctx; 2167 } while (size); 2168 2169 /* setup the fragments */ 2170 for (i = 0; i < fragments; i++) { 2171 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2172 u32 size = frag->size; 2173 offset = 0; 2174 2175 do { 2176 prev_tx = put_tx; 2177 prev_tx_ctx = np->put_tx_ctx; 2178 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; 2179 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 2180 PCI_DMA_TODEVICE); 2181 np->put_tx_ctx->dma_len = bcnt; 2182 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2183 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2184 2185 offset += bcnt; 2186 size -= bcnt; 2187 if (unlikely(put_tx++ == np->last_tx.orig)) 2188 put_tx = np->first_tx.orig; 2189 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2190 np->put_tx_ctx = np->first_tx_ctx; 2191 } while (size); 2192 } 2193 2194 /* set last fragment flag */ 2195 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); 2196 2197 /* save skb in this slot's context area */ 2198 prev_tx_ctx->skb = skb; 2199 2200 if (skb_is_gso(skb)) 2201 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 2202 else 2203 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 2204 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2205 2206 spin_lock_irqsave(&np->lock, flags); 2207 2208 /* set tx flags */ 2209 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2210 np->put_tx.orig = put_tx; 2211 2212 spin_unlock_irqrestore(&np->lock, flags); 2213 2214 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n", 2215 dev->name, entries, tx_flags_extra); 2216 { 2217 int j; 2218 for (j=0; j<64; j++) { 2219 if ((j%16) == 0) 2220 dprintk("\n%03x:", j); 2221 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2222 } 2223 dprintk("\n"); 2224 } 2225 2226 dev->trans_start = jiffies; 2227 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2228 return NETDEV_TX_OK; 2229} 2230 2231static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev) 2232{ 2233 struct fe_priv *np = netdev_priv(dev); 2234 u32 tx_flags = 0; 2235 u32 tx_flags_extra; 2236 unsigned int fragments = skb_shinfo(skb)->nr_frags; 2237 unsigned int i; 2238 u32 offset = 0; 2239 u32 bcnt; 2240 u32 size = skb->len-skb->data_len; 2241 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2242 u32 empty_slots; 2243 struct ring_desc_ex* put_tx; 2244 struct ring_desc_ex* start_tx; 2245 struct ring_desc_ex* prev_tx; 2246 struct nv_skb_map* prev_tx_ctx; 2247 struct nv_skb_map* start_tx_ctx; 2248 unsigned long flags; 2249 2250 /* add fragments to entries count */ 2251 for (i = 0; i < fragments; i++) { 2252 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 2253 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2254 } 2255 2256 spin_lock_irqsave(&np->lock, flags); 2257 empty_slots = nv_get_empty_tx_slots(np); 2258 if (unlikely(empty_slots <= entries)) { 2259 netif_stop_queue(dev); 2260 np->tx_stop = 1; 2261 spin_unlock_irqrestore(&np->lock, flags); 2262 return NETDEV_TX_BUSY; 2263 } 2264 spin_unlock_irqrestore(&np->lock, flags); 2265 2266 start_tx = put_tx = np->put_tx.ex; 2267 start_tx_ctx = np->put_tx_ctx; 2268 2269 /* setup the header buffer */ 2270 do { 2271 prev_tx = put_tx; 2272 prev_tx_ctx = np->put_tx_ctx; 2273 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; 2274 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2275 PCI_DMA_TODEVICE); 2276 np->put_tx_ctx->dma_len = bcnt; 2277 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2278 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2279 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2280 2281 tx_flags = NV_TX2_VALID; 2282 offset += bcnt; 2283 size -= bcnt; 2284 if (unlikely(put_tx++ == np->last_tx.ex)) 2285 put_tx = np->first_tx.ex; 2286 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2287 np->put_tx_ctx = np->first_tx_ctx; 2288 } while (size); 2289 2290 /* setup the fragments */ 2291 for (i = 0; i < fragments; i++) { 2292 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2293 u32 size = frag->size; 2294 offset = 0; 2295 2296 do { 2297 prev_tx = put_tx; 2298 prev_tx_ctx = np->put_tx_ctx; 2299 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2300 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 2301 PCI_DMA_TODEVICE); 2302 np->put_tx_ctx->dma_len = bcnt; 2303 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2304 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2305 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2306 2307 offset += bcnt; 2308 size -= bcnt; 2309 if (unlikely(put_tx++ == np->last_tx.ex)) 2310 put_tx = np->first_tx.ex; 2311 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2312 np->put_tx_ctx = np->first_tx_ctx; 2313 } while (size); 2314 } 2315 2316 /* set last fragment flag */ 2317 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); 2318 2319 /* save skb in this slot's context area */ 2320 prev_tx_ctx->skb = skb; 2321 2322 if (skb_is_gso(skb)) 2323 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 2324 else 2325 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 2326 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2327 2328 /* vlan tag */ 2329 if (likely(!np->vlangrp)) { 2330 start_tx->txvlan = 0; 2331 } else { 2332 if (vlan_tx_tag_present(skb)) 2333 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb)); 2334 else 2335 start_tx->txvlan = 0; 2336 } 2337 2338 spin_lock_irqsave(&np->lock, flags); 2339 2340 if (np->tx_limit) { 2341 /* Limit the number of outstanding tx. Setup all fragments, but 2342 * do not set the VALID bit on the first descriptor. Save a pointer 2343 * to that descriptor and also for next skb_map element. 2344 */ 2345 2346 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) { 2347 if (!np->tx_change_owner) 2348 np->tx_change_owner = start_tx_ctx; 2349 2350 /* remove VALID bit */ 2351 tx_flags &= ~NV_TX2_VALID; 2352 start_tx_ctx->first_tx_desc = start_tx; 2353 start_tx_ctx->next_tx_ctx = np->put_tx_ctx; 2354 np->tx_end_flip = np->put_tx_ctx; 2355 } else { 2356 np->tx_pkts_in_progress++; 2357 } 2358 } 2359 2360 /* set tx flags */ 2361 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2362 np->put_tx.ex = put_tx; 2363 2364 spin_unlock_irqrestore(&np->lock, flags); 2365 2366 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. 
tx_flags_extra: %x\n", 2367 dev->name, entries, tx_flags_extra); 2368 { 2369 int j; 2370 for (j=0; j<64; j++) { 2371 if ((j%16) == 0) 2372 dprintk("\n%03x:", j); 2373 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2374 } 2375 dprintk("\n"); 2376 } 2377 2378 dev->trans_start = jiffies; 2379 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2380 return NETDEV_TX_OK; 2381} 2382 2383static inline void nv_tx_flip_ownership(struct net_device *dev) 2384{ 2385 struct fe_priv *np = netdev_priv(dev); 2386 2387 np->tx_pkts_in_progress--; 2388 if (np->tx_change_owner) { 2389 np->tx_change_owner->first_tx_desc->flaglen |= 2390 cpu_to_le32(NV_TX2_VALID); 2391 np->tx_pkts_in_progress++; 2392 2393 np->tx_change_owner = np->tx_change_owner->next_tx_ctx; 2394 if (np->tx_change_owner == np->tx_end_flip) 2395 np->tx_change_owner = NULL; 2396 2397 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2398 } 2399} 2400 2401/* 2402 * nv_tx_done: check for completed packets, release the skbs. 2403 * 2404 * Caller must own np->lock. 2405 */ 2406static int nv_tx_done(struct net_device *dev, int limit) 2407{ 2408 struct fe_priv *np = netdev_priv(dev); 2409 u32 flags; 2410 int tx_work = 0; 2411 struct ring_desc* orig_get_tx = np->get_tx.orig; 2412 2413 while ((np->get_tx.orig != np->put_tx.orig) && 2414 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && 2415 (tx_work < limit)) { 2416 2417 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n", 2418 dev->name, flags); 2419 2420 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 2421 np->get_tx_ctx->dma_len, 2422 PCI_DMA_TODEVICE); 2423 np->get_tx_ctx->dma = 0; 2424 2425 if (np->desc_ver == DESC_VER_1) { 2426 if (flags & NV_TX_LASTPACKET) { 2427 if (flags & NV_TX_ERROR) { 2428 if (flags & NV_TX_UNDERFLOW) 2429 dev->stats.tx_fifo_errors++; 2430 if (flags & NV_TX_CARRIERLOST) 2431 dev->stats.tx_carrier_errors++; 2432 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) 2433 nv_legacybackoff_reseed(dev); 2434 dev->stats.tx_errors++; 2435 } else { 2436 dev->stats.tx_packets++; 2437 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2438 } 2439 dev_kfree_skb_any(np->get_tx_ctx->skb); 2440 np->get_tx_ctx->skb = NULL; 2441 tx_work++; 2442 } 2443 } else { 2444 if (flags & NV_TX2_LASTPACKET) { 2445 if (flags & NV_TX2_ERROR) { 2446 if (flags & NV_TX2_UNDERFLOW) 2447 dev->stats.tx_fifo_errors++; 2448 if (flags & NV_TX2_CARRIERLOST) 2449 dev->stats.tx_carrier_errors++; 2450 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2451 nv_legacybackoff_reseed(dev); 2452 dev->stats.tx_errors++; 2453 } else { 2454 dev->stats.tx_packets++; 2455 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2456 } 2457 dev_kfree_skb_any(np->get_tx_ctx->skb); 2458 np->get_tx_ctx->skb = NULL; 2459 tx_work++; 2460 } 2461 } 2462 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) 2463 np->get_tx.orig = np->first_tx.orig; 2464 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2465 np->get_tx_ctx = np->first_tx_ctx; 2466 } 2467 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { 2468 np->tx_stop = 0; 2469 netif_wake_queue(dev); 2470 } 2471 return tx_work; 2472} 2473 2474static int nv_tx_done_optimized(struct net_device *dev, int limit) 2475{ 2476 struct fe_priv *np = netdev_priv(dev); 2477 u32 flags; 2478 int tx_work = 0; 2479 struct ring_desc_ex* orig_get_tx = np->get_tx.ex; 2480 2481 while ((np->get_tx.ex != np->put_tx.ex) && 2482 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & 
NV_TX_VALID) && 2483 (tx_work < limit)) { 2484 2485 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", 2486 dev->name, flags); 2487 2488 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 2489 np->get_tx_ctx->dma_len, 2490 PCI_DMA_TODEVICE); 2491 np->get_tx_ctx->dma = 0; 2492 2493 if (flags & NV_TX2_LASTPACKET) { 2494 if (!(flags & NV_TX2_ERROR)) 2495 dev->stats.tx_packets++; 2496 else { 2497 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { 2498 if (np->driver_data & DEV_HAS_GEAR_MODE) 2499 nv_gear_backoff_reseed(dev); 2500 else 2501 nv_legacybackoff_reseed(dev); 2502 } 2503 } 2504 2505 dev_kfree_skb_any(np->get_tx_ctx->skb); 2506 np->get_tx_ctx->skb = NULL; 2507 tx_work++; 2508 2509 if (np->tx_limit) { 2510 nv_tx_flip_ownership(dev); 2511 } 2512 } 2513 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) 2514 np->get_tx.ex = np->first_tx.ex; 2515 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2516 np->get_tx_ctx = np->first_tx_ctx; 2517 } 2518 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { 2519 np->tx_stop = 0; 2520 netif_wake_queue(dev); 2521 } 2522 return tx_work; 2523} 2524 2525/* 2526 * nv_tx_timeout: dev->tx_timeout function 2527 * Called with netif_tx_lock held. 2528 */ 2529static void nv_tx_timeout(struct net_device *dev) 2530{ 2531 struct fe_priv *np = netdev_priv(dev); 2532 u8 __iomem *base = get_hwbase(dev); 2533 u32 status; 2534 union ring_type put_tx; 2535 int saved_tx_limit; 2536 2537 if (np->msi_flags & NV_MSI_X_ENABLED) 2538 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2539 else 2540 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2541 2542 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); 2543 2544 { 2545 int i; 2546 2547 printk(KERN_INFO "%s: Ring at %lx\n", 2548 dev->name, (unsigned long)np->ring_addr); 2549 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 2550 for (i=0;i<=np->register_size;i+= 32) { 2551 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 2552 i, 2553 readl(base + i + 0), readl(base + i + 4), 2554 readl(base + i + 8), readl(base + i + 12), 2555 readl(base + i + 16), readl(base + i + 20), 2556 readl(base + i + 24), readl(base + i + 28)); 2557 } 2558 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 2559 for (i=0;i<np->tx_ring_size;i+= 4) { 2560 if (!nv_optimized(np)) { 2561 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 2562 i, 2563 le32_to_cpu(np->tx_ring.orig[i].buf), 2564 le32_to_cpu(np->tx_ring.orig[i].flaglen), 2565 le32_to_cpu(np->tx_ring.orig[i+1].buf), 2566 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), 2567 le32_to_cpu(np->tx_ring.orig[i+2].buf), 2568 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), 2569 le32_to_cpu(np->tx_ring.orig[i+3].buf), 2570 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); 2571 } else { 2572 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", 2573 i, 2574 le32_to_cpu(np->tx_ring.ex[i].bufhigh), 2575 le32_to_cpu(np->tx_ring.ex[i].buflow), 2576 le32_to_cpu(np->tx_ring.ex[i].flaglen), 2577 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), 2578 le32_to_cpu(np->tx_ring.ex[i+1].buflow), 2579 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), 2580 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), 2581 le32_to_cpu(np->tx_ring.ex[i+2].buflow), 2582 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), 2583 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), 2584 le32_to_cpu(np->tx_ring.ex[i+3].buflow), 2585 le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); 2586 } 2587 } 2588 } 2589 2590 
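 /* Recovery sketch (best guess at the intent): with tx stopped, reap everything the hardware has finished, note where its ring pointer was, rebuild the ring, and resume from that point. tx_limit is cleared during the reap, apparently so nv_tx_done_optimized hands no further deferred (non-VALID) packets to the hardware mid-recovery. */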
spin_lock_irq(&np->lock); 2591 2592 /* 1) stop tx engine */ 2593 nv_stop_tx(dev); 2594 2595 /* 2) complete any outstanding tx and do not give HW any limited tx pkts */ 2596 saved_tx_limit = np->tx_limit; 2597 np->tx_limit = 0; /* prevent giving HW any limited pkts */ 2598 np->tx_stop = 0; /* prevent waking tx queue */ 2599 if (!nv_optimized(np)) 2600 nv_tx_done(dev, np->tx_ring_size); 2601 else 2602 nv_tx_done_optimized(dev, np->tx_ring_size); 2603 2604 /* save current HW position */ 2605 if (np->tx_change_owner) 2606 put_tx.ex = np->tx_change_owner->first_tx_desc; 2607 else 2608 put_tx = np->put_tx; 2609 2610 /* 3) clear all tx state */ 2611 nv_drain_tx(dev); 2612 nv_init_tx(dev); 2613 2614 /* 4) restore state to current HW position */ 2615 np->get_tx = np->put_tx = put_tx; 2616 np->tx_limit = saved_tx_limit; 2617 2618 /* 5) restart tx engine */ 2619 nv_start_tx(dev); 2620 netif_wake_queue(dev); 2621 spin_unlock_irq(&np->lock); 2622} 2623 2624/* 2625 * Called when the nic notices a mismatch between the actual data len on the 2626 * wire and the len indicated in the 802 header 2627 */ 2628static int nv_getlen(struct net_device *dev, void *packet, int datalen) 2629{ 2630 int hdrlen; /* length of the 802 header */ 2631 int protolen; /* length as stored in the proto field */ 2632 2633 /* 1) calculate len according to header */ 2634 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { 2635 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); 2636 hdrlen = VLAN_HLEN; 2637 } else { 2638 protolen = ntohs( ((struct ethhdr *)packet)->h_proto); 2639 hdrlen = ETH_HLEN; 2640 } 2641 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n", 2642 dev->name, datalen, protolen, hdrlen); 2643 if (protolen > ETH_DATA_LEN) 2644 return datalen; /* Value in proto field not a len, no checks possible */ 2645 2646 protolen += hdrlen; 2647 /* consistency checks: */ 2648 if (datalen > ETH_ZLEN) { 2649 if (datalen >= protolen) { 2650 /* more data on wire than in 802 header, trim off 2651 * additional data. 2652 */ 2653 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2654 dev->name, protolen); 2655 return protolen; 2656 } else { 2657 /* less data on wire than mentioned in header. 2658 * Discard the packet. 2659 */ 2660 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n", 2661 dev->name); 2662 return -1; 2663 } 2664 } else { 2665 /* short packet. Accept only if 802 values are also short */ 2666 if (protolen > ETH_ZLEN) { 2667 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n", 2668 dev->name); 2669 return -1; 2670 } 2671 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2672 dev->name, datalen); 2673 return datalen; 2674 } 2675} 2676 2677static int nv_rx_process(struct net_device *dev, int limit) 2678{ 2679 struct fe_priv *np = netdev_priv(dev); 2680 u32 flags; 2681 int rx_work = 0; 2682 struct sk_buff *skb; 2683 int len; 2684 2685 while((np->get_rx.orig != np->put_rx.orig) && 2686 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && 2687 (rx_work < limit)) { 2688 2689 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n", 2690 dev->name, flags); 2691 2692 /* 2693 * the packet is for us - immediately tear down the pci mapping. 2694 * TODO: check if a prefetch of the first cacheline improves 2695 * the performance. 
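 * Once unmapped, the skb is detached from the ring slot (get_rx_ctx->skb is cleared) so nv_alloc_rx can attach a fresh buffer to the slot later.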
2696 */ 2697 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2698 np->get_rx_ctx->dma_len, 2699 PCI_DMA_FROMDEVICE); 2700 skb = np->get_rx_ctx->skb; 2701 np->get_rx_ctx->skb = NULL; 2702 2703 { 2704 int j; 2705 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2706 for (j=0; j<64; j++) { 2707 if ((j%16) == 0) 2708 dprintk("\n%03x:", j); 2709 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2710 } 2711 dprintk("\n"); 2712 } 2713 /* look at what we actually got: */ 2714 if (np->desc_ver == DESC_VER_1) { 2715 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2716 len = flags & LEN_MASK_V1; 2717 if (unlikely(flags & NV_RX_ERROR)) { 2718 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { 2719 len = nv_getlen(dev, skb->data, len); 2720 if (len < 0) { 2721 dev->stats.rx_errors++; 2722 dev_kfree_skb(skb); 2723 goto next_pkt; 2724 } 2725 } 2726 /* framing errors are soft errors */ 2727 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { 2728 if (flags & NV_RX_SUBSTRACT1) { 2729 len--; 2730 } 2731 } 2732 /* the rest are hard errors */ 2733 else { 2734 if (flags & NV_RX_MISSEDFRAME) 2735 dev->stats.rx_missed_errors++; 2736 if (flags & NV_RX_CRCERR) 2737 dev->stats.rx_crc_errors++; 2738 if (flags & NV_RX_OVERFLOW) 2739 dev->stats.rx_over_errors++; 2740 dev->stats.rx_errors++; 2741 dev_kfree_skb(skb); 2742 goto next_pkt; 2743 } 2744 } 2745 } else { 2746 dev_kfree_skb(skb); 2747 goto next_pkt; 2748 } 2749 } else { 2750 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2751 len = flags & LEN_MASK_V2; 2752 if (unlikely(flags & NV_RX2_ERROR)) { 2753 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { 2754 len = nv_getlen(dev, skb->data, len); 2755 if (len < 0) { 2756 dev->stats.rx_errors++; 2757 dev_kfree_skb(skb); 2758 goto next_pkt; 2759 } 2760 } 2761 /* framing errors are soft errors */ 2762 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2763 if (flags & NV_RX2_SUBSTRACT1) { 2764 len--; 2765 } 2766 } 2767 /* the rest are hard errors */ 2768 else { 2769 if (flags & NV_RX2_CRCERR) 2770 dev->stats.rx_crc_errors++; 2771 if (flags & NV_RX2_OVERFLOW) 2772 dev->stats.rx_over_errors++; 2773 dev->stats.rx_errors++; 2774 dev_kfree_skb(skb); 2775 goto next_pkt; 2776 } 2777 } 2778 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2779 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2780 skb->ip_summed = CHECKSUM_UNNECESSARY; 2781 } else { 2782 dev_kfree_skb(skb); 2783 goto next_pkt; 2784 } 2785 } 2786 /* got a valid packet - forward it to the network core */ 2787 skb_put(skb, len); 2788 skb->protocol = eth_type_trans(skb, dev); 2789 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n", 2790 dev->name, len, skb->protocol); 2791#ifdef CONFIG_FORCEDETH_NAPI 2792 netif_receive_skb(skb); 2793#else 2794 netif_rx(skb); 2795#endif 2796 dev->stats.rx_packets++; 2797 dev->stats.rx_bytes += len; 2798next_pkt: 2799 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2800 np->get_rx.orig = np->first_rx.orig; 2801 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2802 np->get_rx_ctx = np->first_rx_ctx; 2803 2804 rx_work++; 2805 } 2806 2807 return rx_work; 2808} 2809 2810static int nv_rx_process_optimized(struct net_device *dev, int limit) 2811{ 2812 struct fe_priv *np = netdev_priv(dev); 2813 u32 flags; 2814 u32 vlanflags = 0; 2815 int rx_work = 0; 2816 struct sk_buff *skb; 2817 int len; 2818 2819 while((np->get_rx.ex != np->put_rx.ex) && 2820 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && 2821 (rx_work < 
limit)) { 2822 2823 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n", 2824 dev->name, flags); 2825 2826 /* 2827 * the packet is for us - immediately tear down the pci mapping. 2828 * TODO: check if a prefetch of the first cacheline improves 2829 * the performance. 2830 */ 2831 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2832 np->get_rx_ctx->dma_len, 2833 PCI_DMA_FROMDEVICE); 2834 skb = np->get_rx_ctx->skb; 2835 np->get_rx_ctx->skb = NULL; 2836 2837 { 2838 int j; 2839 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2840 for (j=0; j<64; j++) { 2841 if ((j%16) == 0) 2842 dprintk("\n%03x:", j); 2843 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2844 } 2845 dprintk("\n"); 2846 } 2847 /* look at what we actually got: */ 2848 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2849 len = flags & LEN_MASK_V2; 2850 if (unlikely(flags & NV_RX2_ERROR)) { 2851 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { 2852 len = nv_getlen(dev, skb->data, len); 2853 if (len < 0) { 2854 dev_kfree_skb(skb); 2855 goto next_pkt; 2856 } 2857 } 2858 /* framing errors are soft errors */ 2859 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2860 if (flags & NV_RX2_SUBSTRACT1) { 2861 len--; 2862 } 2863 } 2864 /* the rest are hard errors */ 2865 else { 2866 dev_kfree_skb(skb); 2867 goto next_pkt; 2868 } 2869 } 2870 2871 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2872 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2873 skb->ip_summed = CHECKSUM_UNNECESSARY; 2874 2875 /* got a valid packet - forward it to the network core */ 2876 skb_put(skb, len); 2877 skb->protocol = eth_type_trans(skb, dev); 2878 prefetch(skb->data); 2879 2880 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n", 2881 dev->name, len, skb->protocol); 2882 2883 if (likely(!np->vlangrp)) { 2884#ifdef CONFIG_FORCEDETH_NAPI 2885 netif_receive_skb(skb); 2886#else 2887 netif_rx(skb); 2888#endif 2889 } else { 2890 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); 2891 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { 2892#ifdef CONFIG_FORCEDETH_NAPI 2893 vlan_hwaccel_receive_skb(skb, np->vlangrp, 2894 vlanflags & NV_RX3_VLAN_TAG_MASK); 2895#else 2896 vlan_hwaccel_rx(skb, np->vlangrp, 2897 vlanflags & NV_RX3_VLAN_TAG_MASK); 2898#endif 2899 } else { 2900#ifdef CONFIG_FORCEDETH_NAPI 2901 netif_receive_skb(skb); 2902#else 2903 netif_rx(skb); 2904#endif 2905 } 2906 } 2907 2908 dev->stats.rx_packets++; 2909 dev->stats.rx_bytes += len; 2910 } else { 2911 dev_kfree_skb(skb); 2912 } 2913next_pkt: 2914 if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) 2915 np->get_rx.ex = np->first_rx.ex; 2916 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2917 np->get_rx_ctx = np->first_rx_ctx; 2918 2919 rx_work++; 2920 } 2921 2922 return rx_work; 2923} 2924 2925static void set_bufsize(struct net_device *dev) 2926{ 2927 struct fe_priv *np = netdev_priv(dev); 2928 2929 if (dev->mtu <= ETH_DATA_LEN) 2930 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; 2931 else 2932 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; 2933} 2934 2935/* 2936 * nv_change_mtu: dev->change_mtu function 2937 * Called with dev_base_lock held for read. 
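 * New values outside 64..np->pkt_limit are rejected below; pkt_limit presumably reflects whether the chip supports jumbo frames (DEV_HAS_LARGEDESC).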
2938 */ 2939static int nv_change_mtu(struct net_device *dev, int new_mtu) 2940{ 2941 struct fe_priv *np = netdev_priv(dev); 2942 int old_mtu; 2943 2944 if (new_mtu < 64 || new_mtu > np->pkt_limit) 2945 return -EINVAL; 2946 2947 old_mtu = dev->mtu; 2948 dev->mtu = new_mtu; 2949 2950 /* return early if the buffer sizes will not change */ 2951 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) 2952 return 0; 2953 if (old_mtu == new_mtu) 2954 return 0; 2955 2956 /* synchronized against open : rtnl_lock() held by caller */ 2957 if (netif_running(dev)) { 2958 u8 __iomem *base = get_hwbase(dev); 2959 /* 2960 * It seems that the nic preloads valid ring entries into an 2961 * internal buffer. The procedure for flushing everything is 2962 * guessed, there is probably a simpler approach. 2963 * Changing the MTU is a rare event, it shouldn't matter. 2964 */ 2965 nv_disable_irq(dev); 2966 nv_napi_disable(dev); 2967 netif_tx_lock_bh(dev); 2968 netif_addr_lock(dev); 2969 spin_lock(&np->lock); 2970 /* stop engines */ 2971 nv_stop_rxtx(dev); 2972 nv_txrx_reset(dev); 2973 /* drain rx queue */ 2974 nv_drain_rxtx(dev); 2975 /* reinit driver view of the rx queue */ 2976 set_bufsize(dev); 2977 if (nv_init_ring(dev)) { 2978 if (!np->in_shutdown) 2979 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2980 } 2981 /* reinit nic view of the rx queue */ 2982 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2983 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2984 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 2985 base + NvRegRingSizes); 2986 pci_push(base); 2987 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2988 pci_push(base); 2989 2990 /* restart rx engine */ 2991 nv_start_rxtx(dev); 2992 spin_unlock(&np->lock); 2993 netif_addr_unlock(dev); 2994 netif_tx_unlock_bh(dev); 2995 nv_napi_enable(dev); 2996 nv_enable_irq(dev); 2997 } 2998 return 0; 2999} 3000 3001static void nv_copy_mac_to_hw(struct net_device *dev) 3002{ 3003 u8 __iomem *base = get_hwbase(dev); 3004 u32 mac[2]; 3005 3006 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 3007 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 3008 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 3009 3010 writel(mac[0], base + NvRegMacAddrA); 3011 writel(mac[1], base + NvRegMacAddrB); 3012} 3013 3014/* 3015 * nv_set_mac_address: dev->set_mac_address function 3016 * Called with rtnl_lock() held. 3017 */ 3018static int nv_set_mac_address(struct net_device *dev, void *addr) 3019{ 3020 struct fe_priv *np = netdev_priv(dev); 3021 struct sockaddr *macaddr = (struct sockaddr*)addr; 3022 3023 if (!is_valid_ether_addr(macaddr->sa_data)) 3024 return -EADDRNOTAVAIL; 3025 3026 /* synchronized against open : rtnl_lock() held by caller */ 3027 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); 3028 3029 if (netif_running(dev)) { 3030 netif_tx_lock_bh(dev); 3031 netif_addr_lock(dev); 3032 spin_lock_irq(&np->lock); 3033 3034 /* stop rx engine */ 3035 nv_stop_rx(dev); 3036 3037 /* set mac address */ 3038 nv_copy_mac_to_hw(dev); 3039 3040 /* restart rx engine */ 3041 nv_start_rx(dev); 3042 spin_unlock_irq(&np->lock); 3043 netif_addr_unlock(dev); 3044 netif_tx_unlock_bh(dev); 3045 } else { 3046 nv_copy_mac_to_hw(dev); 3047 } 3048 return 0; 3049} 3050 3051/* 3052 * nv_set_multicast: dev->set_multicast function 3053 * Called with netif_tx_lock held. 
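 * The hardware filter appears to accept a packet when its address matches NvRegMulticastAddrA/B on every bit set in NvRegMulticastMaskA/B: alwaysOn collects the bits set in all list entries and alwaysOff the bits clear in all of them, so mask = alwaysOn | alwaysOff is the tightest mask covering the whole list at once.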
3054 */ 3055static void nv_set_multicast(struct net_device *dev) 3056{ 3057 struct fe_priv *np = netdev_priv(dev); 3058 u8 __iomem *base = get_hwbase(dev); 3059 u32 addr[2]; 3060 u32 mask[2]; 3061 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; 3062 3063 memset(addr, 0, sizeof(addr)); 3064 memset(mask, 0, sizeof(mask)); 3065 3066 if (dev->flags & IFF_PROMISC) { 3067 pff |= NVREG_PFF_PROMISC; 3068 } else { 3069 pff |= NVREG_PFF_MYADDR; 3070 3071 if (dev->flags & IFF_ALLMULTI || dev->mc_list) { 3072 u32 alwaysOff[2]; 3073 u32 alwaysOn[2]; 3074 3075 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; 3076 if (dev->flags & IFF_ALLMULTI) { 3077 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 3078 } else { 3079 struct dev_mc_list *walk; 3080 3081 walk = dev->mc_list; 3082 while (walk != NULL) { 3083 u32 a, b; 3084 a = le32_to_cpu(*(__le32 *) walk->dmi_addr); 3085 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4])); 3086 alwaysOn[0] &= a; 3087 alwaysOff[0] &= ~a; 3088 alwaysOn[1] &= b; 3089 alwaysOff[1] &= ~b; 3090 walk = walk->next; 3091 } 3092 } 3093 addr[0] = alwaysOn[0]; 3094 addr[1] = alwaysOn[1]; 3095 mask[0] = alwaysOn[0] | alwaysOff[0]; 3096 mask[1] = alwaysOn[1] | alwaysOff[1]; 3097 } else { 3098 mask[0] = NVREG_MCASTMASKA_NONE; 3099 mask[1] = NVREG_MCASTMASKB_NONE; 3100 } 3101 } 3102 addr[0] |= NVREG_MCASTADDRA_FORCE; 3103 pff |= NVREG_PFF_ALWAYS; 3104 spin_lock_irq(&np->lock); 3105 nv_stop_rx(dev); 3106 writel(addr[0], base + NvRegMulticastAddrA); 3107 writel(addr[1], base + NvRegMulticastAddrB); 3108 writel(mask[0], base + NvRegMulticastMaskA); 3109 writel(mask[1], base + NvRegMulticastMaskB); 3110 writel(pff, base + NvRegPacketFilterFlags); 3111 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", 3112 dev->name); 3113 nv_start_rx(dev); 3114 spin_unlock_irq(&np->lock); 3115} 3116 3117static void nv_update_pause(struct net_device *dev, u32 pause_flags) 3118{ 3119 struct fe_priv *np = netdev_priv(dev); 3120 u8 __iomem *base = get_hwbase(dev); 3121 3122 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); 3123 3124 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { 3125 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; 3126 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { 3127 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); 3128 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3129 } else { 3130 writel(pff, base + NvRegPacketFilterFlags); 3131 } 3132 } 3133 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 3134 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 3135 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 3136 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 3137 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 3138 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 3139 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { 3140 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 3141 /* limit the number of tx pause frames to a default of 8 */ 3142 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit); 3143 } 3144 writel(pause_enable, base + NvRegTxPauseFrame); 3145 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 3146 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3147 } else { 3148 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 3149 writel(regmisc, base + NvRegMisc1); 3150 } 3151 } 3152} 3153 3154/** 3155 * nv_update_linkspeed: Setup the MAC according to the link partner 3156 * @dev: Network device to be 
configured 3157 * 3158 * The function queries the PHY and checks if there is a link partner. 3159 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is 3160 * set to 10 MBit HD. 3161 * 3162 * The function returns 0 if there is no link partner and 1 if there is 3163 * a good link partner. 3164 */ 3165static int nv_update_linkspeed(struct net_device *dev) 3166{ 3167 struct fe_priv *np = netdev_priv(dev); 3168 u8 __iomem *base = get_hwbase(dev); 3169 int adv = 0; 3170 int lpa = 0; 3171 int adv_lpa, adv_pause, lpa_pause; 3172 int newls = np->linkspeed; 3173 int newdup = np->duplex; 3174 int mii_status; 3175 int retval = 0; 3176 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 3177 u32 txrxFlags = 0; 3178 u32 phy_exp; 3179 3180 /* BMSR_LSTATUS is latched, read it twice: 3181 * we want the current value. 3182 */ 3183 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3184 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3185 3186 if (!(mii_status & BMSR_LSTATUS)) { 3187 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", 3188 dev->name); 3189 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3190 newdup = 0; 3191 retval = 0; 3192 goto set_speed; 3193 } 3194 3195 if (np->autoneg == 0) { 3196 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", 3197 dev->name, np->fixed_mode); 3198 if (np->fixed_mode & LPA_100FULL) { 3199 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3200 newdup = 1; 3201 } else if (np->fixed_mode & LPA_100HALF) { 3202 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3203 newdup = 0; 3204 } else if (np->fixed_mode & LPA_10FULL) { 3205 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3206 newdup = 1; 3207 } else { 3208 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3209 newdup = 0; 3210 } 3211 retval = 1; 3212 goto set_speed; 3213 } 3214 /* check auto negotiation is complete */ 3215 if (!(mii_status & BMSR_ANEGCOMPLETE)) { 3216 /* still in autonegotiation - configure nic for 10 MBit HD and wait. 
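 * Once negotiation settles the link change irq should fire again, and nv_linkchange will rerun this function with the final result.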
*/ 3217 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3218 newdup = 0; 3219 retval = 0; 3220 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); 3221 goto set_speed; 3222 } 3223 3224 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3225 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 3226 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", 3227 dev->name, adv, lpa); 3228 3229 retval = 1; 3230 if (np->gigabit == PHY_GIGABIT) { 3231 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3232 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); 3233 3234 if ((control_1000 & ADVERTISE_1000FULL) && 3235 (status_1000 & LPA_1000FULL)) { 3236 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", 3237 dev->name); 3238 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 3239 newdup = 1; 3240 goto set_speed; 3241 } 3242 } 3243 3244 /* FIXME: handle parallel detection properly */ 3245 adv_lpa = lpa & adv; 3246 if (adv_lpa & LPA_100FULL) { 3247 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3248 newdup = 1; 3249 } else if (adv_lpa & LPA_100HALF) { 3250 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3251 newdup = 0; 3252 } else if (adv_lpa & LPA_10FULL) { 3253 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3254 newdup = 1; 3255 } else if (adv_lpa & LPA_10HALF) { 3256 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3257 newdup = 0; 3258 } else { 3259 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); 3260 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3261 newdup = 0; 3262 } 3263 3264set_speed: 3265 if (np->duplex == newdup && np->linkspeed == newls) 3266 return retval; 3267 3268 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", 3269 dev->name, np->linkspeed, np->duplex, newls, newdup); 3270 3271 np->duplex = newdup; 3272 np->linkspeed = newls; 3273 3274 /* The transmitter and receiver must be restarted for safe update */ 3275 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) { 3276 txrxFlags |= NV_RESTART_TX; 3277 nv_stop_tx(dev); 3278 } 3279 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { 3280 txrxFlags |= NV_RESTART_RX; 3281 nv_stop_rx(dev); 3282 } 3283 3284 if (np->gigabit == PHY_GIGABIT) { 3285 phyreg = readl(base + NvRegSlotTime); 3286 phyreg &= ~(0x3FF00); 3287 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) || 3288 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)) 3289 phyreg |= NVREG_SLOTTIME_10_100_FULL; 3290 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 3291 phyreg |= NVREG_SLOTTIME_1000_FULL; 3292 writel(phyreg, base + NvRegSlotTime); 3293 } 3294 3295 phyreg = readl(base + NvRegPhyInterface); 3296 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 3297 if (np->duplex == 0) 3298 phyreg |= PHY_HALF; 3299 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 3300 phyreg |= PHY_100; 3301 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 3302 phyreg |= PHY_1000; 3303 writel(phyreg, base + NvRegPhyInterface); 3304 3305 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ 3306 if (phyreg & PHY_RGMII) { 3307 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { 3308 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 3309 } else { 3310 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { 3311 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) 3312 txreg 
= NVREG_TX_DEFERRAL_RGMII_STRETCH_10; 3313 else 3314 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100; 3315 } else { 3316 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 3317 } 3318 } 3319 } else { 3320 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) 3321 txreg = NVREG_TX_DEFERRAL_MII_STRETCH; 3322 else 3323 txreg = NVREG_TX_DEFERRAL_DEFAULT; 3324 } 3325 writel(txreg, base + NvRegTxDeferral); 3326 3327 if (np->desc_ver == DESC_VER_1) { 3328 txreg = NVREG_TX_WM_DESC1_DEFAULT; 3329 } else { 3330 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 3331 txreg = NVREG_TX_WM_DESC2_3_1000; 3332 else 3333 txreg = NVREG_TX_WM_DESC2_3_DEFAULT; 3334 } 3335 writel(txreg, base + NvRegTxWatermark); 3336 3337 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 3338 base + NvRegMisc1); 3339 pci_push(base); 3340 writel(np->linkspeed, base + NvRegLinkSpeed); 3341 pci_push(base); 3342 3343 pause_flags = 0; 3344 /* setup pause frame */ 3345 if (np->duplex != 0) { 3346 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 3347 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); 3348 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 3349 3350 switch (adv_pause) { 3351 case ADVERTISE_PAUSE_CAP: 3352 if (lpa_pause & LPA_PAUSE_CAP) { 3353 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3354 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3355 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3356 } 3357 break; 3358 case ADVERTISE_PAUSE_ASYM: 3359 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 3360 { 3361 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3362 } 3363 break; 3364 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: 3365 if (lpa_pause & LPA_PAUSE_CAP) 3366 { 3367 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3368 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3369 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3370 } 3371 if (lpa_pause == LPA_PAUSE_ASYM) 3372 { 3373 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3374 } 3375 break; 3376 } 3377 } else { 3378 pause_flags = np->pause_flags; 3379 } 3380 } 3381 nv_update_pause(dev, pause_flags); 3382 3383 if (txrxFlags & NV_RESTART_TX) 3384 nv_start_tx(dev); 3385 if (txrxFlags & NV_RESTART_RX) 3386 nv_start_rx(dev); 3387 3388 return retval; 3389} 3390 3391static void nv_linkchange(struct net_device *dev) 3392{ 3393 if (nv_update_linkspeed(dev)) { 3394 if (!netif_carrier_ok(dev)) { 3395 netif_carrier_on(dev); 3396 printk(KERN_INFO "%s: link up.\n", dev->name); 3397 nv_start_rx(dev); 3398 } 3399 } else { 3400 if (netif_carrier_ok(dev)) { 3401 netif_carrier_off(dev); 3402 printk(KERN_INFO "%s: link down.\n", dev->name); 3403 nv_stop_rx(dev); 3404 } 3405 } 3406} 3407 3408static void nv_link_irq(struct net_device *dev) 3409{ 3410 u8 __iomem *base = get_hwbase(dev); 3411 u32 miistat; 3412 3413 miistat = readl(base + NvRegMIIStatus); 3414 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus); 3415 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); 3416 3417 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 3418 nv_linkchange(dev); 3419 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); 3420} 3421 3422static void nv_msi_workaround(struct fe_priv *np) 3423{ 3424 3425 /* Need to toggle the msi irq mask within the ethernet device, 3426 * otherwise, future interrupts will not be detected. 
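 * This looks like a re-arm for edge-triggered MSI delivery: clearing the mask and re-enabling vector 0 lets the next event raise an interrupt again.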
3427 */ 3428 if (np->msi_flags & NV_MSI_ENABLED) { 3429 u8 __iomem *base = np->base; 3430 3431 writel(0, base + NvRegMSIIrqMask); 3432 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3433 } 3434} 3435 3436static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work) 3437{ 3438 struct fe_priv *np = netdev_priv(dev); 3439 3440 if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) { 3441 if (total_work > NV_DYNAMIC_THRESHOLD) { 3442 /* transition to poll based interrupts */ 3443 np->quiet_count = 0; 3444 if (np->irqmask != NVREG_IRQMASK_CPU) { 3445 np->irqmask = NVREG_IRQMASK_CPU; 3446 return 1; 3447 } 3448 } else { 3449 if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) { 3450 np->quiet_count++; 3451 } else { 3452 /* reached a period of low activity, switch 3453 to per tx/rx packet interrupts */ 3454 if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) { 3455 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 3456 return 1; 3457 } 3458 } 3459 } 3460 } 3461 return 0; 3462} 3463 3464static irqreturn_t nv_nic_irq(int foo, void *data) 3465{ 3466 struct net_device *dev = (struct net_device *) data; 3467 struct fe_priv *np = netdev_priv(dev); 3468 u8 __iomem *base = get_hwbase(dev); 3469#ifndef CONFIG_FORCEDETH_NAPI 3470 int total_work = 0; 3471 int loop_count = 0; 3472#endif 3473 3474 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 3475 3476 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3477 np->events = readl(base + NvRegIrqStatus); 3478 writel(np->events, base + NvRegIrqStatus); 3479 } else { 3480 np->events = readl(base + NvRegMSIXIrqStatus); 3481 writel(np->events, base + NvRegMSIXIrqStatus); 3482 } 3483 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events); 3484 if (!(np->events & np->irqmask)) 3485 return IRQ_NONE; 3486 3487 nv_msi_workaround(np); 3488 3489#ifdef CONFIG_FORCEDETH_NAPI 3490 napi_schedule(&np->napi); 3491 3492 /* Disable further irqs 3493 (msix not enabled with napi) */ 3494 writel(0, base + NvRegIrqMask); 3495 3496#else 3497 do 3498 { 3499 int work = 0; 3500 if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) { 3501 if (unlikely(nv_alloc_rx(dev))) { 3502 spin_lock(&np->lock); 3503 if (!np->in_shutdown) 3504 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3505 spin_unlock(&np->lock); 3506 } 3507 } 3508 3509 spin_lock(&np->lock); 3510 work += nv_tx_done(dev, TX_WORK_PER_LOOP); 3511 spin_unlock(&np->lock); 3512 3513 if (!work) 3514 break; 3515 3516 total_work += work; 3517 3518 loop_count++; 3519 } 3520 while (loop_count < max_interrupt_work); 3521 3522 if (nv_change_interrupt_mode(dev, total_work)) { 3523 /* setup new irq mask */ 3524 writel(np->irqmask, base + NvRegIrqMask); 3525 } 3526 3527 if (unlikely(np->events & NVREG_IRQ_LINK)) { 3528 spin_lock(&np->lock); 3529 nv_link_irq(dev); 3530 spin_unlock(&np->lock); 3531 } 3532 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3533 spin_lock(&np->lock); 3534 nv_linkchange(dev); 3535 spin_unlock(&np->lock); 3536 np->link_timeout = jiffies + LINK_TIMEOUT; 3537 } 3538 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { 3539 spin_lock(&np->lock); 3540 /* disable interrupts on the nic */ 3541 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3542 writel(0, base + NvRegIrqMask); 3543 else 3544 writel(np->irqmask, base + NvRegIrqMask); 3545 pci_push(base); 3546 3547 if (!np->in_shutdown) { 3548 np->nic_poll_irq = np->irqmask; 3549 np->recover_error = 1; 3550 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3551 } 3552 spin_unlock(&np->lock); 3553 } 3554#endif 3555 dprintk(KERN_DEBUG "%s: 
nv_nic_irq completed\n", dev->name); 3556 3557 return IRQ_HANDLED; 3558} 3559 3560/** 3561 * All _optimized functions are used to help increase performance 3562 * (reduce CPU and increase throughput). They use descriptor version 3, 3563 * compiler directives, and reduce memory accesses. 3564 */ 3565static irqreturn_t nv_nic_irq_optimized(int foo, void *data) 3566{ 3567 struct net_device *dev = (struct net_device *) data; 3568 struct fe_priv *np = netdev_priv(dev); 3569 u8 __iomem *base = get_hwbase(dev); 3570#ifndef CONFIG_FORCEDETH_NAPI 3571 int total_work = 0; 3572 int loop_count = 0; 3573#endif 3574 3575 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name); 3576 3577 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3578 np->events = readl(base + NvRegIrqStatus); 3579 writel(np->events, base + NvRegIrqStatus); 3580 } else { 3581 np->events = readl(base + NvRegMSIXIrqStatus); 3582 writel(np->events, base + NvRegMSIXIrqStatus); 3583 } 3584 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events); 3585 if (!(np->events & np->irqmask)) 3586 return IRQ_NONE; 3587 3588 nv_msi_workaround(np); 3589 3590#ifdef CONFIG_FORCEDETH_NAPI 3591 napi_schedule(&np->napi); 3592 3593 /* Disable further irqs 3594 (msix not enabled with napi) */ 3595 writel(0, base + NvRegIrqMask); 3596 3597#else 3598 do 3599 { 3600 int work = 0; 3601 if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) { 3602 if (unlikely(nv_alloc_rx_optimized(dev))) { 3603 spin_lock(&np->lock); 3604 if (!np->in_shutdown) 3605 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3606 spin_unlock(&np->lock); 3607 } 3608 } 3609 3610 spin_lock(&np->lock); 3611 work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3612 spin_unlock(&np->lock); 3613 3614 if (!work) 3615 break; 3616 3617 total_work += work; 3618 3619 loop_count++; 3620 } 3621 while (loop_count < max_interrupt_work); 3622 3623 if (nv_change_interrupt_mode(dev, total_work)) { 3624 /* setup new irq mask */ 3625 writel(np->irqmask, base + NvRegIrqMask); 3626 } 3627 3628 if (unlikely(np->events & NVREG_IRQ_LINK)) { 3629 spin_lock(&np->lock); 3630 nv_link_irq(dev); 3631 spin_unlock(&np->lock); 3632 } 3633 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3634 spin_lock(&np->lock); 3635 nv_linkchange(dev); 3636 spin_unlock(&np->lock); 3637 np->link_timeout = jiffies + LINK_TIMEOUT; 3638 } 3639 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { 3640 spin_lock(&np->lock); 3641 /* disable interrupts on the nic */ 3642 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3643 writel(0, base + NvRegIrqMask); 3644 else 3645 writel(np->irqmask, base + NvRegIrqMask); 3646 pci_push(base); 3647 3648 if (!np->in_shutdown) { 3649 np->nic_poll_irq = np->irqmask; 3650 np->recover_error = 1; 3651 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3652 } 3653 spin_unlock(&np->lock); 3654 } 3655 3656#endif 3657 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name); 3658 3659 return IRQ_HANDLED; 3660} 3661 3662static irqreturn_t nv_nic_irq_tx(int foo, void *data) 3663{ 3664 struct net_device *dev = (struct net_device *) data; 3665 struct fe_priv *np = netdev_priv(dev); 3666 u8 __iomem *base = get_hwbase(dev); 3667 u32 events; 3668 int i; 3669 unsigned long flags; 3670 3671 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 3672 3673 for (i=0; ; i++) { 3674 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3675 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3676 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 3677 if (!(events & 
np->irqmask)) 3678 break; 3679 3680 spin_lock_irqsave(&np->lock, flags); 3681 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3682 spin_unlock_irqrestore(&np->lock, flags); 3683 3684 if (unlikely(i > max_interrupt_work)) { 3685 spin_lock_irqsave(&np->lock, flags); 3686 /* disable interrupts on the nic */ 3687 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3688 pci_push(base); 3689 3690 if (!np->in_shutdown) { 3691 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 3692 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3693 } 3694 spin_unlock_irqrestore(&np->lock, flags); 3695 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 3696 break; 3697 } 3698 3699 } 3700 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); 3701 3702 return IRQ_RETVAL(i); 3703} 3704 3705#ifdef CONFIG_FORCEDETH_NAPI 3706static int nv_napi_poll(struct napi_struct *napi, int budget) 3707{ 3708 struct fe_priv *np = container_of(napi, struct fe_priv, napi); 3709 struct net_device *dev = np->dev; 3710 u8 __iomem *base = get_hwbase(dev); 3711 unsigned long flags; 3712 int retcode; 3713 int tx_work, rx_work; 3714 3715 if (!nv_optimized(np)) { 3716 spin_lock_irqsave(&np->lock, flags); 3717 tx_work = nv_tx_done(dev, np->tx_ring_size); 3718 spin_unlock_irqrestore(&np->lock, flags); 3719 3720 rx_work = nv_rx_process(dev, budget); 3721 retcode = nv_alloc_rx(dev); 3722 } else { 3723 spin_lock_irqsave(&np->lock, flags); 3724 tx_work = nv_tx_done_optimized(dev, np->tx_ring_size); 3725 spin_unlock_irqrestore(&np->lock, flags); 3726 3727 rx_work = nv_rx_process_optimized(dev, budget); 3728 retcode = nv_alloc_rx_optimized(dev); 3729 } 3730 3731 if (retcode) { 3732 spin_lock_irqsave(&np->lock, flags); 3733 if (!np->in_shutdown) 3734 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3735 spin_unlock_irqrestore(&np->lock, flags); 3736 } 3737 3738 nv_change_interrupt_mode(dev, tx_work + rx_work); 3739 3740 if (unlikely(np->events & NVREG_IRQ_LINK)) { 3741 spin_lock_irqsave(&np->lock, flags); 3742 nv_link_irq(dev); 3743 spin_unlock_irqrestore(&np->lock, flags); 3744 } 3745 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3746 spin_lock_irqsave(&np->lock, flags); 3747 nv_linkchange(dev); 3748 spin_unlock_irqrestore(&np->lock, flags); 3749 np->link_timeout = jiffies + LINK_TIMEOUT; 3750 } 3751 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { 3752 spin_lock_irqsave(&np->lock, flags); 3753 if (!np->in_shutdown) { 3754 np->nic_poll_irq = np->irqmask; 3755 np->recover_error = 1; 3756 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3757 } 3758 spin_unlock_irqrestore(&np->lock, flags); 3759 napi_complete(napi); 3760 return rx_work; 3761 } 3762 3763 if (rx_work < budget) { 3764 /* re-enable interrupts 3765 (msix not enabled in napi) */ 3766 napi_complete(napi); 3767 3768 writel(np->irqmask, base + NvRegIrqMask); 3769 } 3770 return rx_work; 3771} 3772#endif 3773 3774static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3775{ 3776 struct net_device *dev = (struct net_device *) data; 3777 struct fe_priv *np = netdev_priv(dev); 3778 u8 __iomem *base = get_hwbase(dev); 3779 u32 events; 3780 int i; 3781 unsigned long flags; 3782 3783 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 3784 3785 for (i=0; ; i++) { 3786 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3787 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3788 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 3789 if (!(events & np->irqmask)) 3790 break; 3791 3792 if (nv_rx_process_optimized(dev, 
RX_WORK_PER_LOOP)) { 3793 if (unlikely(nv_alloc_rx_optimized(dev))) { 3794 spin_lock_irqsave(&np->lock, flags); 3795 if (!np->in_shutdown) 3796 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3797 spin_unlock_irqrestore(&np->lock, flags); 3798 } 3799 } 3800 3801 if (unlikely(i > max_interrupt_work)) { 3802 spin_lock_irqsave(&np->lock, flags); 3803 /* disable interrupts on the nic */ 3804 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3805 pci_push(base); 3806 3807 if (!np->in_shutdown) { 3808 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 3809 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3810 } 3811 spin_unlock_irqrestore(&np->lock, flags); 3812 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 3813 break; 3814 } 3815 } 3816 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); 3817 3818 return IRQ_RETVAL(i); 3819} 3820 3821static irqreturn_t nv_nic_irq_other(int foo, void *data) 3822{ 3823 struct net_device *dev = (struct net_device *) data; 3824 struct fe_priv *np = netdev_priv(dev); 3825 u8 __iomem *base = get_hwbase(dev); 3826 u32 events; 3827 int i; 3828 unsigned long flags; 3829 3830 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 3831 3832 for (i=0; ; i++) { 3833 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3834 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3835 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3836 if (!(events & np->irqmask)) 3837 break; 3838 3839 /* check tx in case we reached max loop limit in tx isr */ 3840 spin_lock_irqsave(&np->lock, flags); 3841 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3842 spin_unlock_irqrestore(&np->lock, flags); 3843 3844 if (events & NVREG_IRQ_LINK) { 3845 spin_lock_irqsave(&np->lock, flags); 3846 nv_link_irq(dev); 3847 spin_unlock_irqrestore(&np->lock, flags); 3848 } 3849 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 3850 spin_lock_irqsave(&np->lock, flags); 3851 nv_linkchange(dev); 3852 spin_unlock_irqrestore(&np->lock, flags); 3853 np->link_timeout = jiffies + LINK_TIMEOUT; 3854 } 3855 if (events & NVREG_IRQ_RECOVER_ERROR) { 3856 spin_lock_irq(&np->lock); 3857 /* disable interrupts on the nic */ 3858 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3859 pci_push(base); 3860 3861 if (!np->in_shutdown) { 3862 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3863 np->recover_error = 1; 3864 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3865 } 3866 spin_unlock_irq(&np->lock); 3867 break; 3868 } 3869 if (unlikely(i > max_interrupt_work)) { 3870 spin_lock_irqsave(&np->lock, flags); 3871 /* disable interrupts on the nic */ 3872 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3873 pci_push(base); 3874 3875 if (!np->in_shutdown) { 3876 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3877 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3878 } 3879 spin_unlock_irqrestore(&np->lock, flags); 3880 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 3881 break; 3882 } 3883 3884 } 3885 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); 3886 3887 return IRQ_RETVAL(i); 3888} 3889 3890static irqreturn_t nv_nic_irq_test(int foo, void *data) 3891{ 3892 struct net_device *dev = (struct net_device *) data; 3893 struct fe_priv *np = netdev_priv(dev); 3894 u8 __iomem *base = get_hwbase(dev); 3895 u32 events; 3896 3897 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name); 3898 3899 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3900 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3901 writel(NVREG_IRQ_TIMER, base + 
NvRegIrqStatus); 3902 } else { 3903 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3904 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3905 } 3906 pci_push(base); 3907 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3908 if (!(events & NVREG_IRQ_TIMER)) 3909 return IRQ_RETVAL(0); 3910 3911 nv_msi_workaround(np); 3912 3913 spin_lock(&np->lock); 3914 np->intr_test = 1; 3915 spin_unlock(&np->lock); 3916 3917 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); 3918 3919 return IRQ_RETVAL(1); 3920} 3921 3922static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) 3923{ 3924 u8 __iomem *base = get_hwbase(dev); 3925 int i; 3926 u32 msixmap = 0; 3927 3928 /* Each interrupt bit can be mapped to a MSIX vector (4 bits). 3929 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents 3930 * the remaining 8 interrupts. 3931 */ 3932 for (i = 0; i < 8; i++) { 3933 if ((irqmask >> i) & 0x1) { 3934 msixmap |= vector << (i << 2); 3935 } 3936 } 3937 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3938 3939 msixmap = 0; 3940 for (i = 0; i < 8; i++) { 3941 if ((irqmask >> (i + 8)) & 0x1) { 3942 msixmap |= vector << (i << 2); 3943 } 3944 } 3945 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3946} 3947 3948static int nv_request_irq(struct net_device *dev, int intr_test) 3949{ 3950 struct fe_priv *np = get_nvpriv(dev); 3951 u8 __iomem *base = get_hwbase(dev); 3952 int ret = 1; 3953 int i; 3954 irqreturn_t (*handler)(int foo, void *data); 3955 3956 if (intr_test) { 3957 handler = nv_nic_irq_test; 3958 } else { 3959 if (nv_optimized(np)) 3960 handler = nv_nic_irq_optimized; 3961 else 3962 handler = nv_nic_irq; 3963 } 3964 3965 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3966 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3967 np->msi_x_entry[i].entry = i; 3968 } 3969 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 3970 np->msi_flags |= NV_MSI_X_ENABLED; 3971 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3972 /* Request irq for rx handling */ 3973 sprintf(np->name_rx, "%s-rx", dev->name); 3974 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, 3975 &nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) { 3976 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 3977 pci_disable_msix(np->pci_dev); 3978 np->msi_flags &= ~NV_MSI_X_ENABLED; 3979 goto out_err; 3980 } 3981 /* Request irq for tx handling */ 3982 sprintf(np->name_tx, "%s-tx", dev->name); 3983 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, 3984 &nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) { 3985 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 3986 pci_disable_msix(np->pci_dev); 3987 np->msi_flags &= ~NV_MSI_X_ENABLED; 3988 goto out_free_rx; 3989 } 3990 /* Request irq for link and timer handling */ 3991 sprintf(np->name_other, "%s-other", dev->name); 3992 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, 3993 &nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) { 3994 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 3995 pci_disable_msix(np->pci_dev); 3996 np->msi_flags &= ~NV_MSI_X_ENABLED; 3997 goto out_free_tx; 3998 } 3999 /* map interrupts to their respective vector */ 4000 writel(0, base + NvRegMSIXMap0); 4001 writel(0, base + NvRegMSIXMap1); 4002 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 4003 
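/* Worked example for the next call (an editor's sketch, assuming NV_MSI_X_VECTOR_TX is vector 1): NVREG_IRQ_TX_ALL is 0x0118, i.e. bits 3, 4 and 8, so set_msix_vector_map() ORs nibble value 1 into nibbles 3 and 4 of NvRegMSIXMap0 (giving 0x00011000) and into nibble 0 of NvRegMSIXMap1 (giving 0x00000001), steering every tx interrupt bit to MSI-X vector 1. */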
set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 4004 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 4005 } else { 4006 /* Request irq for all interrupts */ 4007 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 4008 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 4009 pci_disable_msix(np->pci_dev); 4010 np->msi_flags &= ~NV_MSI_X_ENABLED; 4011 goto out_err; 4012 } 4013 4014 /* map interrupts to vector 0 */ 4015 writel(0, base + NvRegMSIXMap0); 4016 writel(0, base + NvRegMSIXMap1); 4017 } 4018 } 4019 } 4020 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 4021 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 4022 np->msi_flags |= NV_MSI_ENABLED; 4023 dev->irq = np->pci_dev->irq; 4024 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 4025 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 4026 pci_disable_msi(np->pci_dev); 4027 np->msi_flags &= ~NV_MSI_ENABLED; 4028 dev->irq = np->pci_dev->irq; 4029 goto out_err; 4030 } 4031 4032 /* map interrupts to vector 0 */ 4033 writel(0, base + NvRegMSIMap0); 4034 writel(0, base + NvRegMSIMap1); 4035 /* enable msi vector 0 */ 4036 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 4037 } 4038 } 4039 if (ret != 0) { 4040 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) 4041 goto out_err; 4042 4043 } 4044 4045 return 0; 4046out_free_tx: 4047 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); 4048out_free_rx: 4049 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); 4050out_err: 4051 return 1; 4052} 4053 4054static void nv_free_irq(struct net_device *dev) 4055{ 4056 struct fe_priv *np = get_nvpriv(dev); 4057 int i; 4058 4059 if (np->msi_flags & NV_MSI_X_ENABLED) { 4060 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 4061 free_irq(np->msi_x_entry[i].vector, dev); 4062 } 4063 pci_disable_msix(np->pci_dev); 4064 np->msi_flags &= ~NV_MSI_X_ENABLED; 4065 } else { 4066 free_irq(np->pci_dev->irq, dev); 4067 if (np->msi_flags & NV_MSI_ENABLED) { 4068 pci_disable_msi(np->pci_dev); 4069 np->msi_flags &= ~NV_MSI_ENABLED; 4070 } 4071 } 4072} 4073 4074static void nv_do_nic_poll(unsigned long data) 4075{ 4076 struct net_device *dev = (struct net_device *) data; 4077 struct fe_priv *np = netdev_priv(dev); 4078 u8 __iomem *base = get_hwbase(dev); 4079 u32 mask = 0; 4080 4081 /* 4082 * First disable irq(s) and then 4083 * reenable interrupts on the nic, we have to do this before calling 4084 * nv_nic_irq because that may decide to do otherwise 4085 */ 4086 4087 if (!using_multi_irqs(dev)) { 4088 if (np->msi_flags & NV_MSI_X_ENABLED) 4089 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 4090 else 4091 disable_irq_lockdep(np->pci_dev->irq); 4092 mask = np->irqmask; 4093 } else { 4094 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4095 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 4096 mask |= NVREG_IRQ_RX_ALL; 4097 } 4098 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4099 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 4100 mask |= NVREG_IRQ_TX_ALL; 4101 } 4102 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4103 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 4104 mask |= NVREG_IRQ_OTHER; 4105 } 4106 } 4107 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ 4108 4109 if (np->recover_error) { 4110 np->recover_error = 0; 4111 printk(KERN_INFO "%s: MAC in recoverable error 
state\n", dev->name); 4112 if (netif_running(dev)) { 4113 netif_tx_lock_bh(dev); 4114 netif_addr_lock(dev); 4115 spin_lock(&np->lock); 4116 /* stop engines */ 4117 nv_stop_rxtx(dev); 4118 if (np->driver_data & DEV_HAS_POWER_CNTRL) 4119 nv_mac_reset(dev); 4120 nv_txrx_reset(dev); 4121 /* drain rx queue */ 4122 nv_drain_rxtx(dev); 4123 /* reinit driver view of the rx queue */ 4124 set_bufsize(dev); 4125 if (nv_init_ring(dev)) { 4126 if (!np->in_shutdown) 4127 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4128 } 4129 /* reinit nic view of the rx queue */ 4130 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4131 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4132 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4133 base + NvRegRingSizes); 4134 pci_push(base); 4135 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4136 pci_push(base); 4137 /* clear interrupts */ 4138 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 4139 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4140 else 4141 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4142 4143 /* restart rx engine */ 4144 nv_start_rxtx(dev); 4145 spin_unlock(&np->lock); 4146 netif_addr_unlock(dev); 4147 netif_tx_unlock_bh(dev); 4148 } 4149 } 4150 4151 writel(mask, base + NvRegIrqMask); 4152 pci_push(base); 4153 4154 if (!using_multi_irqs(dev)) { 4155 np->nic_poll_irq = 0; 4156 if (nv_optimized(np)) 4157 nv_nic_irq_optimized(0, dev); 4158 else 4159 nv_nic_irq(0, dev); 4160 if (np->msi_flags & NV_MSI_X_ENABLED) 4161 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 4162 else 4163 enable_irq_lockdep(np->pci_dev->irq); 4164 } else { 4165 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4166 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; 4167 nv_nic_irq_rx(0, dev); 4168 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 4169 } 4170 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4171 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; 4172 nv_nic_irq_tx(0, dev); 4173 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 4174 } 4175 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4176 np->nic_poll_irq &= ~NVREG_IRQ_OTHER; 4177 nv_nic_irq_other(0, dev); 4178 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 4179 } 4180 } 4181 4182} 4183 4184#ifdef CONFIG_NET_POLL_CONTROLLER 4185static void nv_poll_controller(struct net_device *dev) 4186{ 4187 nv_do_nic_poll((unsigned long) dev); 4188} 4189#endif 4190 4191static void nv_do_stats_poll(unsigned long data) 4192{ 4193 struct net_device *dev = (struct net_device *) data; 4194 struct fe_priv *np = netdev_priv(dev); 4195 4196 nv_get_hw_stats(dev); 4197 4198 if (!np->in_shutdown) 4199 mod_timer(&np->stats_poll, 4200 round_jiffies(jiffies + STATS_INTERVAL)); 4201} 4202 4203static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 4204{ 4205 struct fe_priv *np = netdev_priv(dev); 4206 strcpy(info->driver, DRV_NAME); 4207 strcpy(info->version, FORCEDETH_VERSION); 4208 strcpy(info->bus_info, pci_name(np->pci_dev)); 4209} 4210 4211static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4212{ 4213 struct fe_priv *np = netdev_priv(dev); 4214 wolinfo->supported = WAKE_MAGIC; 4215 4216 spin_lock_irq(&np->lock); 4217 if (np->wolenabled) 4218 wolinfo->wolopts = WAKE_MAGIC; 4219 spin_unlock_irq(&np->lock); 4220} 4221 4222static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4223{ 4224 struct fe_priv *np = netdev_priv(dev); 4225 u8 
__iomem *base = get_hwbase(dev); 4226 u32 flags = 0; 4227 4228 if (wolinfo->wolopts == 0) { 4229 np->wolenabled = 0; 4230 } else if (wolinfo->wolopts & WAKE_MAGIC) { 4231 np->wolenabled = 1; 4232 flags = NVREG_WAKEUPFLAGS_ENABLE; 4233 } 4234 if (netif_running(dev)) { 4235 spin_lock_irq(&np->lock); 4236 writel(flags, base + NvRegWakeUpFlags); 4237 spin_unlock_irq(&np->lock); 4238 } 4239 return 0; 4240} 4241 4242static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 4243{ 4244 struct fe_priv *np = netdev_priv(dev); 4245 int adv; 4246 4247 spin_lock_irq(&np->lock); 4248 ecmd->port = PORT_MII; 4249 if (!netif_running(dev)) { 4250 /* We do not track link speed / duplex setting if the 4251 * interface is disabled. Force a link check */ 4252 if (nv_update_linkspeed(dev)) { 4253 if (!netif_carrier_ok(dev)) 4254 netif_carrier_on(dev); 4255 } else { 4256 if (netif_carrier_ok(dev)) 4257 netif_carrier_off(dev); 4258 } 4259 } 4260 4261 if (netif_carrier_ok(dev)) { 4262 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 4263 case NVREG_LINKSPEED_10: 4264 ecmd->speed = SPEED_10; 4265 break; 4266 case NVREG_LINKSPEED_100: 4267 ecmd->speed = SPEED_100; 4268 break; 4269 case NVREG_LINKSPEED_1000: 4270 ecmd->speed = SPEED_1000; 4271 break; 4272 } 4273 ecmd->duplex = DUPLEX_HALF; 4274 if (np->duplex) 4275 ecmd->duplex = DUPLEX_FULL; 4276 } else { 4277 ecmd->speed = -1; 4278 ecmd->duplex = -1; 4279 } 4280 4281 ecmd->autoneg = np->autoneg; 4282 4283 ecmd->advertising = ADVERTISED_MII; 4284 if (np->autoneg) { 4285 ecmd->advertising |= ADVERTISED_Autoneg; 4286 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4287 if (adv & ADVERTISE_10HALF) 4288 ecmd->advertising |= ADVERTISED_10baseT_Half; 4289 if (adv & ADVERTISE_10FULL) 4290 ecmd->advertising |= ADVERTISED_10baseT_Full; 4291 if (adv & ADVERTISE_100HALF) 4292 ecmd->advertising |= ADVERTISED_100baseT_Half; 4293 if (adv & ADVERTISE_100FULL) 4294 ecmd->advertising |= ADVERTISED_100baseT_Full; 4295 if (np->gigabit == PHY_GIGABIT) { 4296 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4297 if (adv & ADVERTISE_1000FULL) 4298 ecmd->advertising |= ADVERTISED_1000baseT_Full; 4299 } 4300 } 4301 ecmd->supported = (SUPPORTED_Autoneg | 4302 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 4303 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 4304 SUPPORTED_MII); 4305 if (np->gigabit == PHY_GIGABIT) 4306 ecmd->supported |= SUPPORTED_1000baseT_Full; 4307 4308 ecmd->phy_address = np->phyaddr; 4309 ecmd->transceiver = XCVR_EXTERNAL; 4310 4311 /* ignore maxtxpkt, maxrxpkt for now */ 4312 spin_unlock_irq(&np->lock); 4313 return 0; 4314} 4315 4316static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 4317{ 4318 struct fe_priv *np = netdev_priv(dev); 4319 4320 if (ecmd->port != PORT_MII) 4321 return -EINVAL; 4322 if (ecmd->transceiver != XCVR_EXTERNAL) 4323 return -EINVAL; 4324 if (ecmd->phy_address != np->phyaddr) { 4325 /* TODO: support switching between multiple phys. Should be 4326 * trivial, but not enabled due to lack of test hardware. 
*/ 4327 return -EINVAL; 4328 } 4329 if (ecmd->autoneg == AUTONEG_ENABLE) { 4330 u32 mask; 4331 4332 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 4333 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 4334 if (np->gigabit == PHY_GIGABIT) 4335 mask |= ADVERTISED_1000baseT_Full; 4336 4337 if ((ecmd->advertising & mask) == 0) 4338 return -EINVAL; 4339 4340 } else if (ecmd->autoneg == AUTONEG_DISABLE) { 4341 /* Note: autonegotiation disable, speed 1000 intentionally 4342 * forbidden - no one should need that. */ 4343 4344 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) 4345 return -EINVAL; 4346 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 4347 return -EINVAL; 4348 } else { 4349 return -EINVAL; 4350 } 4351 4352 netif_carrier_off(dev); 4353 if (netif_running(dev)) { 4354 unsigned long flags; 4355 4356 nv_disable_irq(dev); 4357 netif_tx_lock_bh(dev); 4358 netif_addr_lock(dev); 4359 /* with plain spinlock lockdep complains */ 4360 spin_lock_irqsave(&np->lock, flags); 4361 /* stop engines */ 4362 /* FIXME: 4363 * this can take some time, and interrupts are disabled 4364 * due to spin_lock_irqsave, but let's hope no daemon 4365 * is going to change the settings very often... 4366 * Worst case: 4367 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX 4368 * + some minor delays, which is up to a second approximately 4369 */ 4370 nv_stop_rxtx(dev); 4371 spin_unlock_irqrestore(&np->lock, flags); 4372 netif_addr_unlock(dev); 4373 netif_tx_unlock_bh(dev); 4374 } 4375 4376 if (ecmd->autoneg == AUTONEG_ENABLE) { 4377 int adv, bmcr; 4378 4379 np->autoneg = 1; 4380 4381 /* advertise only what has been requested */ 4382 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4383 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4384 if (ecmd->advertising & ADVERTISED_10baseT_Half) 4385 adv |= ADVERTISE_10HALF; 4386 if (ecmd->advertising & ADVERTISED_10baseT_Full) 4387 adv |= ADVERTISE_10FULL; 4388 if (ecmd->advertising & ADVERTISED_100baseT_Half) 4389 adv |= ADVERTISE_100HALF; 4390 if (ecmd->advertising & ADVERTISED_100baseT_Full) 4391 adv |= ADVERTISE_100FULL; 4392 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 4393 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4394 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4395 adv |= ADVERTISE_PAUSE_ASYM; 4396 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4397 4398 if (np->gigabit == PHY_GIGABIT) { 4399 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4400 adv &= ~ADVERTISE_1000FULL; 4401 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 4402 adv |= ADVERTISE_1000FULL; 4403 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4404 } 4405 4406 if (netif_running(dev)) 4407 printk(KERN_INFO "%s: link down.\n", dev->name); 4408 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4409 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4410 bmcr |= BMCR_ANENABLE; 4411 /* reset the phy in order for settings to stick, 4412 * and cause autoneg to start */ 4413 if (phy_reset(dev, bmcr)) { 4414 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4415 return -EINVAL; 4416 } 4417 } else { 4418 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4419 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4420 } 4421 } else { 4422 int adv, bmcr; 4423 4424 np->autoneg = 0; 4425 4426 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4427 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4428 if (ecmd->speed == SPEED_10 && 
ecmd->duplex == DUPLEX_HALF) 4429 adv |= ADVERTISE_10HALF; 4430 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) 4431 adv |= ADVERTISE_10FULL; 4432 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) 4433 adv |= ADVERTISE_100HALF; 4434 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) 4435 adv |= ADVERTISE_100FULL; 4436 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4437 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ 4438 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4439 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4440 } 4441 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { 4442 adv |= ADVERTISE_PAUSE_ASYM; 4443 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4444 } 4445 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4446 np->fixed_mode = adv; 4447 4448 if (np->gigabit == PHY_GIGABIT) { 4449 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4450 adv &= ~ADVERTISE_1000FULL; 4451 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4452 } 4453 4454 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4455 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); 4456 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) 4457 bmcr |= BMCR_FULLDPLX; 4458 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) 4459 bmcr |= BMCR_SPEED100; 4460 if (np->phy_oui == PHY_OUI_MARVELL) { 4461 /* reset the phy in order for forced mode settings to stick */ 4462 if (phy_reset(dev, bmcr)) { 4463 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4464 return -EINVAL; 4465 } 4466 } else { 4467 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4468 if (netif_running(dev)) { 4469 /* Wait a bit and then reconfigure the nic. */ 4470 udelay(10); 4471 nv_linkchange(dev); 4472 } 4473 } 4474 } 4475 4476 if (netif_running(dev)) { 4477 nv_start_rxtx(dev); 4478 nv_enable_irq(dev); 4479 } 4480 4481 return 0; 4482} 4483 4484#define FORCEDETH_REGS_VER 1 4485 4486static int nv_get_regs_len(struct net_device *dev) 4487{ 4488 struct fe_priv *np = netdev_priv(dev); 4489 return np->register_size; 4490} 4491 4492static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 4493{ 4494 struct fe_priv *np = netdev_priv(dev); 4495 u8 __iomem *base = get_hwbase(dev); 4496 u32 *rbuf = buf; 4497 int i; 4498 4499 regs->version = FORCEDETH_REGS_VER; 4500 spin_lock_irq(&np->lock); 4501 for (i = 0; i < np->register_size/sizeof(u32); i++) 4502 rbuf[i] = readl(base + i*sizeof(u32)); 4503 spin_unlock_irq(&np->lock); 4504} 4505 4506static int nv_nway_reset(struct net_device *dev) 4507{ 4508 struct fe_priv *np = netdev_priv(dev); 4509 int ret; 4510 4511 if (np->autoneg) { 4512 int bmcr; 4513 4514 netif_carrier_off(dev); 4515 if (netif_running(dev)) { 4516 nv_disable_irq(dev); 4517 netif_tx_lock_bh(dev); 4518 netif_addr_lock(dev); 4519 spin_lock(&np->lock); 4520 /* stop engines */ 4521 nv_stop_rxtx(dev); 4522 spin_unlock(&np->lock); 4523 netif_addr_unlock(dev); 4524 netif_tx_unlock_bh(dev); 4525 printk(KERN_INFO "%s: link down.\n", dev->name); 4526 } 4527 4528 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4529 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4530 bmcr |= BMCR_ANENABLE; 4531 /* reset the phy in order for settings to stick */ 4532 if (phy_reset(dev, bmcr)) { 4533 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4534 return -EINVAL; 4535 } 4536 } else { 4537 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4538 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4539 }
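/* Setting BMCR_ANENABLE|BMCR_ANRESTART above starts a fresh autonegotiation cycle; the result arrives asynchronously and is picked up by nv_link_irq()/nv_linkchange() once interrupts are re-enabled below. */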
4540 4541 if (netif_running(dev)) { 4542 nv_start_rxtx(dev); 4543 nv_enable_irq(dev); 4544 } 4545 ret = 0; 4546 } else { 4547 ret = -EINVAL; 4548 } 4549 4550 return ret; 4551} 4552 4553static int nv_set_tso(struct net_device *dev, u32 value) 4554{ 4555 struct fe_priv *np = netdev_priv(dev); 4556 4557 if ((np->driver_data & DEV_HAS_CHECKSUM)) 4558 return ethtool_op_set_tso(dev, value); 4559 else 4560 return -EOPNOTSUPP; 4561} 4562 4563static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4564{ 4565 struct fe_priv *np = netdev_priv(dev); 4566 4567 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4568 ring->rx_mini_max_pending = 0; 4569 ring->rx_jumbo_max_pending = 0; 4570 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4571 4572 ring->rx_pending = np->rx_ring_size; 4573 ring->rx_mini_pending = 0; 4574 ring->rx_jumbo_pending = 0; 4575 ring->tx_pending = np->tx_ring_size; 4576} 4577 4578static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4579{ 4580 struct fe_priv *np = netdev_priv(dev); 4581 u8 __iomem *base = get_hwbase(dev); 4582 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; 4583 dma_addr_t ring_addr; 4584 4585 if (ring->rx_pending < RX_RING_MIN || 4586 ring->tx_pending < TX_RING_MIN || 4587 ring->rx_mini_pending != 0 || 4588 ring->rx_jumbo_pending != 0 || 4589 (np->desc_ver == DESC_VER_1 && 4590 (ring->rx_pending > RING_MAX_DESC_VER_1 || 4591 ring->tx_pending > RING_MAX_DESC_VER_1)) || 4592 (np->desc_ver != DESC_VER_1 && 4593 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 4594 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { 4595 return -EINVAL; 4596 } 4597 4598 /* allocate new rings */ 4599 if (!nv_optimized(np)) { 4600 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4601 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4602 &ring_addr); 4603 } else { 4604 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4605 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4606 &ring_addr); 4607 } 4608 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); 4609 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); 4610 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { 4611 /* fall back to old rings */ 4612 if (!nv_optimized(np)) { 4613 if (rxtx_ring) 4614 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4615 rxtx_ring, ring_addr); 4616 } else { 4617 if (rxtx_ring) 4618 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4619 rxtx_ring, ring_addr); 4620 } 4621 if (rx_skbuff) 4622 kfree(rx_skbuff); 4623 if (tx_skbuff) 4624 kfree(tx_skbuff); 4625 goto exit; 4626 } 4627 4628 if (netif_running(dev)) { 4629 nv_disable_irq(dev); 4630 nv_napi_disable(dev); 4631 netif_tx_lock_bh(dev); 4632 netif_addr_lock(dev); 4633 spin_lock(&np->lock); 4634 /* stop engines */ 4635 nv_stop_rxtx(dev); 4636 nv_txrx_reset(dev); 4637 /* drain queues */ 4638 nv_drain_rxtx(dev); 4639 /* delete queues */ 4640 free_rings(dev); 4641 } 4642 4643 /* set new values */ 4644 np->rx_ring_size = ring->rx_pending; 4645 np->tx_ring_size = ring->tx_pending; 4646 4647 if (!nv_optimized(np)) { 4648 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4649 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4650 } else { 4651 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4652 np->tx_ring.ex = 
&np->rx_ring.ex[np->rx_ring_size]; 4653 } 4654 np->rx_skb = (struct nv_skb_map*)rx_skbuff; 4655 np->tx_skb = (struct nv_skb_map*)tx_skbuff; 4656 np->ring_addr = ring_addr; 4657 4658 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4659 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); 4660 4661 if (netif_running(dev)) { 4662 /* reinit driver view of the queues */ 4663 set_bufsize(dev); 4664 if (nv_init_ring(dev)) { 4665 if (!np->in_shutdown) 4666 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4667 } 4668 4669 /* reinit nic view of the queues */ 4670 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4671 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4672 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4673 base + NvRegRingSizes); 4674 pci_push(base); 4675 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4676 pci_push(base); 4677 4678 /* restart engines */ 4679 nv_start_rxtx(dev); 4680 spin_unlock(&np->lock); 4681 netif_addr_unlock(dev); 4682 netif_tx_unlock_bh(dev); 4683 nv_napi_enable(dev); 4684 nv_enable_irq(dev); 4685 } 4686 return 0; 4687exit: 4688 return -ENOMEM; 4689} 4690 4691static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4692{ 4693 struct fe_priv *np = netdev_priv(dev); 4694 4695 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; 4696 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; 4697 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; 4698} 4699 4700static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4701{ 4702 struct fe_priv *np = netdev_priv(dev); 4703 int adv, bmcr; 4704 4705 if ((!np->autoneg && np->duplex == 0) || 4706 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4707 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", 4708 dev->name); 4709 return -EINVAL; 4710 } 4711 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4712 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 4713 return -EINVAL; 4714 } 4715 4716 netif_carrier_off(dev); 4717 if (netif_running(dev)) { 4718 nv_disable_irq(dev); 4719 netif_tx_lock_bh(dev); 4720 netif_addr_lock(dev); 4721 spin_lock(&np->lock); 4722 /* stop engines */ 4723 nv_stop_rxtx(dev); 4724 spin_unlock(&np->lock); 4725 netif_addr_unlock(dev); 4726 netif_tx_unlock_bh(dev); 4727 } 4728 4729 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); 4730 if (pause->rx_pause) 4731 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; 4732 if (pause->tx_pause) 4733 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; 4734 4735 if (np->autoneg && pause->autoneg) { 4736 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; 4737 4738 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4739 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4740 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 4741 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4742 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4743 adv |= ADVERTISE_PAUSE_ASYM; 4744 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4745 4746 if (netif_running(dev)) 4747 printk(KERN_INFO "%s: link down.\n", dev->name); 4748 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4749 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4750 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4751 } else { 4752 np->pause_flags &= 
~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4753 if (pause->rx_pause) 4754 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4755 if (pause->tx_pause) 4756 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4757 4758 if (!netif_running(dev)) 4759 nv_update_linkspeed(dev); 4760 else 4761 nv_update_pause(dev, np->pause_flags); 4762 } 4763 4764 if (netif_running(dev)) { 4765 nv_start_rxtx(dev); 4766 nv_enable_irq(dev); 4767 } 4768 return 0; 4769} 4770 4771static u32 nv_get_rx_csum(struct net_device *dev) 4772{ 4773 struct fe_priv *np = netdev_priv(dev); 4774 return (np->rx_csum) != 0; 4775} 4776 4777static int nv_set_rx_csum(struct net_device *dev, u32 data) 4778{ 4779 struct fe_priv *np = netdev_priv(dev); 4780 u8 __iomem *base = get_hwbase(dev); 4781 int retcode = 0; 4782 4783 if (np->driver_data & DEV_HAS_CHECKSUM) { 4784 if (data) { 4785 np->rx_csum = 1; 4786 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4787 } else { 4788 np->rx_csum = 0; 4789 /* vlan is dependent on rx checksum offload */ 4790 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) 4791 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; 4792 } 4793 if (netif_running(dev)) { 4794 spin_lock_irq(&np->lock); 4795 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4796 spin_unlock_irq(&np->lock); 4797 } 4798 } else { 4799 return -EINVAL; 4800 } 4801 4802 return retcode; 4803} 4804 4805static int nv_set_tx_csum(struct net_device *dev, u32 data) 4806{ 4807 struct fe_priv *np = netdev_priv(dev); 4808 4809 if (np->driver_data & DEV_HAS_CHECKSUM) 4810 return ethtool_op_set_tx_csum(dev, data); 4811 else 4812 return -EOPNOTSUPP; 4813} 4814 4815static int nv_set_sg(struct net_device *dev, u32 data) 4816{ 4817 struct fe_priv *np = netdev_priv(dev); 4818 4819 if (np->driver_data & DEV_HAS_CHECKSUM) 4820 return ethtool_op_set_sg(dev, data); 4821 else 4822 return -EOPNOTSUPP; 4823} 4824 4825static int nv_get_sset_count(struct net_device *dev, int sset) 4826{ 4827 struct fe_priv *np = netdev_priv(dev); 4828 4829 switch (sset) { 4830 case ETH_SS_TEST: 4831 if (np->driver_data & DEV_HAS_TEST_EXTENDED) 4832 return NV_TEST_COUNT_EXTENDED; 4833 else 4834 return NV_TEST_COUNT_BASE; 4835 case ETH_SS_STATS: 4836 if (np->driver_data & DEV_HAS_STATISTICS_V3) 4837 return NV_DEV_STATISTICS_V3_COUNT; 4838 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4839 return NV_DEV_STATISTICS_V2_COUNT; 4840 else if (np->driver_data & DEV_HAS_STATISTICS_V1) 4841 return NV_DEV_STATISTICS_V1_COUNT; 4842 else 4843 return 0; 4844 default: 4845 return -EOPNOTSUPP; 4846 } 4847} 4848 4849static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) 4850{ 4851 struct fe_priv *np = netdev_priv(dev); 4852 4853 /* update stats */ 4854 nv_do_stats_poll((unsigned long)dev); 4855 4856 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); 4857} 4858 4859static int nv_link_test(struct net_device *dev) 4860{ 4861 struct fe_priv *np = netdev_priv(dev); 4862 int mii_status; 4863 4864 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4865 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4866 4867 /* check phy link status */ 4868 if (!(mii_status & BMSR_LSTATUS)) 4869 return 0; 4870 else 4871 return 1; 4872} 4873 4874static int nv_register_test(struct net_device *dev) 4875{ 4876 u8 __iomem *base = get_hwbase(dev); 4877 int i = 0; 4878 u32 orig_read, new_read; 4879 4880 do { 4881 orig_read = readl(base + nv_registers_test[i].reg); 4882 4883 /* xor with mask to toggle bits */ 4884 orig_read ^= 
nv_registers_test[i].mask; 4885 4886 writel(orig_read, base + nv_registers_test[i].reg); 4887 4888 new_read = readl(base + nv_registers_test[i].reg); 4889 4890 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) 4891 return 0; 4892 4893 /* restore original value */ 4894 orig_read ^= nv_registers_test[i].mask; 4895 writel(orig_read, base + nv_registers_test[i].reg); 4896 4897 } while (nv_registers_test[++i].reg != 0); 4898 4899 return 1; 4900} 4901 4902static int nv_interrupt_test(struct net_device *dev) 4903{ 4904 struct fe_priv *np = netdev_priv(dev); 4905 u8 __iomem *base = get_hwbase(dev); 4906 int ret = 1; 4907 int testcnt; 4908 u32 save_msi_flags, save_poll_interval = 0; 4909 4910 if (netif_running(dev)) { 4911 /* free current irq */ 4912 nv_free_irq(dev); 4913 save_poll_interval = readl(base+NvRegPollingInterval); 4914 } 4915 4916 /* flag to test interrupt handler */ 4917 np->intr_test = 0; 4918 4919 /* setup test irq */ 4920 save_msi_flags = np->msi_flags; 4921 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; 4922 np->msi_flags |= 0x001; /* setup 1 vector */ 4923 if (nv_request_irq(dev, 1)) 4924 return 0; 4925 4926 /* setup timer interrupt */ 4927 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 4928 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4929 4930 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4931 4932 /* wait for at least one interrupt */ 4933 msleep(100); 4934 4935 spin_lock_irq(&np->lock); 4936 4937 /* flag should be set within ISR */ 4938 testcnt = np->intr_test; 4939 if (!testcnt) 4940 ret = 2; 4941 4942 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4943 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 4944 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4945 else 4946 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4947 4948 spin_unlock_irq(&np->lock); 4949 4950 nv_free_irq(dev); 4951 4952 np->msi_flags = save_msi_flags; 4953 4954 if (netif_running(dev)) { 4955 writel(save_poll_interval, base + NvRegPollingInterval); 4956 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4957 /* restore original irq */ 4958 if (nv_request_irq(dev, 0)) 4959 return 0; 4960 } 4961 4962 return ret; 4963} 4964 4965static int nv_loopback_test(struct net_device *dev) 4966{ 4967 struct fe_priv *np = netdev_priv(dev); 4968 u8 __iomem *base = get_hwbase(dev); 4969 struct sk_buff *tx_skb, *rx_skb; 4970 dma_addr_t test_dma_addr; 4971 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? 
NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 4972 u32 flags; 4973 int len, i, pkt_len; 4974 u8 *pkt_data; 4975 u32 filter_flags = 0; 4976 u32 misc1_flags = 0; 4977 int ret = 1; 4978 4979 if (netif_running(dev)) { 4980 nv_disable_irq(dev); 4981 filter_flags = readl(base + NvRegPacketFilterFlags); 4982 misc1_flags = readl(base + NvRegMisc1); 4983 } else { 4984 nv_txrx_reset(dev); 4985 } 4986 4987 /* reinit driver view of the rx queue */ 4988 set_bufsize(dev); 4989 nv_init_ring(dev); 4990 4991 /* setup hardware for loopback */ 4992 writel(NVREG_MISC1_FORCE, base + NvRegMisc1); 4993 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); 4994 4995 /* reinit nic view of the rx queue */ 4996 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4997 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4998 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4999 base + NvRegRingSizes); 5000 pci_push(base); 5001 5002 /* restart rx engine */ 5003 nv_start_rxtx(dev); 5004 5005 /* setup packet for tx */ 5006 pkt_len = ETH_DATA_LEN; 5007 tx_skb = dev_alloc_skb(pkt_len); 5008 if (!tx_skb) { 5009 printk(KERN_ERR "dev_alloc_skb() failed during loopback test" 5010 " of %s\n", dev->name); 5011 ret = 0; 5012 goto out; 5013 } 5014 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, 5015 skb_tailroom(tx_skb), 5016 PCI_DMA_TODEVICE); 5017 pkt_data = skb_put(tx_skb, pkt_len); 5018 for (i = 0; i < pkt_len; i++) 5019 pkt_data[i] = (u8)(i & 0xff); 5020 5021 if (!nv_optimized(np)) { 5022 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 5023 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 5024 } else { 5025 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); 5026 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); 5027 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 5028 } 5029 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5030 pci_push(get_hwbase(dev)); 5031 5032 msleep(500); 5033 5034 /* check for rx of the packet */ 5035 if (!nv_optimized(np)) { 5036 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 5037 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 5038 5039 } else { 5040 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); 5041 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 5042 } 5043 5044 if (flags & NV_RX_AVAIL) { 5045 ret = 0; 5046 } else if (np->desc_ver == DESC_VER_1) { 5047 if (flags & NV_RX_ERROR) 5048 ret = 0; 5049 } else { 5050 if (flags & NV_RX2_ERROR) { 5051 ret = 0; 5052 } 5053 } 5054 5055 if (ret) { 5056 if (len != pkt_len) { 5057 ret = 0; 5058 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 5059 dev->name, len, pkt_len); 5060 } else { 5061 rx_skb = np->rx_skb[0].skb; 5062 for (i = 0; i < pkt_len; i++) { 5063 if (rx_skb->data[i] != (u8)(i & 0xff)) { 5064 ret = 0; 5065 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", 5066 dev->name, i); 5067 break; 5068 } 5069 } 5070 } 5071 } else { 5072 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); 5073 } 5074 5075 pci_unmap_single(np->pci_dev, test_dma_addr, 5076 (skb_end_pointer(tx_skb) - tx_skb->data), 5077 PCI_DMA_TODEVICE); 5078 dev_kfree_skb_any(tx_skb); 5079 out: 5080 /* stop engines */ 5081 nv_stop_rxtx(dev); 5082 nv_txrx_reset(dev); 5083 /* drain rx queue */ 5084 nv_drain_rxtx(dev); 5085 5086 if (netif_running(dev)) { 5087 writel(misc1_flags, base + 
NvRegMisc1); 5088 writel(filter_flags, base + NvRegPacketFilterFlags); 5089 nv_enable_irq(dev); 5090 } 5091 5092 return ret; 5093} 5094 5095static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) 5096{ 5097 struct fe_priv *np = netdev_priv(dev); 5098 u8 __iomem *base = get_hwbase(dev); 5099 int result; 5100 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); 5101 5102 if (!nv_link_test(dev)) { 5103 test->flags |= ETH_TEST_FL_FAILED; 5104 buffer[0] = 1; 5105 } 5106 5107 if (test->flags & ETH_TEST_FL_OFFLINE) { 5108 if (netif_running(dev)) { 5109 netif_stop_queue(dev); 5110 nv_napi_disable(dev); 5111 netif_tx_lock_bh(dev); 5112 netif_addr_lock(dev); 5113 spin_lock_irq(&np->lock); 5114 nv_disable_hw_interrupts(dev, np->irqmask); 5115 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 5116 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5117 } else { 5118 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 5119 } 5120 /* stop engines */ 5121 nv_stop_rxtx(dev); 5122 nv_txrx_reset(dev); 5123 /* drain rx queue */ 5124 nv_drain_rxtx(dev); 5125 spin_unlock_irq(&np->lock); 5126 netif_addr_unlock(dev); 5127 netif_tx_unlock_bh(dev); 5128 } 5129 5130 if (!nv_register_test(dev)) { 5131 test->flags |= ETH_TEST_FL_FAILED; 5132 buffer[1] = 1; 5133 } 5134 5135 result = nv_interrupt_test(dev); 5136 if (result != 1) { 5137 test->flags |= ETH_TEST_FL_FAILED; 5138 buffer[2] = 1; 5139 } 5140 if (result == 0) { 5141 /* bail out */ 5142 return; 5143 } 5144 5145 if (!nv_loopback_test(dev)) { 5146 test->flags |= ETH_TEST_FL_FAILED; 5147 buffer[3] = 1; 5148 } 5149 5150 if (netif_running(dev)) { 5151 /* reinit driver view of the rx queue */ 5152 set_bufsize(dev); 5153 if (nv_init_ring(dev)) { 5154 if (!np->in_shutdown) 5155 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5156 } 5157 /* reinit nic view of the rx queue */ 5158 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5159 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5160 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5161 base + NvRegRingSizes); 5162 pci_push(base); 5163 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5164 pci_push(base); 5165 /* restart rx engine */ 5166 nv_start_rxtx(dev); 5167 netif_start_queue(dev); 5168 nv_napi_enable(dev); 5169 nv_enable_hw_interrupts(dev, np->irqmask); 5170 } 5171 } 5172} 5173 5174static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) 5175{ 5176 switch (stringset) { 5177 case ETH_SS_STATS: 5178 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); 5179 break; 5180 case ETH_SS_TEST: 5181 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); 5182 break; 5183 } 5184} 5185 5186static const struct ethtool_ops ops = { 5187 .get_drvinfo = nv_get_drvinfo, 5188 .get_link = ethtool_op_get_link, 5189 .get_wol = nv_get_wol, 5190 .set_wol = nv_set_wol, 5191 .get_settings = nv_get_settings, 5192 .set_settings = nv_set_settings, 5193 .get_regs_len = nv_get_regs_len, 5194 .get_regs = nv_get_regs, 5195 .nway_reset = nv_nway_reset, 5196 .set_tso = nv_set_tso, 5197 .get_ringparam = nv_get_ringparam, 5198 .set_ringparam = nv_set_ringparam, 5199 .get_pauseparam = nv_get_pauseparam, 5200 .set_pauseparam = nv_set_pauseparam, 5201 .get_rx_csum = nv_get_rx_csum, 5202 .set_rx_csum = nv_set_rx_csum, 5203 .set_tx_csum = nv_set_tx_csum, 5204 .set_sg = nv_set_sg, 5205 .get_strings = 
nv_get_strings, 5206 .get_ethtool_stats = nv_get_ethtool_stats, 5207 .get_sset_count = nv_get_sset_count, 5208 .self_test = nv_self_test, 5209}; 5210 5211static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 5212{ 5213 struct fe_priv *np = get_nvpriv(dev); 5214 5215 spin_lock_irq(&np->lock); 5216 5217 /* save vlan group */ 5218 np->vlangrp = grp; 5219 5220 if (grp) { 5221 /* enable vlan on MAC */ 5222 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; 5223 } else { 5224 /* disable vlan on MAC */ 5225 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 5226 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 5227 } 5228 5229 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5230 5231 spin_unlock_irq(&np->lock); 5232} 5233 5234/* The mgmt unit and driver use a semaphore to access the phy during init */ 5235static int nv_mgmt_acquire_sema(struct net_device *dev) 5236{ 5237 struct fe_priv *np = netdev_priv(dev); 5238 u8 __iomem *base = get_hwbase(dev); 5239 int i; 5240 u32 tx_ctrl, mgmt_sema; 5241 5242 for (i = 0; i < 10; i++) { 5243 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; 5244 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) 5245 break; 5246 msleep(500); 5247 } 5248 5249 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) 5250 return 0; 5251 5252 for (i = 0; i < 2; i++) { 5253 tx_ctrl = readl(base + NvRegTransmitterControl); 5254 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; 5255 writel(tx_ctrl, base + NvRegTransmitterControl); 5256 5257 /* verify that semaphore was acquired */ 5258 tx_ctrl = readl(base + NvRegTransmitterControl); 5259 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && 5260 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { 5261 np->mgmt_sema = 1; 5262 return 1; 5263 } 5264 else 5265 udelay(50); 5266 } 5267 5268 return 0; 5269} 5270 5271static void nv_mgmt_release_sema(struct net_device *dev) 5272{ 5273 struct fe_priv *np = netdev_priv(dev); 5274 u8 __iomem *base = get_hwbase(dev); 5275 u32 tx_ctrl; 5276 5277 if (np->driver_data & DEV_HAS_MGMT_UNIT) { 5278 if (np->mgmt_sema) { 5279 tx_ctrl = readl(base + NvRegTransmitterControl); 5280 tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ; 5281 writel(tx_ctrl, base + NvRegTransmitterControl); 5282 } 5283 } 5284} 5285 5286 5287static int nv_mgmt_get_version(struct net_device *dev) 5288{ 5289 struct fe_priv *np = netdev_priv(dev); 5290 u8 __iomem *base = get_hwbase(dev); 5291 u32 data_ready = readl(base + NvRegTransmitterControl); 5292 u32 data_ready2 = 0; 5293 unsigned long start; 5294 int ready = 0; 5295 5296 writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion); 5297 writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl); 5298 start = jiffies; 5299 while (time_before(jiffies, start + 5*HZ)) { 5300 data_ready2 = readl(base + NvRegTransmitterControl); 5301 if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) { 5302 ready = 1; 5303 break; 5304 } 5305 schedule_timeout_uninterruptible(1); 5306 } 5307 5308 if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR)) 5309 return 0; 5310 5311 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION; 5312 5313 return 1; 5314} 5315 5316static int nv_open(struct net_device *dev) 5317{ 5318 struct fe_priv *np = netdev_priv(dev); 5319 u8 __iomem *base = get_hwbase(dev); 5320 int ret = 1; 5321 int oom, i; 5322 u32 low; 5323 5324 dprintk(KERN_DEBUG "nv_open: begin\n"); 5325 5326 /* power up phy 
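(the read-modify-write below clears BMCR_PDOWN so the phy leaves its power-down state before the MAC is configured)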
*/ 5327 mii_rw(dev, np->phyaddr, MII_BMCR, 5328 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); 5329 5330 /* erase previous misconfiguration */ 5331 if (np->driver_data & DEV_HAS_POWER_CNTRL) 5332 nv_mac_reset(dev); 5333 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 5334 writel(0, base + NvRegMulticastAddrB); 5335 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 5336 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 5337 writel(0, base + NvRegPacketFilterFlags); 5338 5339 writel(0, base + NvRegTransmitterControl); 5340 writel(0, base + NvRegReceiverControl); 5341 5342 writel(0, base + NvRegAdapterControl); 5343 5344 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) 5345 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 5346 5347 /* initialize descriptor rings */ 5348 set_bufsize(dev); 5349 oom = nv_init_ring(dev); 5350 5351 writel(0, base + NvRegLinkSpeed); 5352 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5353 nv_txrx_reset(dev); 5354 writel(0, base + NvRegUnknownSetupReg6); 5355 5356 np->in_shutdown = 0; 5357 5358 /* give hw rings */ 5359 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5360 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5361 base + NvRegRingSizes); 5362 5363 writel(np->linkspeed, base + NvRegLinkSpeed); 5364 if (np->desc_ver == DESC_VER_1) 5365 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); 5366 else 5367 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); 5368 writel(np->txrxctl_bits, base + NvRegTxRxControl); 5369 writel(np->vlanctl_bits, base + NvRegVlanControl); 5370 pci_push(base); 5371 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 5372 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 5373 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 5374 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 5375 5376 writel(0, base + NvRegMIIMask); 5377 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5378 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5379 5380 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 5381 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 5382 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 5383 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5384 5385 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 5386 5387 get_random_bytes(&low, sizeof(low)); 5388 low &= NVREG_SLOTTIME_MASK; 5389 if (np->desc_ver == DESC_VER_1) { 5390 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime); 5391 } else { 5392 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) { 5393 /* setup legacy backoff */ 5394 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime); 5395 } else { 5396 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime); 5397 nv_gear_backoff_reseed(dev); 5398 } 5399 } 5400 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); 5401 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); 5402 if (poll_interval == -1) { 5403 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 5404 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 5405 else 5406 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 5407 } 5408 else 5409 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 5410 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5411 writel((np->phyaddr 
        writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
               base + NvRegAdapterControl);
        writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
        writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
        if (np->wolenabled)
                writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

        i = readl(base + NvRegPowerState);
        if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
                writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

        pci_push(base);
        udelay(10);
        writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

        nv_disable_hw_interrupts(dev, np->irqmask);
        pci_push(base);
        writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
        writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
        pci_push(base);

        if (nv_request_irq(dev, 0)) {
                goto out_drain;
        }

        /* ask for interrupts */
        nv_enable_hw_interrupts(dev, np->irqmask);

        spin_lock_irq(&np->lock);
        writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
        writel(0, base + NvRegMulticastAddrB);
        writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
        writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
        writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
        /* One manual link speed update: Interrupts are enabled, future link
         * speed changes cause interrupts and are handled by nv_link_irq().
         */
        {
                u32 miistat;
                miistat = readl(base + NvRegMIIStatus);
                writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
                dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
        }
        /* set linkspeed to invalid value, thus force nv_update_linkspeed
         * to init hw */
        np->linkspeed = 0;
        ret = nv_update_linkspeed(dev);
        nv_start_rxtx(dev);
        netif_start_queue(dev);
        nv_napi_enable(dev);

        if (ret) {
                netif_carrier_on(dev);
        } else {
                printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
                netif_carrier_off(dev);
        }
        if (oom)
                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

        /* start statistics timer */
        if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
                mod_timer(&np->stats_poll,
                          round_jiffies(jiffies + STATS_INTERVAL));

        spin_unlock_irq(&np->lock);

        return 0;
out_drain:
        nv_drain_rxtx(dev);
        return ret;
}

static int nv_close(struct net_device *dev)
{
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base;

        spin_lock_irq(&np->lock);
        np->in_shutdown = 1;
        spin_unlock_irq(&np->lock);
        nv_napi_disable(dev);
        synchronize_irq(np->pci_dev->irq);

        del_timer_sync(&np->oom_kick);
        del_timer_sync(&np->nic_poll);
        del_timer_sync(&np->stats_poll);

        netif_stop_queue(dev);
        spin_lock_irq(&np->lock);
        nv_stop_rxtx(dev);
        nv_txrx_reset(dev);

        /* disable interrupts on the nic or we will lock up */
        base = get_hwbase(dev);
        nv_disable_hw_interrupts(dev, np->irqmask);
        pci_push(base);
        dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

        spin_unlock_irq(&np->lock);

        nv_free_irq(dev);

        nv_drain_rxtx(dev);

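        /* If wake-on-LAN is enabled, the receiver is deliberately left
         * running below (with the packet filter set to "my address"),
         * presumably so the hardware can still see wake-up packets while
         * the interface is down.
         */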
        if (np->wolenabled) {
                writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
                nv_start_rx(dev);
        } else {
                /* power down phy */
                mii_rw(dev, np->phyaddr, MII_BMCR,
                       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
        }

        /* FIXME: power down nic */

        return 0;
}

static const struct net_device_ops nv_netdev_ops = {
        .ndo_open               = nv_open,
        .ndo_stop               = nv_close,
        .ndo_get_stats          = nv_get_stats,
        .ndo_start_xmit         = nv_start_xmit,
        .ndo_tx_timeout         = nv_tx_timeout,
        .ndo_change_mtu         = nv_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = nv_set_mac_address,
        .ndo_set_multicast_list = nv_set_multicast,
        .ndo_vlan_rx_register   = nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = nv_poll_controller,
#endif
};

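/*
 * The "optimized" variant below differs from nv_netdev_ops only in
 * ndo_start_xmit: nv_start_xmit_optimized is the transmit path for the
 * extended (ring_desc_ex) descriptor format that nv_optimized() selects
 * in nv_probe() below.
 */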
static const struct net_device_ops nv_netdev_ops_optimized = {
        .ndo_open               = nv_open,
        .ndo_stop               = nv_close,
        .ndo_get_stats          = nv_get_stats,
        .ndo_start_xmit         = nv_start_xmit_optimized,
        .ndo_tx_timeout         = nv_tx_timeout,
        .ndo_change_mtu         = nv_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = nv_set_mac_address,
        .ndo_set_multicast_list = nv_set_multicast,
        .ndo_vlan_rx_register   = nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = nv_poll_controller,
#endif
};

static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
        struct net_device *dev;
        struct fe_priv *np;
        unsigned long addr;
        u8 __iomem *base;
        int err, i;
        u32 powerstate, txreg;
        u32 phystate_orig = 0, phystate;
        int phyinitialized = 0;
        static int printed_version;

        if (!printed_version++)
                printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
                       " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);

        dev = alloc_etherdev(sizeof(struct fe_priv));
        err = -ENOMEM;
        if (!dev)
                goto out;

        np = netdev_priv(dev);
        np->dev = dev;
        np->pci_dev = pci_dev;
        spin_lock_init(&np->lock);
        SET_NETDEV_DEV(dev, &pci_dev->dev);

        init_timer(&np->oom_kick);
        np->oom_kick.data = (unsigned long) dev;
        np->oom_kick.function = &nv_do_rx_refill;       /* timer handler */
        init_timer(&np->nic_poll);
        np->nic_poll.data = (unsigned long) dev;
        np->nic_poll.function = &nv_do_nic_poll;        /* timer handler */
        init_timer(&np->stats_poll);
        np->stats_poll.data = (unsigned long) dev;
        np->stats_poll.function = &nv_do_stats_poll;    /* timer handler */

        err = pci_enable_device(pci_dev);
        if (err)
                goto out_free;

        pci_set_master(pci_dev);

        err = pci_request_regions(pci_dev, DRV_NAME);
        if (err < 0)
                goto out_disable;

        if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
                np->register_size = NV_PCI_REGSZ_VER3;
        else if (id->driver_data & DEV_HAS_STATISTICS_V1)
                np->register_size = NV_PCI_REGSZ_VER2;
        else
                np->register_size = NV_PCI_REGSZ_VER1;

        err = -EINVAL;
        addr = 0;
        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
                        pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
                        pci_resource_len(pci_dev, i),
                        pci_resource_flags(pci_dev, i));
                if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
                    pci_resource_len(pci_dev, i) >= np->register_size) {
                        addr = pci_resource_start(pci_dev, i);
                        break;
                }
        }
        if (i == DEVICE_COUNT_RESOURCE) {
                dev_printk(KERN_INFO, &pci_dev->dev,
                           "Couldn't find register window\n");
                goto out_relreg;
        }

        /* copy of driver data */
        np->driver_data = id->driver_data;
        /* copy of device id */
        np->device_id = id->device;

        /* handle different descriptor versions */
        if (id->driver_data & DEV_HAS_HIGH_DMA) {
                /* packet format 3: supports 40-bit addressing */
                np->desc_ver = DESC_VER_3;
                np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
                if (dma_64bit) {
                        if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
                                dev_printk(KERN_INFO, &pci_dev->dev,
                                           "64-bit DMA failed, using 32-bit addressing\n");
                        else
                                dev->features |= NETIF_F_HIGHDMA;
                        if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
                                dev_printk(KERN_INFO, &pci_dev->dev,
                                           "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
                        }
                }
        } else if (id->driver_data & DEV_HAS_LARGEDESC) {
                /* packet format 2: supports jumbo frames */
                np->desc_ver = DESC_VER_2;
                np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
        } else {
                /* original packet format */
                np->desc_ver = DESC_VER_1;
                np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
        }

        np->pkt_limit = NV_PKTLIMIT_1;
        if (id->driver_data & DEV_HAS_LARGEDESC)
                np->pkt_limit = NV_PKTLIMIT_2;

        if (id->driver_data & DEV_HAS_CHECKSUM) {
                np->rx_csum = 1;
                np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
                dev->features |= NETIF_F_TSO;
        }

        np->vlanctl_bits = 0;
        if (id->driver_data & DEV_HAS_VLAN) {
                np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
                dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
        }

        np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
        if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
            (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
            (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
                np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
        }

        err = -ENOMEM;
        np->base = ioremap(addr, np->register_size);
        if (!np->base)
                goto out_relreg;
        dev->base_addr = (unsigned long)np->base;

        dev->irq = pci_dev->irq;

        np->rx_ring_size = RX_RING_DEFAULT;
        np->tx_ring_size = TX_RING_DEFAULT;

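        /* Both descriptor rings live in a single DMA-coherent allocation:
         * the rx ring comes first and the tx ring starts right after it,
         * at index rx_ring_size.
         */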
        if (!nv_optimized(np)) {
                np->rx_ring.orig = pci_alloc_consistent(pci_dev,
                                        sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
                                        &np->ring_addr);
                if (!np->rx_ring.orig)
                        goto out_unmap;
                np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
        } else {
                np->rx_ring.ex = pci_alloc_consistent(pci_dev,
                                        sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
                                        &np->ring_addr);
                if (!np->rx_ring.ex)
                        goto out_unmap;
                np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
        }
        np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
        np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
        if (!np->rx_skb || !np->tx_skb)
                goto out_freering;

        if (!nv_optimized(np))
                dev->netdev_ops = &nv_netdev_ops;
        else
                dev->netdev_ops = &nv_netdev_ops_optimized;

#ifdef CONFIG_FORCEDETH_NAPI
        netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
#endif
        SET_ETHTOOL_OPS(dev, &ops);
        dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

        pci_set_drvdata(pci_dev, dev);

        /* read the mac address */
        base = get_hwbase(dev);
        np->orig_mac[0] = readl(base + NvRegMacAddrA);
        np->orig_mac[1] = readl(base + NvRegMacAddrB);

        /* check the workaround bit for correct mac address order */
        txreg = readl(base + NvRegTransmitPoll);
        if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
                /* mac address is already in correct order */
                dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
                dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
                dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
                dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
                dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
                dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
        } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
                /* mac address is already in correct order */
                dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
                dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
                dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
                dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
                dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
                dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
                /*
                 * Set orig mac address back to the reversed version.
                 * This flag will be cleared during low power transition.
                 * Therefore, we should always put back the reversed address.
                 */
                np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
                        (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
                np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
        } else {
                /* need to reverse mac address to correct order */
                dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
                dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
                dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
                dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
                dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
                dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
                writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
                printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
        }
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

        if (!is_valid_ether_addr(dev->perm_addr)) {
                /*
                 * Bad mac address. At least one bios sets the mac address
                 * to 01:23:45:67:89:ab
                 */
                dev_printk(KERN_ERR, &pci_dev->dev,
                           "Invalid Mac address detected: %pM\n",
                           dev->dev_addr);
                dev_printk(KERN_ERR, &pci_dev->dev,
                           "Please complain to your hardware vendor. Switching to a random MAC.\n");
                dev->dev_addr[0] = 0x00;
                dev->dev_addr[1] = 0x00;
                dev->dev_addr[2] = 0x6c;
                get_random_bytes(&dev->dev_addr[3], 3);
        }

        dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
                pci_name(pci_dev), dev->dev_addr);

        /* set mac address */
        nv_copy_mac_to_hw(dev);

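        /* Worked example for the ordering handled above: with
         * NvRegMacAddrA == 0xddccbbaa and NvRegMacAddrB == 0x0000ffee, the
         * "correct order" branches yield the MAC aa:bb:cc:dd:ee:ff.
         */
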
        /* Workaround current PCI init glitch: wakeup bits aren't
         * being set from PCI PM capability.
         */
        device_init_wakeup(&pci_dev->dev, 1);

        /* disable WOL */
        writel(0, base + NvRegWakeUpFlags);
        np->wolenabled = 0;

        if (id->driver_data & DEV_HAS_POWER_CNTRL) {

                /* take phy and nic out of low power mode */
                powerstate = readl(base + NvRegPowerState2);
                powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
                if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
                     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
                    pci_dev->revision >= 0xA3)
                        powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
                writel(powerstate, base + NvRegPowerState2);
        }

        if (np->desc_ver == DESC_VER_1) {
                np->tx_flags = NV_TX_VALID;
        } else {
                np->tx_flags = NV_TX2_VALID;
        }

        np->msi_flags = 0;
        if ((id->driver_data & DEV_HAS_MSI) && msi) {
                np->msi_flags |= NV_MSI_CAPABLE;
        }
        if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
                /* msix has had reported issues when modifying irqmask
                   as in the case of napi, therefore, disable for now */
#ifndef CONFIG_FORCEDETH_NAPI
                np->msi_flags |= NV_MSI_X_CAPABLE;
#endif
        }

        if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
                np->irqmask = NVREG_IRQMASK_CPU;
                if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
                        np->msi_flags |= 0x0001;
        } else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
                   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
                /* start off in throughput mode */
                np->irqmask = NVREG_IRQMASK_THROUGHPUT;
                /* remove support for msix mode */
                np->msi_flags &= ~NV_MSI_X_CAPABLE;
        } else {
                optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
                np->irqmask = NVREG_IRQMASK_THROUGHPUT;
                if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
                        np->msi_flags |= 0x0003;
        }

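        /* The low bits OR-ed into msi_flags above apparently encode the
         * number of MSI-X vectors to request: one in CPU mode, three in
         * throughput mode (presumably separate rx, tx and "other" vectors).
         */
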
        if (id->driver_data & DEV_NEED_TIMERIRQ)
                np->irqmask |= NVREG_IRQ_TIMER;
        if (id->driver_data & DEV_NEED_LINKTIMER) {
                dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
                np->need_linktimer = 1;
                np->link_timeout = jiffies + LINK_TIMEOUT;
        } else {
                dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
                np->need_linktimer = 0;
        }

        /* Limit the number of tx's outstanding for hw bug */
        if (id->driver_data & DEV_NEED_TX_LIMIT) {
                np->tx_limit = 1;
                if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
                     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
                     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
                     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
                     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
                     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
                     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
                     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) &&
                    pci_dev->revision >= 0xA2)
                        np->tx_limit = 0;
        }

        /* clear phy state and temporarily halt phy interrupts */
        writel(0, base + NvRegMIIMask);
        phystate = readl(base + NvRegAdapterControl);
        if (phystate & NVREG_ADAPTCTL_RUNNING) {
                phystate_orig = 1;
                phystate &= ~NVREG_ADAPTCTL_RUNNING;
                writel(phystate, base + NvRegAdapterControl);
        }
        writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

        if (id->driver_data & DEV_HAS_MGMT_UNIT) {
                /* management unit running on the mac? */
                if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
                    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
                    nv_mgmt_acquire_sema(dev) &&
                    nv_mgmt_get_version(dev)) {
                        np->mac_in_use = 1;
                        if (np->mgmt_version > 0) {
                                np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
                        }
                        dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
                                pci_name(pci_dev), np->mac_in_use);
                        /* management unit setup the phy already? */
                        if (np->mac_in_use &&
                            ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
                             NVREG_XMITCTL_SYNC_PHY_INIT)) {
                                /* phy is inited by mgmt unit */
                                phyinitialized = 1;
                                dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
                                        pci_name(pci_dev));
                        } else {
                                /* we need to init the phy */
                        }
                }
        }

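        /* Note on the scan below: because of the "i & 0x1F" masking, MII
         * addresses 1..31 are probed first and address 0 is tried last
         * (when i == 32), presumably because address 0 is the least likely
         * place to find the real phy.
         */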
"mgmt " : "", 6002 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "", 6003 np->gigabit == PHY_GIGABIT ? "gbit " : "", 6004 np->need_linktimer ? "lnktim " : "", 6005 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", 6006 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "", 6007 np->desc_ver); 6008 6009 return 0; 6010 6011out_error: 6012 if (phystate_orig) 6013 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); 6014 pci_set_drvdata(pci_dev, NULL); 6015out_freering: 6016 free_rings(dev); 6017out_unmap: 6018 iounmap(get_hwbase(dev)); 6019out_relreg: 6020 pci_release_regions(pci_dev); 6021out_disable: 6022 pci_disable_device(pci_dev); 6023out_free: 6024 free_netdev(dev); 6025out: 6026 return err; 6027} 6028 6029static void nv_restore_phy(struct net_device *dev) 6030{ 6031 struct fe_priv *np = netdev_priv(dev); 6032 u16 phy_reserved, mii_control; 6033 6034 if (np->phy_oui == PHY_OUI_REALTEK && 6035 np->phy_model == PHY_MODEL_REALTEK_8201 && 6036 phy_cross == NV_CROSSOVER_DETECTION_DISABLED) { 6037 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3); 6038 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); 6039 phy_reserved &= ~PHY_REALTEK_INIT_MSK1; 6040 phy_reserved |= PHY_REALTEK_INIT8; 6041 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved); 6042 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1); 6043 6044 /* restart auto negotiation */ 6045 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 6046 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 6047 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control); 6048 } 6049} 6050 6051static void nv_restore_mac_addr(struct pci_dev *pci_dev) 6052{ 6053 struct net_device *dev = pci_get_drvdata(pci_dev); 6054 struct fe_priv *np = netdev_priv(dev); 6055 u8 __iomem *base = get_hwbase(dev); 6056 6057 /* special op: write back the misordered MAC address - otherwise 6058 * the next nv_probe would see a wrong address. 6059 */ 6060 writel(np->orig_mac[0], base + NvRegMacAddrA); 6061 writel(np->orig_mac[1], base + NvRegMacAddrB); 6062 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV, 6063 base + NvRegTransmitPoll); 6064} 6065 6066static void __devexit nv_remove(struct pci_dev *pci_dev) 6067{ 6068 struct net_device *dev = pci_get_drvdata(pci_dev); 6069 6070 unregister_netdev(dev); 6071 6072 nv_restore_mac_addr(pci_dev); 6073 6074 /* restore any phy related changes */ 6075 nv_restore_phy(dev); 6076 6077 nv_mgmt_release_sema(dev); 6078 6079 /* free all structures */ 6080 free_rings(dev); 6081 iounmap(get_hwbase(dev)); 6082 pci_release_regions(pci_dev); 6083 pci_disable_device(pci_dev); 6084 free_netdev(dev); 6085 pci_set_drvdata(pci_dev, NULL); 6086} 6087 6088#ifdef CONFIG_PM 6089static int nv_suspend(struct pci_dev *pdev, pm_message_t state) 6090{ 6091 struct net_device *dev = pci_get_drvdata(pdev); 6092 struct fe_priv *np = netdev_priv(dev); 6093 u8 __iomem *base = get_hwbase(dev); 6094 int i; 6095 6096 if (netif_running(dev)) { 6097 // Gross. 
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        int i;

        if (netif_running(dev)) {
                // Gross.
                nv_close(dev);
        }
        netif_device_detach(dev);

        /* save non-pci configuration space */
        for (i = 0; i <= np->register_size/sizeof(u32); i++)
                np->saved_config_space[i] = readl(base + i*sizeof(u32));

        pci_save_state(pdev);
        pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int nv_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        int i, rc = 0;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        /* ack any pending wake events, disable PME */
        pci_enable_wake(pdev, PCI_D0, 0);

        /* restore non-pci configuration space */
        for (i = 0; i <= np->register_size/sizeof(u32); i++)
                writel(np->saved_config_space[i], base + i*sizeof(u32));

        pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);

        /* restore phy state, including autoneg */
        phy_init(dev);

        netif_device_attach(dev);
        if (netif_running(dev)) {
                rc = nv_open(dev);
                nv_set_multicast(dev);
        }
        return rc;
}

static void nv_shutdown(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct fe_priv *np = netdev_priv(dev);

        if (netif_running(dev))
                nv_close(dev);

        /*
         * Restore the MAC so a kernel started by kexec won't get confused.
         * If we really go for poweroff, we must not restore the MAC,
         * otherwise the MAC for WOL will be reversed at least on some boards.
         */
        if (system_state != SYSTEM_POWER_OFF) {
                nv_restore_mac_addr(pdev);
        }

        pci_disable_device(pdev);
        /*
         * Apparently it is not possible to reinitialise from D3 hot,
         * only put the device into D3 if we really go for poweroff.
         */
        if (system_state == SYSTEM_POWER_OFF) {
                if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
                        pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
                pci_set_power_state(pdev, PCI_D3hot);
        }
}
#else
#define nv_suspend NULL
#define nv_shutdown NULL
#define nv_resume NULL
#endif /* CONFIG_PM */

static struct pci_device_id pci_tbl[] = {
        { /* nForce Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
        },
        { /* nForce2 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
        },
        { /* nForce3 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
        },
        { /* nForce3 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
        },
        { /* nForce3 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
        },
        { /* nForce3 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
        },
        { /* nForce3 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
        },
        { /* CK804 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
        },
        { /* CK804 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
        },
        { /* MCP04 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
        },
        { /* MCP04 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
        },
        { /* MCP51 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
        },
        { /* MCP51 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
        },
        { /* MCP55 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
        },
        { /* MCP55 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
        },
        { /* MCP61 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
        },
        { /* MCP61 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
        },
        { /* MCP61 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
        },
        { /* MCP61 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
        },
        { /* MCP65 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        { /* MCP65 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        { /* MCP65 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        { /* MCP65 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        { /* MCP67 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
        },
        { /* MCP67 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
        },
        { /* MCP67 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
        },
        { /* MCP67 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
        },
        { /* MCP73 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
        },
        { /* MCP73 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
        },
        { /* MCP73 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
        },
        { /* MCP73 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
        },
        { /* MCP77 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        { /* MCP77 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        { /* MCP77 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        { /* MCP77 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        { /* MCP79 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        { /* MCP79 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        { /* MCP79 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        { /* MCP79 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        {0,},
};

static struct pci_driver driver = {
        .name           = DRV_NAME,
        .id_table       = pci_tbl,
        .probe          = nv_probe,
        .remove         = __devexit_p(nv_remove),
        .suspend        = nv_suspend,
        .resume         = nv_resume,
        .shutdown       = nv_shutdown,
};

static int __init init_nic(void)
{
        return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
        pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
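/*
 * Worked example for the poll_interval formula above: a desired timer
 * period of 500 microseconds gives (500 * 100) / 2^10 = 48 (rounding
 * down), so one would load the module with poll_interval=48.
 */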
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);