/*
 * drivers/net/forcedeth.c, as of Linux v2.6.34
 * Source: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 */
/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION	"0.64"
#define DRV_NAME	"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif

#define TX_WORK_PER_LOOP  64
#define RX_WORK_PER_LOOP  64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ          0x0000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER         0x0000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC          0x0000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA           0x0000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM           0x0000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN               0x0000020  /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI                0x0000040  /* device supports MSI */
#define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2      0x0000600  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3      0x0000e00  /* device supports hw statistics version 3 */
#define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX      0x0008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1   0x0010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2   0x0020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3   0x0040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT          0x0080000  /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2         0x0180000  /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE          0x0200000  /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX      0x0400000  /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX     0x0800000  /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX           0x1000000  /* device needs msi workaround */
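/*
 * Note on the encoding above: the statistics and tx-limit flags are
 * cumulative bit masks. DEV_HAS_STATISTICS_V2 (0x600) contains the
 * DEV_HAS_STATISTICS_V1 bit (0x200), and DEV_HAS_STATISTICS_V3 (0xe00)
 * contains both, so a test like (driver_data & DEV_HAS_STATISTICS_V1)
 * also matches V2/V3 hardware. DEV_NEED_TX_LIMIT2 (0x180000) likewise
 * includes the DEV_NEED_TX_LIMIT bit (0x080000).
 */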
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR	0x0001
#define NVREG_IRQ_RX		0x0002
#define NVREG_IRQ_RX_NOBUF	0x0004
#define NVREG_IRQ_TX_ERR	0x0008
#define NVREG_IRQ_TX_OK		0x0010
#define NVREG_IRQ_TIMER		0x0020
#define NVREG_IRQ_LINK		0x0040
#define NVREG_IRQ_RX_FORCED	0x0080
#define NVREG_IRQ_TX_FORCED	0x0100
#define NVREG_IRQ_RECOVER_ERROR	0x8200
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL	(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL	(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER		(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
#define NVREG_XMITCTL_DATA_START	0x00100000
#define NVREG_XMITCTL_DATA_READY	0x00010000
#define NVREG_XMITCTL_DATA_ERROR	0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE	0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE	0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT		0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK	0x000003ff
#define NVREG_BKOFFCTRL_SELECT		24
#define NVREG_BKOFFCTRL_GEAR		12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION	0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION		0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE	0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
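/*
 * Note: each descriptor's flaglen word packs the buffer length in the low
 * bits (16 bits for v1 descriptors, 14 bits for v2/v3) and status flags in
 * the remaining high bits; LEN_MASK_V* is simply the complement of the
 * corresponding FLAG_MASK_V*.
 */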
#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT	(1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		512
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100		/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
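/*
 * Note: DESC_VER_1 and DESC_VER_2 use the two-word struct ring_desc;
 * DESC_VER_3 uses the four-word struct ring_desc_ex with split high/low
 * buffer addresses (see nv_optimized() below).
 */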
/* PHY defines */
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK		0x03ff
#define PHYID1_OUI_SHFT		6
#define PHYID2_OUI_MASK		0xfc00
#define PHYID2_OUI_SHFT		10
#define PHYID2_MODEL_MASK	0x03f0
#define PHY_MODEL_REALTEK_8211		0x0110
#define PHY_REV_MASK			0x0001
#define PHY_REV_REALTEK_8211B		0x0000
#define PHY_REV_REALTEK_8211C		0x0001
#define PHY_MODEL_REALTEK_8201		0x0200
#define PHY_MODEL_MARVELL_E3016		0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

#define NV_MSI_PRIV_OFFSET	0x68
#define NV_MSI_PRIV_VALUE	0xffffffff

#define NV_RESTART_TX	0x1
#define NV_RESTART_RX	0x2

#define NV_TX_LIMIT_COUNT	16

#define NV_DYNAMIC_THRESHOLD		4
#define NV_DYNAMIC_MAX_QUIET_COUNT	2048

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
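/*
 * Note: the counts above follow directly from the struct layout: V3 is
 * every u64 field, V2 drops the three "version 3 stats" fields
 * (tx_unicast, tx_multicast, tx_broadcast), and V1 additionally drops the
 * six "version 2 stats" fields (tx_deferral, tx_packets, rx_bytes,
 * tx_pause, rx_pause, rx_drop_frame).
 */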
/* diagnostics */
#define NV_TEST_COUNT_BASE	3
#define NV_TEST_COUNT_EXTENDED	4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
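/*
 * Illustrative sketch only (not from the original source): process-context
 * code that touches the np->lock-protected state typically looks like
 *
 *	spin_lock_irq(&np->lock);
 *	...update link or ring state...
 *	spin_unlock_irq(&np->lock);
 *
 * while code running with interrupts already disabled (e.g. the irq
 * handler) can take plain spin_lock(&np->lock).
 */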
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];	/* -rx */
	char name_tx[IFNAMSIZ + 3];	/* -tx */
	char name_other[IFNAMSIZ + 6];	/* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)].
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
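/*
 * Worked example for the formula above: inverting it, a register value v
 * corresponds to roughly v * 2^10 / 100 = v * 10.24 microseconds, so the
 * NVREG_POLL_DEFAULT of 97 mentioned in the header comment gives about
 * 97 * 10.24 ~= 993 us, i.e. the advertised 1 ms timer interval.
 */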
/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down = 0;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk("%s", msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
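/*
 * Note: the double shift (>>31>>1) instead of a plain >>32 matters because
 * when dma_addr_t is a 32-bit type, shifting it by its full width is
 * undefined behaviour in C; >>31 followed by >>1 is well defined and
 * yields 0 in that case.
 */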
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
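/*
 * Note: both rings live in one contiguous DMA allocation with the tx ring
 * immediately after the rx ring, which is why the tx base address is
 * ring_addr + rx_ring_size * sizeof(descriptor); free_rings() below frees
 * the combined (rx_ring_size + tx_ring_size) block accordingly.
 */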
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
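/*
 * Note: using_multi_irqs() returns 0 both for legacy INTx/MSI and for
 * MSI-X configured with a single combined vector; it returns 1 only when
 * rx, tx and "other" events each have their own MSI-X vector.
 */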
static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
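/*
 * Note: because the MSI-X irqmask write XORs, disabling is done by writing
 * back the same mask that was enabled (toggling those bits off), whereas
 * in INTx/MSI mode the mask register is simply cleared to 0.
 */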
static void nv_napi_enable(struct net_device *dev)
{
#ifdef CONFIG_FORCEDETH_NAPI
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
#endif
}

static void nv_napi_disable(struct net_device *dev)
{
#ifdef CONFIG_FORCEDETH_NAPI
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
#endif
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
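/*
 * Illustrative usage sketch (not from the original source): pass MII_READ
 * as the value to read, or a register value to write, e.g.
 *
 *	int bmsr = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 *	mii_rw(dev, np->phyaddr, MII_BMCR, BMCR_RESET);
 *
 * mii_rw() returns the register contents for a read, 0 for a successful
 * write, and -1 on timeout or MII error.
 */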
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
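/*
 * Note: BMCR_RESET is self-clearing, so the loop above polls BMCR until
 * the PHY drops the bit; worst case this waits 500 ms plus about
 * 100 * 10 ms, roughly 1.5 seconds, before giving up.
 */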
np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1482 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1483 return PHY_ERROR; 1484 } 1485 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); 1486 phy_reserved &= ~PHY_REALTEK_INIT_MSK1; 1487 phy_reserved |= PHY_REALTEK_INIT3; 1488 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) { 1489 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1490 return PHY_ERROR; 1491 } 1492 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1493 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1494 return PHY_ERROR; 1495 } 1496 } 1497 } 1498 } 1499 1500 /* some phys clear out pause advertisment on reset, set it back */ 1501 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); 1502 1503 /* restart auto negotiation, power down phy */ 1504 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1505 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 1506 if (phy_power_down) { 1507 mii_control |= BMCR_PDOWN; 1508 } 1509 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1510 return PHY_ERROR; 1511 } 1512 1513 return 0; 1514} 1515 1516static void nv_start_rx(struct net_device *dev) 1517{ 1518 struct fe_priv *np = netdev_priv(dev); 1519 u8 __iomem *base = get_hwbase(dev); 1520 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1521 1522 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); 1523 /* Already running? Stop it. */ 1524 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { 1525 rx_ctrl &= ~NVREG_RCVCTL_START; 1526 writel(rx_ctrl, base + NvRegReceiverControl); 1527 pci_push(base); 1528 } 1529 writel(np->linkspeed, base + NvRegLinkSpeed); 1530 pci_push(base); 1531 rx_ctrl |= NVREG_RCVCTL_START; 1532 if (np->mac_in_use) 1533 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; 1534 writel(rx_ctrl, base + NvRegReceiverControl); 1535 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", 1536 dev->name, np->duplex, np->linkspeed); 1537 pci_push(base); 1538} 1539 1540static void nv_stop_rx(struct net_device *dev) 1541{ 1542 struct fe_priv *np = netdev_priv(dev); 1543 u8 __iomem *base = get_hwbase(dev); 1544 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1545 1546 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); 1547 if (!np->mac_in_use) 1548 rx_ctrl &= ~NVREG_RCVCTL_START; 1549 else 1550 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; 1551 writel(rx_ctrl, base + NvRegReceiverControl); 1552 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, 1553 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, 1554 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); 1555 1556 udelay(NV_RXSTOP_DELAY2); 1557 if (!np->mac_in_use) 1558 writel(0, base + NvRegLinkSpeed); 1559} 1560 1561static void nv_start_tx(struct net_device *dev) 1562{ 1563 struct fe_priv *np = netdev_priv(dev); 1564 u8 __iomem *base = get_hwbase(dev); 1565 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1566 1567 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name); 1568 tx_ctrl |= NVREG_XMITCTL_START; 1569 if (np->mac_in_use) 1570 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; 1571 writel(tx_ctrl, base + NvRegTransmitterControl); 1572 pci_push(base); 1573} 1574 1575static void nv_stop_tx(struct net_device *dev) 1576{ 1577 struct fe_priv *np = netdev_priv(dev); 1578 u8 __iomem *base = get_hwbase(dev); 1579 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1580 1581 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); 1582 if (!np->mac_in_use) 1583 tx_ctrl &= ~NVREG_XMITCTL_START; 
1584 else 1585 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; 1586 writel(tx_ctrl, base + NvRegTransmitterControl); 1587 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, 1588 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, 1589 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); 1590 1591 udelay(NV_TXSTOP_DELAY2); 1592 if (!np->mac_in_use) 1593 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, 1594 base + NvRegTransmitPoll); 1595} 1596 1597static void nv_start_rxtx(struct net_device *dev) 1598{ 1599 nv_start_rx(dev); 1600 nv_start_tx(dev); 1601} 1602 1603static void nv_stop_rxtx(struct net_device *dev) 1604{ 1605 nv_stop_rx(dev); 1606 nv_stop_tx(dev); 1607} 1608 1609static void nv_txrx_reset(struct net_device *dev) 1610{ 1611 struct fe_priv *np = netdev_priv(dev); 1612 u8 __iomem *base = get_hwbase(dev); 1613 1614 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name); 1615 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1616 pci_push(base); 1617 udelay(NV_TXRX_RESET_DELAY); 1618 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1619 pci_push(base); 1620} 1621 1622static void nv_mac_reset(struct net_device *dev) 1623{ 1624 struct fe_priv *np = netdev_priv(dev); 1625 u8 __iomem *base = get_hwbase(dev); 1626 u32 temp1, temp2, temp3; 1627 1628 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name); 1629 1630 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1631 pci_push(base); 1632 1633 /* save registers since they will be cleared on reset */ 1634 temp1 = readl(base + NvRegMacAddrA); 1635 temp2 = readl(base + NvRegMacAddrB); 1636 temp3 = readl(base + NvRegTransmitPoll); 1637 1638 writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset); 1639 pci_push(base); 1640 udelay(NV_MAC_RESET_DELAY); 1641 writel(0, base + NvRegMacReset); 1642 pci_push(base); 1643 udelay(NV_MAC_RESET_DELAY); 1644 1645 /* restore saved registers */ 1646 writel(temp1, base + NvRegMacAddrA); 1647 writel(temp2, base + NvRegMacAddrB); 1648 writel(temp3, base + NvRegTransmitPoll); 1649 1650 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1651 pci_push(base); 1652} 1653 1654static void nv_get_hw_stats(struct net_device *dev) 1655{ 1656 struct fe_priv *np = netdev_priv(dev); 1657 u8 __iomem *base = get_hwbase(dev); 1658 1659 np->estats.tx_bytes += readl(base + NvRegTxCnt); 1660 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); 1661 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); 1662 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); 1663 np->estats.tx_late_collision += readl(base + NvRegTxLateCol); 1664 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); 1665 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); 1666 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); 1667 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); 1668 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); 1669 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); 1670 np->estats.rx_late_collision += readl(base + NvRegRxLateCol); 1671 np->estats.rx_runt += readl(base + NvRegRxRunt); 1672 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); 1673 np->estats.rx_over_errors += readl(base + NvRegRxOverflow); 1674 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); 1675 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); 1676 np->estats.rx_length_error += readl(base + NvRegRxLenErr); 1677 
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
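 * The ring is refilled until put_rx sits one slot behind get_rx, so a
 * completely full ring is never ambiguous with an empty one.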
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc* less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex* less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

/* If rx bufs are exhausted called after 50ms to attempt to refresh */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	napi_schedule(&np->napi);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (!nv_optimized(np))
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
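		/* with per-ring MSI-X vectors, only the rx vector was
		 * disabled above, so only it needs to be re-enabled */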
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif

static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;

	if (!nv_optimized(np))
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
	np->tx_stop = 0;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}

static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	if (tx_skb->dma) {
		if (tx_skb->dma_single)
			pci_unmap_single(np->pci_dev, tx_skb->dma,
					 tx_skb->dma_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(np->pci_dev, tx_skb->dma,
				       tx_skb->dma_len,
				       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
}

static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	nv_unmap_txskb(np, tx_skb);
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	}
	return 0;
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(np, &np->tx_skb[i]))
			dev->stats.tx_dropped++;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}

static void nv_drain_rxtx(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}

static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}

static void nv_legacybackoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	u32 low;
	int tx_status = 0;

	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
	get_random_bytes(&low, sizeof(low));
	reg |= low & NVREG_SLOTTIME_MASK;

	/* Need to stop tx before change takes effect.
	 * Caller has already gained np->lock.
	 */
	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
	if (tx_status)
		nv_stop_tx(dev);
	nv_stop_rx(dev);
	writel(reg, base + NvRegSlotTime);
	if (tx_status)
		nv_start_tx(dev);
	nv_start_rx(dev);
}

/* Gear Backoff Seeds */
#define BACKOFF_SEEDSET_ROWS	8
#define BACKOFF_SEEDSET_LFSRS	15

/* Known Good seed sets */
static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};

static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
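/* A row of the two tables above is picked at random at reseed time; its
 * 15 entries seed the per-gear backoff LFSRs, while the free running
 * LFSR is seeded from three swizzled 12-bit random values. The rows are
 * pre-validated constants, not values derived at runtime.
 */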

static void nv_gear_backoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
	u32 temp, seedset, combinedSeed;
	int i;

	/* Setup seed for free running LFSR */
	/* Get three random 12-bit values and swizzle bits
	   around to increase randomness */
	get_random_bytes(&miniseed1, sizeof(miniseed1));
	miniseed1 &= 0x0fff;
	if (miniseed1 == 0)
		miniseed1 = 0xabc;

	get_random_bytes(&miniseed2, sizeof(miniseed2));
	miniseed2 &= 0x0fff;
	if (miniseed2 == 0)
		miniseed2 = 0xabc;
	miniseed2_reversed =
		((miniseed2 & 0xF00) >> 8) |
		 (miniseed2 & 0x0F0) |
		 ((miniseed2 & 0x00F) << 8);

	get_random_bytes(&miniseed3, sizeof(miniseed3));
	miniseed3 &= 0x0fff;
	if (miniseed3 == 0)
		miniseed3 = 0xabc;
	miniseed3_reversed =
		((miniseed3 & 0xF00) >> 8) |
		 (miniseed3 & 0x0F0) |
		 ((miniseed3 & 0x00F) << 8);

	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
		       (miniseed2 ^ miniseed3_reversed);

	/* Seeds can not be zero */
	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
		combinedSeed |= 0x08;
	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
		combinedSeed |= 0x8000;

	/* No need to disable tx here */
	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
	writel(temp, base + NvRegBackOffControl);

	/* Setup seeds for all gear LFSRs. */
	get_random_bytes(&seedset, sizeof(seedset));
	seedset = seedset % BACKOFF_SEEDSET_ROWS;
	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
		temp |= main_seedset[seedset][i-1] & 0x3ff;
		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
		writel(temp, base + NvRegBackOffControl);
	}
}

/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc* put_tx;
	struct ring_desc* start_tx;
	struct ring_desc* prev_tx;
	struct nv_skb_map* prev_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.orig;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		np->put_tx_ctx->dma_single = 1;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			np->put_tx_ctx->dma_single = 0;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	spin_lock_irqsave(&np->lock, flags);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.orig = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j = 0; j < 64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}

static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc_ex* put_tx;
	struct ring_desc_ex* start_tx;
	struct ring_desc_ex* prev_tx;
	struct nv_skb_map* prev_tx_ctx;
	struct nv_skb_map* start_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.ex;
	start_tx_ctx = np->put_tx_ctx;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		np->put_tx_ctx->dma_single = 1;
		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			np->put_tx_ctx->dma_single = 0;
			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (likely(!np->vlangrp)) {
		start_tx->txvlan = 0;
	} else {
		if (vlan_tx_tag_present(skb))
			start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
		else
			start_tx->txvlan = 0;
	}

	spin_lock_irqsave(&np->lock, flags);

	if (np->tx_limit) {
		/* Limit the number of outstanding tx. Setup all fragments, but
		 * do not set the VALID bit on the first descriptor. Save a pointer
		 * to that descriptor and also for next skb_map element.
		 */

		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
			if (!np->tx_change_owner)
				np->tx_change_owner = start_tx_ctx;

			/* remove VALID bit */
			tx_flags &= ~NV_TX2_VALID;
			start_tx_ctx->first_tx_desc = start_tx;
			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
			np->tx_end_flip = np->put_tx_ctx;
		} else {
			np->tx_pkts_in_progress++;
		}
	}

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j = 0; j < 64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}

static inline void nv_tx_flip_ownership(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	np->tx_pkts_in_progress--;
	if (np->tx_change_owner) {
		np->tx_change_owner->first_tx_desc->flaglen |=
			cpu_to_le32(NV_TX2_VALID);
		np->tx_pkts_in_progress++;

		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
		if (np->tx_change_owner == np->tx_end_flip)
			np->tx_change_owner = NULL;

		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	}
}

/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
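 * Completion stops at the first descriptor that the nic still owns
 * (NV_TX_VALID set), or after at most 'limit' completed packets.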
 */
static int nv_tx_done(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc* orig_get_tx = np->get_tx.orig;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
	       (tx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
			dev->name, flags);

		nv_unmap_txskb(np, np->get_tx_ctx);

		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				if (flags & NV_TX_ERROR) {
					if (flags & NV_TX_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
				tx_work++;
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				if (flags & NV_TX2_ERROR) {
					if (flags & NV_TX2_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
				tx_work++;
			}
		}
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->first_tx.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}

static int nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
	       (tx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
			dev->name, flags);

		nv_unmap_txskb(np, np->get_tx_ctx);

		if (flags & NV_TX2_LASTPACKET) {
			if (!(flags & NV_TX2_ERROR))
				dev->stats.tx_packets++;
			else {
				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
					if (np->driver_data & DEV_HAS_GEAR_MODE)
						nv_gear_backoff_reseed(dev);
					else
						nv_legacybackoff_reseed(dev);
				}
			}

			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;
			tx_work++;

			if (np->tx_limit) {
				nv_tx_flip_ownership(dev);
			}
		}
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}

/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
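 * Dumps the tx registers and the tx ring for diagnosis, then reaps all
 * finished packets, resynchronizes the ring with the hardware position
 * and restarts the transmitter.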
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;
	union ring_type put_tx;
	int saved_tx_limit;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx\n",
		       dev->name, (unsigned long)np->ring_addr);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i = 0; i <= np->register_size; i += 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
			       i,
			       readl(base + i + 0), readl(base + i + 4),
			       readl(base + i + 8), readl(base + i + 12),
			       readl(base + i + 16), readl(base + i + 20),
			       readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i = 0; i < np->tx_ring_size; i += 4) {
			if (!nv_optimized(np)) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
	saved_tx_limit = np->tx_limit;
	np->tx_limit = 0;	/* prevent giving HW any limited pkts */
	np->tx_stop = 0;	/* prevent waking tx queue */
	if (!nv_optimized(np))
		nv_tx_done(dev, np->tx_ring_size);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	/* save current HW position */
	if (np->tx_change_owner)
		put_tx.ex = np->tx_change_owner->first_tx_desc;
	else
		put_tx = np->put_tx;

	/* 3) clear all tx state */
	nv_drain_tx(dev);
	nv_init_tx(dev);

	/* 4) restore state to current HW position */
	np->get_tx = np->put_tx = put_tx;
	np->tx_limit = saved_tx_limit;

	/* 5) restart tx engine */
	nv_start_tx(dev);
	netif_wake_queue(dev);
	spin_unlock_irq(&np->lock);
}

/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
		dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * the additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
				dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
				dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
			dev->name, datalen);
		return datalen;
	}
}

static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.orig != np->put_rx.orig) &&
	       !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
	       (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
			for (j = 0; j < 64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
							dev->stats.rx_missed_errors++;
						if (flags & NV_RX_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX2_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
			dev->name, len, skb->protocol);
#ifdef CONFIG_FORCEDETH_NAPI
		netif_receive_skb(skb);
#else
		netif_rx(skb);
#endif
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}

static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.ex != np->put_rx.ex) &&
	       !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	       (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
			for (j = 0; j < 64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
				dev->name, len, skb->protocol);

			if (likely(!np->vlangrp)) {
#ifdef CONFIG_FORCEDETH_NAPI
				netif_receive_skb(skb);
#else
				netif_rx(skb);
#endif
			} else {
				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
#ifdef CONFIG_FORCEDETH_NAPI
					vlan_hwaccel_receive_skb(skb, np->vlangrp,
								 vlanflags & NV_RX3_VLAN_TAG_MASK);
#else
					vlan_hwaccel_rx(skb, np->vlangrp,
							vlanflags & NV_RX3_VLAN_TAG_MASK);
#endif
				} else {
#ifdef CONFIG_FORCEDETH_NAPI
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
				}
			}

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}

static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}

/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
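 * If the rx buffer size changes, both engines are stopped and the rings
 * are drained and rebuilt with buffers of the new size.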
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rxtx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) +
		       ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		       base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
		nv_enable_irq(dev);
	}
	return 0;
}

static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
		 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}

/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr*)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}

/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
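 * The hardware filter is an address/mask pair: alwaysOn accumulates the
 * bits set in every list entry, alwaysOff the bits clear in every entry,
 * and only bits that are constant across the whole list end up in the
 * mask, so the filter may accept more multicasts than were requested.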
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				netdev_for_each_mc_addr(walk, dev) {
					u32 a, b;
					a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
					b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		} else {
			mask[0] = NVREG_MCASTMASKA_NONE;
			mask[1] = NVREG_MCASTMASKB_NONE;
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}

static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
				/* limit the number of tx pause frames to a default of 8 */
				writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
			}
			writel(pause_enable, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}

/**
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
	u32 txrxFlags = 0;
	u32 phy_exp;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
			dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
			dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
		dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
		    (status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
		dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	/* The transmitter and receiver must be restarted for safe update */
	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
		txrxFlags |= NV_RESTART_TX;
		nv_stop_tx(dev);
	}
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		txrxFlags |= NV_RESTART_RX;
		nv_stop_rx(dev);
	}

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegSlotTime);
		phyreg &= ~(0x3FF00);
		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
			phyreg |= NVREG_SLOTTIME_10_100_FULL;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_SLOTTIME_1000_FULL;
		writel(phyreg, base + NvRegSlotTime);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		} else {
			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
				else
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
			} else {
				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
			}
		}
	} else {
		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
		else
			txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
	       base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				break;
			case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	if (txrxFlags & NV_RESTART_TX)
		nv_start_tx(dev);
	if (txrxFlags & NV_RESTART_RX)
		nv_start_rx(dev);

	return retval;
}

static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_txrx_gate(dev, false);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_txrx_gate(dev, true);
			nv_stop_rx(dev);
		}
	}
}

static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}

static void nv_msi_workaround(struct fe_priv *np)
{

	/* Need to toggle the msi irq mask within the ethernet device,
	 * otherwise, future interrupts will not be detected.
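	 * (Toggling the mask appears to re-arm the device's single MSI
	 * vector, which is why only vector 0 is touched here.)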
3455 */ 3456 if (np->msi_flags & NV_MSI_ENABLED) { 3457 u8 __iomem *base = np->base; 3458 3459 writel(0, base + NvRegMSIIrqMask); 3460 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3461 } 3462} 3463 3464static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work) 3465{ 3466 struct fe_priv *np = netdev_priv(dev); 3467 3468 if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) { 3469 if (total_work > NV_DYNAMIC_THRESHOLD) { 3470 /* transition to poll based interrupts */ 3471 np->quiet_count = 0; 3472 if (np->irqmask != NVREG_IRQMASK_CPU) { 3473 np->irqmask = NVREG_IRQMASK_CPU; 3474 return 1; 3475 } 3476 } else { 3477 if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) { 3478 np->quiet_count++; 3479 } else { 3480 /* reached a period of low activity, switch 3481 to per tx/rx packet interrupts */ 3482 if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) { 3483 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 3484 return 1; 3485 } 3486 } 3487 } 3488 } 3489 return 0; 3490} 3491 3492static irqreturn_t nv_nic_irq(int foo, void *data) 3493{ 3494 struct net_device *dev = (struct net_device *) data; 3495 struct fe_priv *np = netdev_priv(dev); 3496 u8 __iomem *base = get_hwbase(dev); 3497#ifndef CONFIG_FORCEDETH_NAPI 3498 int total_work = 0; 3499 int loop_count = 0; 3500#endif 3501 3502 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 3503 3504 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3505 np->events = readl(base + NvRegIrqStatus); 3506 writel(np->events, base + NvRegIrqStatus); 3507 } else { 3508 np->events = readl(base + NvRegMSIXIrqStatus); 3509 writel(np->events, base + NvRegMSIXIrqStatus); 3510 } 3511 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events); 3512 if (!(np->events & np->irqmask)) 3513 return IRQ_NONE; 3514 3515 nv_msi_workaround(np); 3516 3517#ifdef CONFIG_FORCEDETH_NAPI 3518 if (napi_schedule_prep(&np->napi)) { 3519 /* 3520 * Disable further irq's (msix not enabled with napi) 3521 */ 3522 writel(0, base + NvRegIrqMask); 3523 __napi_schedule(&np->napi); 3524 } 3525 3526#else 3527 do 3528 { 3529 int work = 0; 3530 if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) { 3531 if (unlikely(nv_alloc_rx(dev))) { 3532 spin_lock(&np->lock); 3533 if (!np->in_shutdown) 3534 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3535 spin_unlock(&np->lock); 3536 } 3537 } 3538 3539 spin_lock(&np->lock); 3540 work += nv_tx_done(dev, TX_WORK_PER_LOOP); 3541 spin_unlock(&np->lock); 3542 3543 if (!work) 3544 break; 3545 3546 total_work += work; 3547 3548 loop_count++; 3549 } 3550 while (loop_count < max_interrupt_work); 3551 3552 if (nv_change_interrupt_mode(dev, total_work)) { 3553 /* setup new irq mask */ 3554 writel(np->irqmask, base + NvRegIrqMask); 3555 } 3556 3557 if (unlikely(np->events & NVREG_IRQ_LINK)) { 3558 spin_lock(&np->lock); 3559 nv_link_irq(dev); 3560 spin_unlock(&np->lock); 3561 } 3562 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3563 spin_lock(&np->lock); 3564 nv_linkchange(dev); 3565 spin_unlock(&np->lock); 3566 np->link_timeout = jiffies + LINK_TIMEOUT; 3567 } 3568 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { 3569 spin_lock(&np->lock); 3570 /* disable interrupts on the nic */ 3571 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3572 writel(0, base + NvRegIrqMask); 3573 else 3574 writel(np->irqmask, base + NvRegIrqMask); 3575 pci_push(base); 3576 3577 if (!np->in_shutdown) { 3578 np->nic_poll_irq = np->irqmask; 3579 np->recover_error = 1; 3580 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3581 } 3582 spin_unlock(&np->lock); 
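 /* irq sources stay masked here; the nic_poll timer fires after POLL_WAIT and nv_do_nic_poll() resets the engines for the recoverable error before unmasking again */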
3583 } 3584#endif 3585 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 3586 3587 return IRQ_HANDLED; 3588} 3589 3590/* 3591 * All _optimized functions are used to help increase performance 3592 * (reduce CPU and increase throughput). They use descriptor version 3, 3593 * compiler directives, and fewer memory accesses. 3594 */ 3595static irqreturn_t nv_nic_irq_optimized(int foo, void *data) 3596{ 3597 struct net_device *dev = (struct net_device *) data; 3598 struct fe_priv *np = netdev_priv(dev); 3599 u8 __iomem *base = get_hwbase(dev); 3600#ifndef CONFIG_FORCEDETH_NAPI 3601 int total_work = 0; 3602 int loop_count = 0; 3603#endif 3604 3605 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name); 3606 3607 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3608 np->events = readl(base + NvRegIrqStatus); 3609 writel(np->events, base + NvRegIrqStatus); 3610 } else { 3611 np->events = readl(base + NvRegMSIXIrqStatus); 3612 writel(np->events, base + NvRegMSIXIrqStatus); 3613 } 3614 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events); 3615 if (!(np->events & np->irqmask)) 3616 return IRQ_NONE; 3617 3618 nv_msi_workaround(np); 3619 3620#ifdef CONFIG_FORCEDETH_NAPI 3621 if (napi_schedule_prep(&np->napi)) { 3622 /* 3623 * Disable further irq's (msix not enabled with napi) 3624 */ 3625 writel(0, base + NvRegIrqMask); 3626 __napi_schedule(&np->napi); 3627 } 3628#else 3629 do 3630 { 3631 int work = 0; 3632 if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) { 3633 if (unlikely(nv_alloc_rx_optimized(dev))) { 3634 spin_lock(&np->lock); 3635 if (!np->in_shutdown) 3636 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3637 spin_unlock(&np->lock); 3638 } 3639 } 3640 3641 spin_lock(&np->lock); 3642 work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3643 spin_unlock(&np->lock); 3644 3645 if (!work) 3646 break; 3647 3648 total_work += work; 3649 3650 loop_count++; 3651 } 3652 while (loop_count < max_interrupt_work); 3653 3654 if (nv_change_interrupt_mode(dev, total_work)) { 3655 /* setup new irq mask */ 3656 writel(np->irqmask, base + NvRegIrqMask); 3657 } 3658 3659 if (unlikely(np->events & NVREG_IRQ_LINK)) { 3660 spin_lock(&np->lock); 3661 nv_link_irq(dev); 3662 spin_unlock(&np->lock); 3663 } 3664 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3665 spin_lock(&np->lock); 3666 nv_linkchange(dev); 3667 spin_unlock(&np->lock); 3668 np->link_timeout = jiffies + LINK_TIMEOUT; 3669 } 3670 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { 3671 spin_lock(&np->lock); 3672 /* disable interrupts on the nic */ 3673 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3674 writel(0, base + NvRegIrqMask); 3675 else 3676 writel(np->irqmask, base + NvRegIrqMask); 3677 pci_push(base); 3678 3679 if (!np->in_shutdown) { 3680 np->nic_poll_irq = np->irqmask; 3681 np->recover_error = 1; 3682 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3683 } 3684 spin_unlock(&np->lock); 3685 } 3686 3687#endif 3688 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name); 3689 3690 return IRQ_HANDLED; 3691} 3692 3693static irqreturn_t nv_nic_irq_tx(int foo, void *data) 3694{ 3695 struct net_device *dev = (struct net_device *) data; 3696 struct fe_priv *np = netdev_priv(dev); 3697 u8 __iomem *base = get_hwbase(dev); 3698 u32 events; 3699 int i; 3700 unsigned long flags; 3701 3702 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 3703 3704 for (i=0; ; i++) { 3705 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3706 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 
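 /* ack only the tx bits: rx and link/timer events are left for the nv_nic_irq_rx and nv_nic_irq_other MSI-X handlers */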
3707 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 3708 if (!(events & np->irqmask)) 3709 break; 3710 3711 spin_lock_irqsave(&np->lock, flags); 3712 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3713 spin_unlock_irqrestore(&np->lock, flags); 3714 3715 if (unlikely(i > max_interrupt_work)) { 3716 spin_lock_irqsave(&np->lock, flags); 3717 /* disable interrupts on the nic */ 3718 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3719 pci_push(base); 3720 3721 if (!np->in_shutdown) { 3722 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 3723 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3724 } 3725 spin_unlock_irqrestore(&np->lock, flags); 3726 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 3727 break; 3728 } 3729 3730 } 3731 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); 3732 3733 return IRQ_RETVAL(i); 3734} 3735 3736#ifdef CONFIG_FORCEDETH_NAPI 3737static int nv_napi_poll(struct napi_struct *napi, int budget) 3738{ 3739 struct fe_priv *np = container_of(napi, struct fe_priv, napi); 3740 struct net_device *dev = np->dev; 3741 u8 __iomem *base = get_hwbase(dev); 3742 unsigned long flags; 3743 int retcode; 3744 int tx_work, rx_work; 3745 3746 if (!nv_optimized(np)) { 3747 spin_lock_irqsave(&np->lock, flags); 3748 tx_work = nv_tx_done(dev, np->tx_ring_size); 3749 spin_unlock_irqrestore(&np->lock, flags); 3750 3751 rx_work = nv_rx_process(dev, budget); 3752 retcode = nv_alloc_rx(dev); 3753 } else { 3754 spin_lock_irqsave(&np->lock, flags); 3755 tx_work = nv_tx_done_optimized(dev, np->tx_ring_size); 3756 spin_unlock_irqrestore(&np->lock, flags); 3757 3758 rx_work = nv_rx_process_optimized(dev, budget); 3759 retcode = nv_alloc_rx_optimized(dev); 3760 } 3761 3762 if (retcode) { 3763 spin_lock_irqsave(&np->lock, flags); 3764 if (!np->in_shutdown) 3765 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3766 spin_unlock_irqrestore(&np->lock, flags); 3767 } 3768 3769 nv_change_interrupt_mode(dev, tx_work + rx_work); 3770 3771 if (unlikely(np->events & NVREG_IRQ_LINK)) { 3772 spin_lock_irqsave(&np->lock, flags); 3773 nv_link_irq(dev); 3774 spin_unlock_irqrestore(&np->lock, flags); 3775 } 3776 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3777 spin_lock_irqsave(&np->lock, flags); 3778 nv_linkchange(dev); 3779 spin_unlock_irqrestore(&np->lock, flags); 3780 np->link_timeout = jiffies + LINK_TIMEOUT; 3781 } 3782 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { 3783 spin_lock_irqsave(&np->lock, flags); 3784 if (!np->in_shutdown) { 3785 np->nic_poll_irq = np->irqmask; 3786 np->recover_error = 1; 3787 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3788 } 3789 spin_unlock_irqrestore(&np->lock, flags); 3790 napi_complete(napi); 3791 return rx_work; 3792 } 3793 3794 if (rx_work < budget) { 3795 /* re-enable interrupts 3796 (msix not enabled in napi) */ 3797 napi_complete(napi); 3798 3799 writel(np->irqmask, base + NvRegIrqMask); 3800 } 3801 return rx_work; 3802} 3803#endif 3804 3805static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3806{ 3807 struct net_device *dev = (struct net_device *) data; 3808 struct fe_priv *np = netdev_priv(dev); 3809 u8 __iomem *base = get_hwbase(dev); 3810 u32 events; 3811 int i; 3812 unsigned long flags; 3813 3814 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 3815 3816 for (i=0; ; i++) { 3817 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3818 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3819 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 3820 if 
(!(events & np->irqmask)) 3821 break; 3822 3823 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { 3824 if (unlikely(nv_alloc_rx_optimized(dev))) { 3825 spin_lock_irqsave(&np->lock, flags); 3826 if (!np->in_shutdown) 3827 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3828 spin_unlock_irqrestore(&np->lock, flags); 3829 } 3830 } 3831 3832 if (unlikely(i > max_interrupt_work)) { 3833 spin_lock_irqsave(&np->lock, flags); 3834 /* disable interrupts on the nic */ 3835 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3836 pci_push(base); 3837 3838 if (!np->in_shutdown) { 3839 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 3840 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3841 } 3842 spin_unlock_irqrestore(&np->lock, flags); 3843 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 3844 break; 3845 } 3846 } 3847 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); 3848 3849 return IRQ_RETVAL(i); 3850} 3851 3852static irqreturn_t nv_nic_irq_other(int foo, void *data) 3853{ 3854 struct net_device *dev = (struct net_device *) data; 3855 struct fe_priv *np = netdev_priv(dev); 3856 u8 __iomem *base = get_hwbase(dev); 3857 u32 events; 3858 int i; 3859 unsigned long flags; 3860 3861 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 3862 3863 for (i=0; ; i++) { 3864 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3865 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3866 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3867 if (!(events & np->irqmask)) 3868 break; 3869 3870 /* check tx in case we reached max loop limit in tx isr */ 3871 spin_lock_irqsave(&np->lock, flags); 3872 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3873 spin_unlock_irqrestore(&np->lock, flags); 3874 3875 if (events & NVREG_IRQ_LINK) { 3876 spin_lock_irqsave(&np->lock, flags); 3877 nv_link_irq(dev); 3878 spin_unlock_irqrestore(&np->lock, flags); 3879 } 3880 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 3881 spin_lock_irqsave(&np->lock, flags); 3882 nv_linkchange(dev); 3883 spin_unlock_irqrestore(&np->lock, flags); 3884 np->link_timeout = jiffies + LINK_TIMEOUT; 3885 } 3886 if (events & NVREG_IRQ_RECOVER_ERROR) { 3887 spin_lock_irq(&np->lock); 3888 /* disable interrupts on the nic */ 3889 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3890 pci_push(base); 3891 3892 if (!np->in_shutdown) { 3893 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3894 np->recover_error = 1; 3895 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3896 } 3897 spin_unlock_irq(&np->lock); 3898 break; 3899 } 3900 if (unlikely(i > max_interrupt_work)) { 3901 spin_lock_irqsave(&np->lock, flags); 3902 /* disable interrupts on the nic */ 3903 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3904 pci_push(base); 3905 3906 if (!np->in_shutdown) { 3907 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3908 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3909 } 3910 spin_unlock_irqrestore(&np->lock, flags); 3911 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 3912 break; 3913 } 3914 3915 } 3916 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); 3917 3918 return IRQ_RETVAL(i); 3919} 3920 3921static irqreturn_t nv_nic_irq_test(int foo, void *data) 3922{ 3923 struct net_device *dev = (struct net_device *) data; 3924 struct fe_priv *np = netdev_priv(dev); 3925 u8 __iomem *base = get_hwbase(dev); 3926 u32 events; 3927 3928 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name); 3929 3930 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3931 events = readl(base + 
NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3932 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3933 } else { 3934 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3935 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3936 } 3937 pci_push(base); 3938 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3939 if (!(events & NVREG_IRQ_TIMER)) 3940 return IRQ_RETVAL(0); 3941 3942 nv_msi_workaround(np); 3943 3944 spin_lock(&np->lock); 3945 np->intr_test = 1; 3946 spin_unlock(&np->lock); 3947 3948 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); 3949 3950 return IRQ_RETVAL(1); 3951} 3952 3953static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) 3954{ 3955 u8 __iomem *base = get_hwbase(dev); 3956 int i; 3957 u32 msixmap = 0; 3958 3959 /* Each interrupt bit can be mapped to a MSIX vector (4 bits). 3960 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents 3961 * the remaining 8 interrupts. 3962 */ 3963 for (i = 0; i < 8; i++) { 3964 if ((irqmask >> i) & 0x1) { 3965 msixmap |= vector << (i << 2); 3966 } 3967 } 3968 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3969 3970 msixmap = 0; 3971 for (i = 0; i < 8; i++) { 3972 if ((irqmask >> (i + 8)) & 0x1) { 3973 msixmap |= vector << (i << 2); 3974 } 3975 } 3976 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3977} 3978 3979static int nv_request_irq(struct net_device *dev, int intr_test) 3980{ 3981 struct fe_priv *np = get_nvpriv(dev); 3982 u8 __iomem *base = get_hwbase(dev); 3983 int ret = 1; 3984 int i; 3985 irqreturn_t (*handler)(int foo, void *data); 3986 3987 if (intr_test) { 3988 handler = nv_nic_irq_test; 3989 } else { 3990 if (nv_optimized(np)) 3991 handler = nv_nic_irq_optimized; 3992 else 3993 handler = nv_nic_irq; 3994 } 3995 3996 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3997 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3998 np->msi_x_entry[i].entry = i; 3999 } 4000 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 4001 np->msi_flags |= NV_MSI_X_ENABLED; 4002 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 4003 /* Request irq for rx handling */ 4004 sprintf(np->name_rx, "%s-rx", dev->name); 4005 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, 4006 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) { 4007 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 4008 pci_disable_msix(np->pci_dev); 4009 np->msi_flags &= ~NV_MSI_X_ENABLED; 4010 goto out_err; 4011 } 4012 /* Request irq for tx handling */ 4013 sprintf(np->name_tx, "%s-tx", dev->name); 4014 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, 4015 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) { 4016 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 4017 pci_disable_msix(np->pci_dev); 4018 np->msi_flags &= ~NV_MSI_X_ENABLED; 4019 goto out_free_rx; 4020 } 4021 /* Request irq for link and timer handling */ 4022 sprintf(np->name_other, "%s-other", dev->name); 4023 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, 4024 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) { 4025 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 4026 pci_disable_msix(np->pci_dev); 4027 np->msi_flags &= ~NV_MSI_X_ENABLED; 4028 goto out_free_tx; 4029 } 4030 /* map interrupts to their respective vector */ 4031 writel(0, base + NvRegMSIXMap0); 4032 writel(0, base + NvRegMSIXMap1); 4033 
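 /* each of the 16 irq status bits owns a 4-bit nibble: NvRegMSIXMap0 holds bits 0-7 and NvRegMSIXMap1 bits 8-15, so e.g. routing status bit 1 to vector 1 sets nibble 1 of map0 to 0x1 */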
set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 4034 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 4035 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 4036 } else { 4037 /* Request irq for all interrupts */ 4038 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 4039 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 4040 pci_disable_msix(np->pci_dev); 4041 np->msi_flags &= ~NV_MSI_X_ENABLED; 4042 goto out_err; 4043 } 4044 4045 /* map interrupts to vector 0 */ 4046 writel(0, base + NvRegMSIXMap0); 4047 writel(0, base + NvRegMSIXMap1); 4048 } 4049 } 4050 } 4051 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 4052 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 4053 np->msi_flags |= NV_MSI_ENABLED; 4054 dev->irq = np->pci_dev->irq; 4055 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 4056 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 4057 pci_disable_msi(np->pci_dev); 4058 np->msi_flags &= ~NV_MSI_ENABLED; 4059 dev->irq = np->pci_dev->irq; 4060 goto out_err; 4061 } 4062 4063 /* map interrupts to vector 0 */ 4064 writel(0, base + NvRegMSIMap0); 4065 writel(0, base + NvRegMSIMap1); 4066 /* enable msi vector 0 */ 4067 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 4068 } 4069 } 4070 if (ret != 0) { 4071 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) 4072 goto out_err; 4073 4074 } 4075 4076 return 0; 4077out_free_tx: 4078 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); 4079out_free_rx: 4080 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); 4081out_err: 4082 return 1; 4083} 4084 4085static void nv_free_irq(struct net_device *dev) 4086{ 4087 struct fe_priv *np = get_nvpriv(dev); 4088 int i; 4089 4090 if (np->msi_flags & NV_MSI_X_ENABLED) { 4091 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 4092 free_irq(np->msi_x_entry[i].vector, dev); 4093 } 4094 pci_disable_msix(np->pci_dev); 4095 np->msi_flags &= ~NV_MSI_X_ENABLED; 4096 } else { 4097 free_irq(np->pci_dev->irq, dev); 4098 if (np->msi_flags & NV_MSI_ENABLED) { 4099 pci_disable_msi(np->pci_dev); 4100 np->msi_flags &= ~NV_MSI_ENABLED; 4101 } 4102 } 4103} 4104 4105static void nv_do_nic_poll(unsigned long data) 4106{ 4107 struct net_device *dev = (struct net_device *) data; 4108 struct fe_priv *np = netdev_priv(dev); 4109 u8 __iomem *base = get_hwbase(dev); 4110 u32 mask = 0; 4111 4112 /* 4113 * First disable the cpu-side irq(s), and only then reenable the 4114 * interrupt sources on the nic; this has to happen before calling 4115 * nv_nic_irq below, which may itself rewrite the irq mask 4116 */ 4117 4118 if (!using_multi_irqs(dev)) { 4119 if (np->msi_flags & NV_MSI_X_ENABLED) 4120 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 4121 else 4122 disable_irq_lockdep(np->pci_dev->irq); 4123 mask = np->irqmask; 4124 } else { 4125 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4126 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 4127 mask |= NVREG_IRQ_RX_ALL; 4128 } 4129 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4130 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 4131 mask |= NVREG_IRQ_TX_ALL; 4132 } 4133 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4134 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 4135 mask |= NVREG_IRQ_OTHER; 4136 } 4137 } 4138 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ 4139 4140 if (np->recover_error) { 4141 
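 /* recoverable-error path: with everything masked, stop the engines, reset the MAC where supported, drain and rebuild both rings, reprogram the hardware ring addresses and sizes, clear stale irq status, then restart rx/tx */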
np->recover_error = 0; 4142 printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name); 4143 if (netif_running(dev)) { 4144 netif_tx_lock_bh(dev); 4145 netif_addr_lock(dev); 4146 spin_lock(&np->lock); 4147 /* stop engines */ 4148 nv_stop_rxtx(dev); 4149 if (np->driver_data & DEV_HAS_POWER_CNTRL) 4150 nv_mac_reset(dev); 4151 nv_txrx_reset(dev); 4152 /* drain rx queue */ 4153 nv_drain_rxtx(dev); 4154 /* reinit driver view of the rx queue */ 4155 set_bufsize(dev); 4156 if (nv_init_ring(dev)) { 4157 if (!np->in_shutdown) 4158 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4159 } 4160 /* reinit nic view of the rx queue */ 4161 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4162 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4163 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4164 base + NvRegRingSizes); 4165 pci_push(base); 4166 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4167 pci_push(base); 4168 /* clear interrupts */ 4169 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 4170 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4171 else 4172 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4173 4174 /* restart rx engine */ 4175 nv_start_rxtx(dev); 4176 spin_unlock(&np->lock); 4177 netif_addr_unlock(dev); 4178 netif_tx_unlock_bh(dev); 4179 } 4180 } 4181 4182 writel(mask, base + NvRegIrqMask); 4183 pci_push(base); 4184 4185 if (!using_multi_irqs(dev)) { 4186 np->nic_poll_irq = 0; 4187 if (nv_optimized(np)) 4188 nv_nic_irq_optimized(0, dev); 4189 else 4190 nv_nic_irq(0, dev); 4191 if (np->msi_flags & NV_MSI_X_ENABLED) 4192 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 4193 else 4194 enable_irq_lockdep(np->pci_dev->irq); 4195 } else { 4196 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4197 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; 4198 nv_nic_irq_rx(0, dev); 4199 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 4200 } 4201 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4202 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; 4203 nv_nic_irq_tx(0, dev); 4204 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 4205 } 4206 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4207 np->nic_poll_irq &= ~NVREG_IRQ_OTHER; 4208 nv_nic_irq_other(0, dev); 4209 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 4210 } 4211 } 4212 4213} 4214 4215#ifdef CONFIG_NET_POLL_CONTROLLER 4216static void nv_poll_controller(struct net_device *dev) 4217{ 4218 nv_do_nic_poll((unsigned long) dev); 4219} 4220#endif 4221 4222static void nv_do_stats_poll(unsigned long data) 4223{ 4224 struct net_device *dev = (struct net_device *) data; 4225 struct fe_priv *np = netdev_priv(dev); 4226 4227 nv_get_hw_stats(dev); 4228 4229 if (!np->in_shutdown) 4230 mod_timer(&np->stats_poll, 4231 round_jiffies(jiffies + STATS_INTERVAL)); 4232} 4233 4234static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 4235{ 4236 struct fe_priv *np = netdev_priv(dev); 4237 strcpy(info->driver, DRV_NAME); 4238 strcpy(info->version, FORCEDETH_VERSION); 4239 strcpy(info->bus_info, pci_name(np->pci_dev)); 4240} 4241 4242static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4243{ 4244 struct fe_priv *np = netdev_priv(dev); 4245 wolinfo->supported = WAKE_MAGIC; 4246 4247 spin_lock_irq(&np->lock); 4248 if (np->wolenabled) 4249 wolinfo->wolopts = WAKE_MAGIC; 4250 spin_unlock_irq(&np->lock); 4251} 4252 4253static int nv_set_wol(struct net_device *dev, struct 
ethtool_wolinfo *wolinfo) 4254{ 4255 struct fe_priv *np = netdev_priv(dev); 4256 u8 __iomem *base = get_hwbase(dev); 4257 u32 flags = 0; 4258 4259 if (wolinfo->wolopts == 0) { 4260 np->wolenabled = 0; 4261 } else if (wolinfo->wolopts & WAKE_MAGIC) { 4262 np->wolenabled = 1; 4263 flags = NVREG_WAKEUPFLAGS_ENABLE; 4264 } 4265 if (netif_running(dev)) { 4266 spin_lock_irq(&np->lock); 4267 writel(flags, base + NvRegWakeUpFlags); 4268 spin_unlock_irq(&np->lock); 4269 } 4270 return 0; 4271} 4272 4273static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 4274{ 4275 struct fe_priv *np = netdev_priv(dev); 4276 int adv; 4277 4278 spin_lock_irq(&np->lock); 4279 ecmd->port = PORT_MII; 4280 if (!netif_running(dev)) { 4281 /* We do not track link speed / duplex setting if the 4282 * interface is disabled. Force a link check */ 4283 if (nv_update_linkspeed(dev)) { 4284 if (!netif_carrier_ok(dev)) 4285 netif_carrier_on(dev); 4286 } else { 4287 if (netif_carrier_ok(dev)) 4288 netif_carrier_off(dev); 4289 } 4290 } 4291 4292 if (netif_carrier_ok(dev)) { 4293 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 4294 case NVREG_LINKSPEED_10: 4295 ecmd->speed = SPEED_10; 4296 break; 4297 case NVREG_LINKSPEED_100: 4298 ecmd->speed = SPEED_100; 4299 break; 4300 case NVREG_LINKSPEED_1000: 4301 ecmd->speed = SPEED_1000; 4302 break; 4303 } 4304 ecmd->duplex = DUPLEX_HALF; 4305 if (np->duplex) 4306 ecmd->duplex = DUPLEX_FULL; 4307 } else { 4308 ecmd->speed = -1; 4309 ecmd->duplex = -1; 4310 } 4311 4312 ecmd->autoneg = np->autoneg; 4313 4314 ecmd->advertising = ADVERTISED_MII; 4315 if (np->autoneg) { 4316 ecmd->advertising |= ADVERTISED_Autoneg; 4317 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4318 if (adv & ADVERTISE_10HALF) 4319 ecmd->advertising |= ADVERTISED_10baseT_Half; 4320 if (adv & ADVERTISE_10FULL) 4321 ecmd->advertising |= ADVERTISED_10baseT_Full; 4322 if (adv & ADVERTISE_100HALF) 4323 ecmd->advertising |= ADVERTISED_100baseT_Half; 4324 if (adv & ADVERTISE_100FULL) 4325 ecmd->advertising |= ADVERTISED_100baseT_Full; 4326 if (np->gigabit == PHY_GIGABIT) { 4327 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4328 if (adv & ADVERTISE_1000FULL) 4329 ecmd->advertising |= ADVERTISED_1000baseT_Full; 4330 } 4331 } 4332 ecmd->supported = (SUPPORTED_Autoneg | 4333 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 4334 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 4335 SUPPORTED_MII); 4336 if (np->gigabit == PHY_GIGABIT) 4337 ecmd->supported |= SUPPORTED_1000baseT_Full; 4338 4339 ecmd->phy_address = np->phyaddr; 4340 ecmd->transceiver = XCVR_EXTERNAL; 4341 4342 /* ignore maxtxpkt, maxrxpkt for now */ 4343 spin_unlock_irq(&np->lock); 4344 return 0; 4345} 4346 4347static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 4348{ 4349 struct fe_priv *np = netdev_priv(dev); 4350 4351 if (ecmd->port != PORT_MII) 4352 return -EINVAL; 4353 if (ecmd->transceiver != XCVR_EXTERNAL) 4354 return -EINVAL; 4355 if (ecmd->phy_address != np->phyaddr) { 4356 /* TODO: support switching between multiple phys. Should be 4357 * trivial, but not enabled due to lack of test hardware. 
 */ 4358 return -EINVAL; 4359 } 4360 if (ecmd->autoneg == AUTONEG_ENABLE) { 4361 u32 mask; 4362 4363 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 4364 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 4365 if (np->gigabit == PHY_GIGABIT) 4366 mask |= ADVERTISED_1000baseT_Full; 4367 4368 if ((ecmd->advertising & mask) == 0) 4369 return -EINVAL; 4370 4371 } else if (ecmd->autoneg == AUTONEG_DISABLE) { 4372 /* Note: with autonegotiation disabled, speed 1000 is intentionally 4373 * forbidden - no one should need that. */ 4374 4375 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) 4376 return -EINVAL; 4377 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 4378 return -EINVAL; 4379 } else { 4380 return -EINVAL; 4381 } 4382 4383 netif_carrier_off(dev); 4384 if (netif_running(dev)) { 4385 unsigned long flags; 4386 4387 nv_disable_irq(dev); 4388 netif_tx_lock_bh(dev); 4389 netif_addr_lock(dev); 4390 /* with plain spinlock lockdep complains */ 4391 spin_lock_irqsave(&np->lock, flags); 4392 /* stop engines */ 4393 /* FIXME: 4394 * this can take some time, and interrupts are disabled 4395 * due to spin_lock_irqsave, but let's hope no daemon 4396 * is going to change the settings very often... 4397 * Worst case: 4398 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX 4399 * + some minor delays, which is up to a second approximately 4400 */ 4401 nv_stop_rxtx(dev); 4402 spin_unlock_irqrestore(&np->lock, flags); 4403 netif_addr_unlock(dev); 4404 netif_tx_unlock_bh(dev); 4405 } 4406 4407 if (ecmd->autoneg == AUTONEG_ENABLE) { 4408 int adv, bmcr; 4409 4410 np->autoneg = 1; 4411 4412 /* advertise only what has been requested */ 4413 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4414 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4415 if (ecmd->advertising & ADVERTISED_10baseT_Half) 4416 adv |= ADVERTISE_10HALF; 4417 if (ecmd->advertising & ADVERTISED_10baseT_Full) 4418 adv |= ADVERTISE_10FULL; 4419 if (ecmd->advertising & ADVERTISED_100baseT_Half) 4420 adv |= ADVERTISE_100HALF; 4421 if (ecmd->advertising & ADVERTISED_100baseT_Full) 4422 adv |= ADVERTISE_100FULL; 4423 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 4424 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4425 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4426 adv |= ADVERTISE_PAUSE_ASYM; 4427 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4428 4429 if (np->gigabit == PHY_GIGABIT) { 4430 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4431 adv &= ~ADVERTISE_1000FULL; 4432 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 4433 adv |= ADVERTISE_1000FULL; 4434 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4435 } 4436 4437 if (netif_running(dev)) 4438 printk(KERN_INFO "%s: link down.\n", dev->name); 4439 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4440 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4441 bmcr |= BMCR_ANENABLE; 4442 /* reset the phy in order for settings to stick, 4443 * and cause autoneg to start */ 4444 if (phy_reset(dev, bmcr)) { 4445 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4446 return -EINVAL; 4447 } 4448 } else { 4449 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4450 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4451 } 4452 } else { 4453 int adv, bmcr; 4454 4455 np->autoneg = 0; 4456 4457 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4458 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4459 if (ecmd->speed == SPEED_10 && 
ecmd->duplex == DUPLEX_HALF) 4460 adv |= ADVERTISE_10HALF; 4461 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) 4462 adv |= ADVERTISE_10FULL; 4463 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) 4464 adv |= ADVERTISE_100HALF; 4465 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) 4466 adv |= ADVERTISE_100FULL; 4467 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4468 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ 4469 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4470 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4471 } 4472 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { 4473 adv |= ADVERTISE_PAUSE_ASYM; 4474 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4475 } 4476 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4477 np->fixed_mode = adv; 4478 4479 if (np->gigabit == PHY_GIGABIT) { 4480 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4481 adv &= ~ADVERTISE_1000FULL; 4482 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4483 } 4484 4485 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4486 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); 4487 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) 4488 bmcr |= BMCR_FULLDPLX; 4489 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) 4490 bmcr |= BMCR_SPEED100; 4491 if (np->phy_oui == PHY_OUI_MARVELL) { 4492 /* reset the phy in order for forced mode settings to stick */ 4493 if (phy_reset(dev, bmcr)) { 4494 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4495 return -EINVAL; 4496 } 4497 } else { 4498 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4499 if (netif_running(dev)) { 4500 /* Wait a bit and then reconfigure the nic. */ 4501 udelay(10); 4502 nv_linkchange(dev); 4503 } 4504 } 4505 } 4506 4507 if (netif_running(dev)) { 4508 nv_start_rxtx(dev); 4509 nv_enable_irq(dev); 4510 } 4511 4512 return 0; 4513} 4514 4515#define FORCEDETH_REGS_VER 1 4516 4517static int nv_get_regs_len(struct net_device *dev) 4518{ 4519 struct fe_priv *np = netdev_priv(dev); 4520 return np->register_size; 4521} 4522 4523static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 4524{ 4525 struct fe_priv *np = netdev_priv(dev); 4526 u8 __iomem *base = get_hwbase(dev); 4527 u32 *rbuf = buf; 4528 int i; 4529 4530 regs->version = FORCEDETH_REGS_VER; 4531 spin_lock_irq(&np->lock); 4532 for (i = 0; i < np->register_size/sizeof(u32); i++) /* rbuf holds exactly register_size bytes */ 4533 rbuf[i] = readl(base + i*sizeof(u32)); 4534 spin_unlock_irq(&np->lock); 4535} 4536 4537static int nv_nway_reset(struct net_device *dev) 4538{ 4539 struct fe_priv *np = netdev_priv(dev); 4540 int ret; 4541 4542 if (np->autoneg) { 4543 int bmcr; 4544 4545 netif_carrier_off(dev); 4546 if (netif_running(dev)) { 4547 nv_disable_irq(dev); 4548 netif_tx_lock_bh(dev); 4549 netif_addr_lock(dev); 4550 spin_lock(&np->lock); 4551 /* stop engines */ 4552 nv_stop_rxtx(dev); 4553 spin_unlock(&np->lock); 4554 netif_addr_unlock(dev); 4555 netif_tx_unlock_bh(dev); 4556 printk(KERN_INFO "%s: link down.\n", dev->name); 4557 } 4558 4559 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4560 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4561 bmcr |= BMCR_ANENABLE; 4562 /* reset the phy in order for settings to stick */ 4563 if (phy_reset(dev, bmcr)) { 4564 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4565 return -EINVAL; 4566 } 4567 } else { 4568 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4569 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4570 } 
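 /* on most phys setting BMCR_ANENABLE|BMCR_ANRESTART is enough to kick off a new negotiation cycle; the Marvell E3016 handled above only picks the settings up across a full phy reset */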
4571 4572 if (netif_running(dev)) { 4573 nv_start_rxtx(dev); 4574 nv_enable_irq(dev); 4575 } 4576 ret = 0; 4577 } else { 4578 ret = -EINVAL; 4579 } 4580 4581 return ret; 4582} 4583 4584static int nv_set_tso(struct net_device *dev, u32 value) 4585{ 4586 struct fe_priv *np = netdev_priv(dev); 4587 4588 if ((np->driver_data & DEV_HAS_CHECKSUM)) 4589 return ethtool_op_set_tso(dev, value); 4590 else 4591 return -EOPNOTSUPP; 4592} 4593 4594static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4595{ 4596 struct fe_priv *np = netdev_priv(dev); 4597 4598 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4599 ring->rx_mini_max_pending = 0; 4600 ring->rx_jumbo_max_pending = 0; 4601 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4602 4603 ring->rx_pending = np->rx_ring_size; 4604 ring->rx_mini_pending = 0; 4605 ring->rx_jumbo_pending = 0; 4606 ring->tx_pending = np->tx_ring_size; 4607} 4608 4609static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4610{ 4611 struct fe_priv *np = netdev_priv(dev); 4612 u8 __iomem *base = get_hwbase(dev); 4613 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; 4614 dma_addr_t ring_addr; 4615 4616 if (ring->rx_pending < RX_RING_MIN || 4617 ring->tx_pending < TX_RING_MIN || 4618 ring->rx_mini_pending != 0 || 4619 ring->rx_jumbo_pending != 0 || 4620 (np->desc_ver == DESC_VER_1 && 4621 (ring->rx_pending > RING_MAX_DESC_VER_1 || 4622 ring->tx_pending > RING_MAX_DESC_VER_1)) || 4623 (np->desc_ver != DESC_VER_1 && 4624 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 4625 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { 4626 return -EINVAL; 4627 } 4628 4629 /* allocate new rings */ 4630 if (!nv_optimized(np)) { 4631 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4632 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4633 &ring_addr); 4634 } else { 4635 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4636 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4637 &ring_addr); 4638 } 4639 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); 4640 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); 4641 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { 4642 /* fall back to old rings */ 4643 if (!nv_optimized(np)) { 4644 if (rxtx_ring) 4645 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4646 rxtx_ring, ring_addr); 4647 } else { 4648 if (rxtx_ring) 4649 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4650 rxtx_ring, ring_addr); 4651 } 4652 if (rx_skbuff) 4653 kfree(rx_skbuff); 4654 if (tx_skbuff) 4655 kfree(tx_skbuff); 4656 goto exit; 4657 } 4658 4659 if (netif_running(dev)) { 4660 nv_disable_irq(dev); 4661 nv_napi_disable(dev); 4662 netif_tx_lock_bh(dev); 4663 netif_addr_lock(dev); 4664 spin_lock(&np->lock); 4665 /* stop engines */ 4666 nv_stop_rxtx(dev); 4667 nv_txrx_reset(dev); 4668 /* drain queues */ 4669 nv_drain_rxtx(dev); 4670 /* delete queues */ 4671 free_rings(dev); 4672 } 4673 4674 /* set new values */ 4675 np->rx_ring_size = ring->rx_pending; 4676 np->tx_ring_size = ring->tx_pending; 4677 4678 if (!nv_optimized(np)) { 4679 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4680 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4681 } else { 4682 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4683 np->tx_ring.ex = 
&np->rx_ring.ex[np->rx_ring_size]; 4684 } 4685 np->rx_skb = (struct nv_skb_map*)rx_skbuff; 4686 np->tx_skb = (struct nv_skb_map*)tx_skbuff; 4687 np->ring_addr = ring_addr; 4688 4689 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4690 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); 4691 4692 if (netif_running(dev)) { 4693 /* reinit driver view of the queues */ 4694 set_bufsize(dev); 4695 if (nv_init_ring(dev)) { 4696 if (!np->in_shutdown) 4697 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4698 } 4699 4700 /* reinit nic view of the queues */ 4701 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4702 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4703 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4704 base + NvRegRingSizes); 4705 pci_push(base); 4706 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4707 pci_push(base); 4708 4709 /* restart engines */ 4710 nv_start_rxtx(dev); 4711 spin_unlock(&np->lock); 4712 netif_addr_unlock(dev); 4713 netif_tx_unlock_bh(dev); 4714 nv_napi_enable(dev); 4715 nv_enable_irq(dev); 4716 } 4717 return 0; 4718exit: 4719 return -ENOMEM; 4720} 4721 4722static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4723{ 4724 struct fe_priv *np = netdev_priv(dev); 4725 4726 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; 4727 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; 4728 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; 4729} 4730 4731static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4732{ 4733 struct fe_priv *np = netdev_priv(dev); 4734 int adv, bmcr; 4735 4736 if ((!np->autoneg && np->duplex == 0) || 4737 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4738 printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n", 4739 dev->name); 4740 return -EINVAL; 4741 } 4742 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4743 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 4744 return -EINVAL; 4745 } 4746 4747 netif_carrier_off(dev); 4748 if (netif_running(dev)) { 4749 nv_disable_irq(dev); 4750 netif_tx_lock_bh(dev); 4751 netif_addr_lock(dev); 4752 spin_lock(&np->lock); 4753 /* stop engines */ 4754 nv_stop_rxtx(dev); 4755 spin_unlock(&np->lock); 4756 netif_addr_unlock(dev); 4757 netif_tx_unlock_bh(dev); 4758 } 4759 4760 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); 4761 if (pause->rx_pause) 4762 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; 4763 if (pause->tx_pause) 4764 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; 4765 4766 if (np->autoneg && pause->autoneg) { 4767 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; 4768 4769 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4770 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4771 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 4772 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4773 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4774 adv |= ADVERTISE_PAUSE_ASYM; 4775 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4776 4777 if (netif_running(dev)) 4778 printk(KERN_INFO "%s: link down.\n", dev->name); 4779 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4780 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4781 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4782 } else { 4783 np->pause_flags &= 
~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4784 if (pause->rx_pause) 4785 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4786 if (pause->tx_pause) 4787 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4788 4789 if (!netif_running(dev)) 4790 nv_update_linkspeed(dev); 4791 else 4792 nv_update_pause(dev, np->pause_flags); 4793 } 4794 4795 if (netif_running(dev)) { 4796 nv_start_rxtx(dev); 4797 nv_enable_irq(dev); 4798 } 4799 return 0; 4800} 4801 4802static u32 nv_get_rx_csum(struct net_device *dev) 4803{ 4804 struct fe_priv *np = netdev_priv(dev); 4805 return (np->rx_csum) != 0; 4806} 4807 4808static int nv_set_rx_csum(struct net_device *dev, u32 data) 4809{ 4810 struct fe_priv *np = netdev_priv(dev); 4811 u8 __iomem *base = get_hwbase(dev); 4812 int retcode = 0; 4813 4814 if (np->driver_data & DEV_HAS_CHECKSUM) { 4815 if (data) { 4816 np->rx_csum = 1; 4817 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4818 } else { 4819 np->rx_csum = 0; 4820 /* vlan is dependent on rx checksum offload */ 4821 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) 4822 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; 4823 } 4824 if (netif_running(dev)) { 4825 spin_lock_irq(&np->lock); 4826 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4827 spin_unlock_irq(&np->lock); 4828 } 4829 } else { 4830 return -EINVAL; 4831 } 4832 4833 return retcode; 4834} 4835 4836static int nv_set_tx_csum(struct net_device *dev, u32 data) 4837{ 4838 struct fe_priv *np = netdev_priv(dev); 4839 4840 if (np->driver_data & DEV_HAS_CHECKSUM) 4841 return ethtool_op_set_tx_csum(dev, data); 4842 else 4843 return -EOPNOTSUPP; 4844} 4845 4846static int nv_set_sg(struct net_device *dev, u32 data) 4847{ 4848 struct fe_priv *np = netdev_priv(dev); 4849 4850 if (np->driver_data & DEV_HAS_CHECKSUM) 4851 return ethtool_op_set_sg(dev, data); 4852 else 4853 return -EOPNOTSUPP; 4854} 4855 4856static int nv_get_sset_count(struct net_device *dev, int sset) 4857{ 4858 struct fe_priv *np = netdev_priv(dev); 4859 4860 switch (sset) { 4861 case ETH_SS_TEST: 4862 if (np->driver_data & DEV_HAS_TEST_EXTENDED) 4863 return NV_TEST_COUNT_EXTENDED; 4864 else 4865 return NV_TEST_COUNT_BASE; 4866 case ETH_SS_STATS: 4867 if (np->driver_data & DEV_HAS_STATISTICS_V3) 4868 return NV_DEV_STATISTICS_V3_COUNT; 4869 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4870 return NV_DEV_STATISTICS_V2_COUNT; 4871 else if (np->driver_data & DEV_HAS_STATISTICS_V1) 4872 return NV_DEV_STATISTICS_V1_COUNT; 4873 else 4874 return 0; 4875 default: 4876 return -EOPNOTSUPP; 4877 } 4878} 4879 4880static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) 4881{ 4882 struct fe_priv *np = netdev_priv(dev); 4883 4884 /* update stats */ 4885 nv_do_stats_poll((unsigned long)dev); 4886 4887 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); 4888} 4889 4890static int nv_link_test(struct net_device *dev) 4891{ 4892 struct fe_priv *np = netdev_priv(dev); 4893 int mii_status; 4894 4895 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4896 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4897 4898 /* check phy link status */ 4899 if (!(mii_status & BMSR_LSTATUS)) 4900 return 0; 4901 else 4902 return 1; 4903} 4904 4905static int nv_register_test(struct net_device *dev) 4906{ 4907 u8 __iomem *base = get_hwbase(dev); 4908 int i = 0; 4909 u32 orig_read, new_read; 4910 4911 do { 4912 orig_read = readl(base + nv_registers_test[i].reg); 4913 4914 /* xor with mask to toggle bits */ 4915 orig_read ^= 
nv_registers_test[i].mask; 4916 4917 writel(orig_read, base + nv_registers_test[i].reg); 4918 4919 new_read = readl(base + nv_registers_test[i].reg); 4920 4921 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) 4922 return 0; 4923 4924 /* restore original value */ 4925 orig_read ^= nv_registers_test[i].mask; 4926 writel(orig_read, base + nv_registers_test[i].reg); 4927 4928 } while (nv_registers_test[++i].reg != 0); 4929 4930 return 1; 4931} 4932 4933static int nv_interrupt_test(struct net_device *dev) 4934{ 4935 struct fe_priv *np = netdev_priv(dev); 4936 u8 __iomem *base = get_hwbase(dev); 4937 int ret = 1; 4938 int testcnt; 4939 u32 save_msi_flags, save_poll_interval = 0; 4940 4941 if (netif_running(dev)) { 4942 /* free current irq */ 4943 nv_free_irq(dev); 4944 save_poll_interval = readl(base+NvRegPollingInterval); 4945 } 4946 4947 /* flag to test interrupt handler */ 4948 np->intr_test = 0; 4949 4950 /* setup test irq */ 4951 save_msi_flags = np->msi_flags; 4952 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; 4953 np->msi_flags |= 0x001; /* setup 1 vector */ 4954 if (nv_request_irq(dev, 1)) 4955 return 0; 4956 4957 /* setup timer interrupt */ 4958 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 4959 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4960 4961 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4962 4963 /* wait for at least one interrupt */ 4964 msleep(100); 4965 4966 spin_lock_irq(&np->lock); 4967 4968 /* flag should be set within ISR */ 4969 testcnt = np->intr_test; 4970 if (!testcnt) 4971 ret = 2; 4972 4973 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4974 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 4975 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4976 else 4977 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4978 4979 spin_unlock_irq(&np->lock); 4980 4981 nv_free_irq(dev); 4982 4983 np->msi_flags = save_msi_flags; 4984 4985 if (netif_running(dev)) { 4986 writel(save_poll_interval, base + NvRegPollingInterval); 4987 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4988 /* restore original irq */ 4989 if (nv_request_irq(dev, 0)) 4990 return 0; 4991 } 4992 4993 return ret; 4994} 4995 4996static int nv_loopback_test(struct net_device *dev) 4997{ 4998 struct fe_priv *np = netdev_priv(dev); 4999 u8 __iomem *base = get_hwbase(dev); 5000 struct sk_buff *tx_skb, *rx_skb; 5001 dma_addr_t test_dma_addr; 5002 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? 
NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 5003 u32 flags; 5004 int len, i, pkt_len; 5005 u8 *pkt_data; 5006 u32 filter_flags = 0; 5007 u32 misc1_flags = 0; 5008 int ret = 1; 5009 5010 if (netif_running(dev)) { 5011 nv_disable_irq(dev); 5012 filter_flags = readl(base + NvRegPacketFilterFlags); 5013 misc1_flags = readl(base + NvRegMisc1); 5014 } else { 5015 nv_txrx_reset(dev); 5016 } 5017 5018 /* reinit driver view of the rx queue */ 5019 set_bufsize(dev); 5020 nv_init_ring(dev); 5021 5022 /* setup hardware for loopback */ 5023 writel(NVREG_MISC1_FORCE, base + NvRegMisc1); 5024 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); 5025 5026 /* reinit nic view of the rx queue */ 5027 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5028 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5029 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5030 base + NvRegRingSizes); 5031 pci_push(base); 5032 5033 /* restart rx engine */ 5034 nv_start_rxtx(dev); 5035 5036 /* setup packet for tx */ 5037 pkt_len = ETH_DATA_LEN; 5038 tx_skb = dev_alloc_skb(pkt_len); 5039 if (!tx_skb) { 5040 printk(KERN_ERR "dev_alloc_skb() failed during loopback test" 5041 " of %s\n", dev->name); 5042 ret = 0; 5043 goto out; 5044 } 5045 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, 5046 skb_tailroom(tx_skb), 5047 PCI_DMA_TODEVICE); /* the nic reads this buffer; direction must match the unmap below */ 5048 pkt_data = skb_put(tx_skb, pkt_len); 5049 for (i = 0; i < pkt_len; i++) 5050 pkt_data[i] = (u8)(i & 0xff); 5051 5052 if (!nv_optimized(np)) { 5053 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 5054 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 5055 } else { 5056 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); 5057 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); 5058 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 5059 } 5060 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5061 pci_push(get_hwbase(dev)); 5062 5063 msleep(500); 5064 5065 /* check for rx of the packet */ 5066 if (!nv_optimized(np)) { 5067 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 5068 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 5069 5070 } else { 5071 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); 5072 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 5073 } 5074 5075 if (flags & NV_RX_AVAIL) { 5076 ret = 0; 5077 } else if (np->desc_ver == DESC_VER_1) { 5078 if (flags & NV_RX_ERROR) 5079 ret = 0; 5080 } else { 5081 if (flags & NV_RX2_ERROR) { 5082 ret = 0; 5083 } 5084 } 5085 5086 if (ret) { 5087 if (len != pkt_len) { 5088 ret = 0; 5089 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 5090 dev->name, len, pkt_len); 5091 } else { 5092 rx_skb = np->rx_skb[0].skb; 5093 for (i = 0; i < pkt_len; i++) { 5094 if (rx_skb->data[i] != (u8)(i & 0xff)) { 5095 ret = 0; 5096 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", 5097 dev->name, i); 5098 break; 5099 } 5100 } 5101 } 5102 } else { 5103 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); 5104 } 5105 5106 pci_unmap_single(np->pci_dev, test_dma_addr, 5107 (skb_end_pointer(tx_skb) - tx_skb->data), 5108 PCI_DMA_TODEVICE); 5109 dev_kfree_skb_any(tx_skb); 5110 out: 5111 /* stop engines */ 5112 nv_stop_rxtx(dev); 5113 nv_txrx_reset(dev); 5114 /* drain rx queue */ 5115 nv_drain_rxtx(dev); 5116 5117 if (netif_running(dev)) { 5118 writel(misc1_flags, base 
+ NvRegMisc1); 5119 writel(filter_flags, base + NvRegPacketFilterFlags); 5120 nv_enable_irq(dev); 5121 } 5122 5123 return ret; 5124} 5125 5126static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) 5127{ 5128 struct fe_priv *np = netdev_priv(dev); 5129 u8 __iomem *base = get_hwbase(dev); 5130 int result; 5131 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); 5132 5133 if (!nv_link_test(dev)) { 5134 test->flags |= ETH_TEST_FL_FAILED; 5135 buffer[0] = 1; 5136 } 5137 5138 if (test->flags & ETH_TEST_FL_OFFLINE) { 5139 if (netif_running(dev)) { 5140 netif_stop_queue(dev); 5141 nv_napi_disable(dev); 5142 netif_tx_lock_bh(dev); 5143 netif_addr_lock(dev); 5144 spin_lock_irq(&np->lock); 5145 nv_disable_hw_interrupts(dev, np->irqmask); 5146 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 5147 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5148 } else { 5149 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 5150 } 5151 /* stop engines */ 5152 nv_stop_rxtx(dev); 5153 nv_txrx_reset(dev); 5154 /* drain rx queue */ 5155 nv_drain_rxtx(dev); 5156 spin_unlock_irq(&np->lock); 5157 netif_addr_unlock(dev); 5158 netif_tx_unlock_bh(dev); 5159 } 5160 5161 if (!nv_register_test(dev)) { 5162 test->flags |= ETH_TEST_FL_FAILED; 5163 buffer[1] = 1; 5164 } 5165 5166 result = nv_interrupt_test(dev); 5167 if (result != 1) { 5168 test->flags |= ETH_TEST_FL_FAILED; 5169 buffer[2] = 1; 5170 } 5171 if (result == 0) { 5172 /* bail out */ 5173 return; 5174 } 5175 5176 if (!nv_loopback_test(dev)) { 5177 test->flags |= ETH_TEST_FL_FAILED; 5178 buffer[3] = 1; 5179 } 5180 5181 if (netif_running(dev)) { 5182 /* reinit driver view of the rx queue */ 5183 set_bufsize(dev); 5184 if (nv_init_ring(dev)) { 5185 if (!np->in_shutdown) 5186 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5187 } 5188 /* reinit nic view of the rx queue */ 5189 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5190 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5191 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5192 base + NvRegRingSizes); 5193 pci_push(base); 5194 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5195 pci_push(base); 5196 /* restart rx engine */ 5197 nv_start_rxtx(dev); 5198 netif_start_queue(dev); 5199 nv_napi_enable(dev); 5200 nv_enable_hw_interrupts(dev, np->irqmask); 5201 } 5202 } 5203} 5204 5205static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) 5206{ 5207 switch (stringset) { 5208 case ETH_SS_STATS: 5209 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); 5210 break; 5211 case ETH_SS_TEST: 5212 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); 5213 break; 5214 } 5215} 5216 5217static const struct ethtool_ops ops = { 5218 .get_drvinfo = nv_get_drvinfo, 5219 .get_link = ethtool_op_get_link, 5220 .get_wol = nv_get_wol, 5221 .set_wol = nv_set_wol, 5222 .get_settings = nv_get_settings, 5223 .set_settings = nv_set_settings, 5224 .get_regs_len = nv_get_regs_len, 5225 .get_regs = nv_get_regs, 5226 .nway_reset = nv_nway_reset, 5227 .set_tso = nv_set_tso, 5228 .get_ringparam = nv_get_ringparam, 5229 .set_ringparam = nv_set_ringparam, 5230 .get_pauseparam = nv_get_pauseparam, 5231 .set_pauseparam = nv_set_pauseparam, 5232 .get_rx_csum = nv_get_rx_csum, 5233 .set_rx_csum = nv_set_rx_csum, 5234 .set_tx_csum = nv_set_tx_csum, 5235 .set_sg = nv_set_sg, 5236 .get_strings = 
nv_get_strings, 5237 .get_ethtool_stats = nv_get_ethtool_stats, 5238 .get_sset_count = nv_get_sset_count, 5239 .self_test = nv_self_test, 5240}; 5241 5242static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 5243{ 5244 struct fe_priv *np = get_nvpriv(dev); 5245 5246 spin_lock_irq(&np->lock); 5247 5248 /* save vlan group */ 5249 np->vlangrp = grp; 5250 5251 if (grp) { 5252 /* enable vlan on MAC */ 5253 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; 5254 } else { 5255 /* disable vlan on MAC */ 5256 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 5257 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 5258 } 5259 5260 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5261 5262 spin_unlock_irq(&np->lock); 5263} 5264 5265/* The mgmt unit and driver use a semaphore to access the phy during init */ 5266static int nv_mgmt_acquire_sema(struct net_device *dev) 5267{ 5268 struct fe_priv *np = netdev_priv(dev); 5269 u8 __iomem *base = get_hwbase(dev); 5270 int i; 5271 u32 tx_ctrl, mgmt_sema; 5272 5273 for (i = 0; i < 10; i++) { 5274 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; 5275 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) 5276 break; 5277 msleep(500); 5278 } 5279 5280 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) 5281 return 0; 5282 5283 for (i = 0; i < 2; i++) { 5284 tx_ctrl = readl(base + NvRegTransmitterControl); 5285 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; 5286 writel(tx_ctrl, base + NvRegTransmitterControl); 5287 5288 /* verify that semaphore was acquired */ 5289 tx_ctrl = readl(base + NvRegTransmitterControl); 5290 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && 5291 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { 5292 np->mgmt_sema = 1; 5293 return 1; 5294 } 5295 else 5296 udelay(50); 5297 } 5298 5299 return 0; 5300} 5301 5302static void nv_mgmt_release_sema(struct net_device *dev) 5303{ 5304 struct fe_priv *np = netdev_priv(dev); 5305 u8 __iomem *base = get_hwbase(dev); 5306 u32 tx_ctrl; 5307 5308 if (np->driver_data & DEV_HAS_MGMT_UNIT) { 5309 if (np->mgmt_sema) { 5310 tx_ctrl = readl(base + NvRegTransmitterControl); 5311 tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ; 5312 writel(tx_ctrl, base + NvRegTransmitterControl); 5313 } 5314 } 5315} 5316 5317 5318static int nv_mgmt_get_version(struct net_device *dev) 5319{ 5320 struct fe_priv *np = netdev_priv(dev); 5321 u8 __iomem *base = get_hwbase(dev); 5322 u32 data_ready = readl(base + NvRegTransmitterControl); 5323 u32 data_ready2 = 0; 5324 unsigned long start; 5325 int ready = 0; 5326 5327 writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion); 5328 writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl); 5329 start = jiffies; 5330 while (time_before(jiffies, start + 5*HZ)) { 5331 data_ready2 = readl(base + NvRegTransmitterControl); 5332 if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) { 5333 ready = 1; 5334 break; 5335 } 5336 schedule_timeout_uninterruptible(1); 5337 } 5338 5339 if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR)) 5340 return 0; 5341 5342 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION; 5343 5344 return 1; 5345} 5346 5347static int nv_open(struct net_device *dev) 5348{ 5349 struct fe_priv *np = netdev_priv(dev); 5350 u8 __iomem *base = get_hwbase(dev); 5351 int ret = 1; 5352 int oom, i; 5353 u32 low; 5354 5355 dprintk(KERN_DEBUG "nv_open: begin\n"); 5356 5357 /* power up phy 
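 * (clear BMCR_PDOWN; nv_close may have left the phy powered down when
 * the phy_power_down module parameter is set and wol is disabled)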
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;
	u32 low;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* power up phy */
	mii_rw(dev, np->phyaddr, MII_BMCR,
	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);

	nv_txrx_gate(dev, false);
	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
		  NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
		  KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);

	get_random_bytes(&low, sizeof(low));
	low &= NVREG_SLOTTIME_MASK;
	if (np->desc_ver == DESC_VER_1) {
		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
	} else {
		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
			/* setup legacy backoff */
			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
		} else {
			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
			nv_gear_backoff_reseed(dev);
		}
	}
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	} else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
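	/*
	 * Note (derived from the poll_interval module parameter
	 * description below): the polling interval register counts in
	 * units of 2^10/100 = 10.24 us, so a register value of 97
	 * corresponds to roughly 1 ms between timer interrupts.
	 */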
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
	       base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0)) {
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rxtx(dev);
	netif_start_queue(dev);
	nv_napi_enable(dev);

	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		mod_timer(&np->stats_poll,
			  round_jiffies(jiffies + STATS_INTERVAL));

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	nv_drain_rxtx(dev);
	return ret;
}
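/*
 * Rough bring-up order in nv_open() above: power up the phy, wipe any
 * previous MAC configuration, initialize and hand the descriptor rings
 * to the hardware, program link/watermark/backoff registers, request
 * the IRQ, and only then enable hardware interrupts and start rx/tx.
 * The single manual link speed update covers the window before the
 * first link interrupt can arrive.
 */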
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	nv_napi_disable(dev);
	synchronize_irq(np->pci_dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	nv_drain_rxtx(dev);

	if (np->wolenabled || !phy_power_down) {
		nv_txrx_gate(dev, false);
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	} else {
		/* power down phy */
		mii_rw(dev, np->phyaddr, MII_BMCR,
		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
		nv_txrx_gate(dev, true);
	}

	/* FIXME: power down nic */

	return 0;
}

static const struct net_device_ops nv_netdev_ops = {
	.ndo_open		= nv_open,
	.ndo_stop		= nv_close,
	.ndo_get_stats		= nv_get_stats,
	.ndo_start_xmit		= nv_start_xmit,
	.ndo_tx_timeout		= nv_tx_timeout,
	.ndo_change_mtu		= nv_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= nv_set_mac_address,
	.ndo_set_multicast_list	= nv_set_multicast,
	.ndo_vlan_rx_register	= nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= nv_poll_controller,
#endif
};

static const struct net_device_ops nv_netdev_ops_optimized = {
	.ndo_open		= nv_open,
	.ndo_stop		= nv_close,
	.ndo_get_stats		= nv_get_stats,
	.ndo_start_xmit		= nv_start_xmit_optimized,
	.ndo_tx_timeout		= nv_tx_timeout,
	.ndo_change_mtu		= nv_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= nv_set_mac_address,
	.ndo_set_multicast_list	= nv_set_multicast,
	.ndo_vlan_rx_register	= nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= nv_poll_controller,
#endif
};
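/*
 * The two ops tables above differ only in .ndo_start_xmit: nv_probe()
 * selects nv_netdev_ops_optimized when nv_optimized() reports the
 * extended (ring_desc_ex) descriptor format, and nv_netdev_ops for the
 * legacy ring_desc format.
 */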
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;
	static int printed_version;

	if (!printed_version++)
		printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
		       " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->dev = dev;
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err)
		goto out_free;

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		np->register_size = NV_PCI_REGSZ_VER3;
	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
			pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
			pci_resource_len(pci_dev, i),
			pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
		    pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "Couldn't find register window\n");
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;
	/* copy of device id */
	np->device_id = id->device;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
				dev_printk(KERN_INFO, &pci_dev->dev,
					   "64-bit DMA failed, using 32-bit addressing\n");
			else
				dev->features |= NETIF_F_HIGHDMA;
			if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
				dev_printk(KERN_INFO, &pci_dev->dev,
					   "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->rx_csum = 1;
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
	}
	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;

	if (!nv_optimized(np))
		dev->netdev_ops = &nv_netdev_ops;
	else
		dev->netdev_ops = &nv_netdev_ops_optimized;

#ifdef CONFIG_FORCEDETH_NAPI
	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
		/*
		 * Set orig mac address back to the reversed version.
		 * This flag will be cleared during low power transition.
		 * Therefore, we should always put back the reversed address.
		 */
		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
			(dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
		printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
	}
	/*
	 * Illustration of the two layouts for MAC 00:11:22:33:44:55 (an
	 * example value, not one read from hardware): in the corrected
	 * order MacAddrA = 0x33221100 and the low half of MacAddrB is
	 * 0x5544; in the reversed legacy order MacAddrA = 0x22334455 and
	 * the low half of MacAddrB is 0x0011.
	 */
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		dev_printk(KERN_ERR, &pci_dev->dev,
			   "Invalid Mac address detected: %pM\n",
			   dev->dev_addr);
		dev_printk(KERN_ERR, &pci_dev->dev,
			   "Please complain to your hardware vendor. Switching to a random MAC.\n");
		random_ether_addr(dev->dev_addr);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
		pci_name(pci_dev), dev->dev_addr);

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* Workaround current PCI init glitch: wakeup bits aren't
	 * being set from PCI PM capability.
	 */
	device_init_wakeup(&pci_dev->dev, 1);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {

		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		/* msix has had reported issues when modifying irqmask
		   as in the case of napi, therefore, disable for now
		*/
#ifndef CONFIG_FORCEDETH_NAPI
		np->msi_flags |= NV_MSI_X_CAPABLE;
#endif
	}

	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
		/* start off in throughput mode */
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		/* remove support for msix mode */
		np->msi_flags &= ~NV_MSI_X_CAPABLE;
	} else {
		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	}
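	/*
	 * Interrupt moding summary (matching the optimization_mode module
	 * parameter description below): throughput mode unmasks the
	 * per-packet tx/rx interrupts, CPU mode relies on the timer
	 * interrupt instead, and dynamic mode starts in throughput mode
	 * and toggles based on load.  As the chain above shows, dynamic
	 * mode drops MSI-X support and is skipped on parts that require
	 * the timer IRQ.
	 */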
	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* Limit the number of tx's outstanding for hw bug */
	if (id->driver_data & DEV_NEED_TX_LIMIT) {
		np->tx_limit = 1;
		if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
		    pci_dev->revision >= 0xA2)
			np->tx_limit = 0;
	}

	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
		    nv_mgmt_acquire_sema(dev) &&
		    nv_mgmt_get_version(dev)) {
			np->mac_in_use = 1;
			if (np->mgmt_version > 0) {
				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
			}
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
				pci_name(pci_dev), np->mac_in_use);
			/* management unit setup the phy already? */
			if (np->mac_in_use &&
			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
				/* phy is inited by mgmt unit */
				phyinitialized = 1;
				dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
					pci_name(pci_dev));
			} else {
				/* we need to init the phy */
			}
		}
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;	/* scan addresses 1..31, then 0 */

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;

		/* Realtek hardcoded phy id1 to all zero's on certain phys */
		if (np->phy_oui == PHY_OUI_REALTEK2)
			np->phy_oui = PHY_OUI_REALTEK;
		/* Setup phy revision for Realtek */
		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;

		break;
	}
	if (i == 33) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "open: Could not find a valid PHY.\n");
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}
	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "unable to register netdev: %d\n", err);
		goto out_error;
	}

	dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
		   "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		   dev->name,
		   np->phy_oui,
		   np->phyaddr,
		   dev->dev_addr[0],
		   dev->dev_addr[1],
		   dev->dev_addr[2],
		   dev->dev_addr[3],
		   dev->dev_addr[4],
		   dev->dev_addr[5]);

	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		   dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
			"csum " : "",
		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
			"vlan " : "",
		   id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		   id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		   id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		   np->gigabit == PHY_GIGABIT ? "gbit " : "",
		   np->need_linktimer ? "lnktim " : "",
		   np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		   np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		   np->desc_ver);

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}

static void nv_restore_phy(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u16 phy_reserved, mii_control;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT8;
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);

		/* restart auto negotiation */
		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
	}
}
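/*
 * nv_restore_phy() restores the Realtek 8201 phy's default
 * crossover-detection behaviour when the driver had disabled it (see
 * the phy_cross module parameter) and restarts autonegotiation, so
 * that whatever runs after nv_remove() sees the phy in its default
 * state.
 */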
static void nv_restore_mac_addr(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);
}

static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	unregister_netdev(dev);

	nv_restore_mac_addr(pci_dev);

	/* restore any phy related changes */
	nv_restore_phy(dev);

	nv_mgmt_release_sema(dev);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}

#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;

	if (netif_running(dev)) {
		// Gross.
		nv_close(dev);
	}
	netif_device_detach(dev);

	/* save non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		np->saved_config_space[i] = readl(base + i*sizeof(u32));

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i, rc = 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* restore non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		writel(np->saved_config_space[i], base + i*sizeof(u32));

	if (np->driver_data & DEV_NEED_MSI_FIX)
		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);

	/* restore phy state, including autoneg */
	phy_init(dev);

	netif_device_attach(dev);
	if (netif_running(dev)) {
		rc = nv_open(dev);
		nv_set_multicast(dev);
	}
	return rc;
}
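/*
 * Suspend/resume above snapshots the whole MMIO register window word by
 * word rather than relying on PCI config space alone, and re-runs
 * phy_init() on resume because the phy loses its autoneg setup across
 * the power transition.
 */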
6187 */ 6188 if (system_state == SYSTEM_POWER_OFF) { 6189 if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled)) 6190 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled); 6191 pci_set_power_state(pdev, PCI_D3hot); 6192 } 6193} 6194#else 6195#define nv_suspend NULL 6196#define nv_shutdown NULL 6197#define nv_resume NULL 6198#endif /* CONFIG_PM */ 6199 6200static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { 6201 { /* nForce Ethernet Controller */ 6202 PCI_DEVICE(0x10DE, 0x01C3), 6203 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6204 }, 6205 { /* nForce2 Ethernet Controller */ 6206 PCI_DEVICE(0x10DE, 0x0066), 6207 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6208 }, 6209 { /* nForce3 Ethernet Controller */ 6210 PCI_DEVICE(0x10DE, 0x00D6), 6211 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6212 }, 6213 { /* nForce3 Ethernet Controller */ 6214 PCI_DEVICE(0x10DE, 0x0086), 6215 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6216 }, 6217 { /* nForce3 Ethernet Controller */ 6218 PCI_DEVICE(0x10DE, 0x008C), 6219 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6220 }, 6221 { /* nForce3 Ethernet Controller */ 6222 PCI_DEVICE(0x10DE, 0x00E6), 6223 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6224 }, 6225 { /* nForce3 Ethernet Controller */ 6226 PCI_DEVICE(0x10DE, 0x00DF), 6227 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6228 }, 6229 { /* CK804 Ethernet Controller */ 6230 PCI_DEVICE(0x10DE, 0x0056), 6231 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6232 }, 6233 { /* CK804 Ethernet Controller */ 6234 PCI_DEVICE(0x10DE, 0x0057), 6235 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6236 }, 6237 { /* MCP04 Ethernet Controller */ 6238 PCI_DEVICE(0x10DE, 0x0037), 6239 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6240 }, 6241 { /* MCP04 Ethernet Controller */ 6242 PCI_DEVICE(0x10DE, 0x0038), 6243 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6244 }, 6245 { /* MCP51 Ethernet Controller */ 6246 PCI_DEVICE(0x10DE, 0x0268), 6247 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX, 6248 }, 6249 { /* MCP51 Ethernet Controller */ 6250 PCI_DEVICE(0x10DE, 0x0269), 6251 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX, 6252 }, 6253 { /* MCP55 Ethernet Controller */ 6254 PCI_DEVICE(0x10DE, 0x0372), 6255 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, 6256 }, 6257 { /* MCP55 Ethernet Controller */ 6258 PCI_DEVICE(0x10DE, 0x0373), 6259 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, 6260 }, 6261 { /* MCP61 Ethernet Controller */ 6262 PCI_DEVICE(0x10DE, 0x03E5), 6263 
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E5),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E6),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0450),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0451),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0452),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0453),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054C),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054E),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054F),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DC),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DD),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0760),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0761),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0762),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0763),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB0),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB1),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB2),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB3),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP89 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0D7D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
	},
	{0,},
};

static struct pci_driver driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= __devexit_p(nv_remove),
	.suspend	= nv_suspend,
	.resume		= nv_resume,
	.shutdown	= nv_shutdown,
};

static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
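/*
 * Worked example for poll_interval (derived from the parameter
 * description below, not an additional tunable): a register value v
 * corresponds to v * 2^10 / 100 microseconds, so poll_interval=97
 * gives 97 * 1024 / 100 = 993 us, i.e. roughly one timer interrupt
 * per millisecond.
 */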
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);