Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.37-rc3, 6222 lines, 193 kB
/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION	"0.64"
#define DRV_NAME		"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif

#define TX_WORK_PER_LOOP	64
#define RX_WORK_PER_LOOP	64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x0000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0000020  /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI		0x0000040  /* device supports MSI */
#define DEV_HAS_MSI_X		0x0000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1	0x0000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x0000400  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3	0x0000800  /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12	0x0000600  /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123	0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
#define DEV_HAS_TEST_EXTENDED	0x0001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x0002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR	0x0004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX	0x0008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1	0x0010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2	0x0020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3	0x0040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT	0x0080000  /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2	0x0180000  /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE	0x0200000  /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX	0x0400000  /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX	0x0800000  /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX	0x1000000  /* device needs msi workaround */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8200
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START		0x01
#define NVREG_XMITCTL_MGMT_ST		0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
#define NVREG_XMITCTL_DATA_START	0x00100000
#define NVREG_XMITCTL_DATA_READY	0x00010000
#define NVREG_XMITCTL_DATA_ERROR	0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE	0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE	0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT		0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK	0x000003ff
#define NVREG_BKOFFCTRL_SELECT		24
#define NVREG_BKOFFCTRL_GEAR		12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION	0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION		0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE	0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		512
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
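/*
 * Note on the three versions above: DESC_VER_1 and DESC_VER_2 use the
 * two-word struct ring_desc, while DESC_VER_3 uses the four-word
 * struct ring_desc_ex (see union ring_type and nv_optimized() below).
 */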
/* PHY defines */
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK		0x03ff
#define PHYID1_OUI_SHFT		6
#define PHYID2_OUI_MASK		0xfc00
#define PHYID2_OUI_SHFT		10
#define PHYID2_MODEL_MASK	0x03f0
#define PHY_MODEL_REALTEK_8211	0x0110
#define PHY_REV_MASK		0x0001
#define PHY_REV_REALTEK_8211B	0x0000
#define PHY_REV_REALTEK_8211C	0x0001
#define PHY_MODEL_REALTEK_8201	0x0200
#define PHY_MODEL_MARVELL_E3016	0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

#define NV_MSI_PRIV_OFFSET	0x68
#define NV_MSI_PRIV_VALUE	0xffffffff

#define NV_RESTART_TX		0x1
#define NV_RESTART_RX		0x2

#define NV_TX_LIMIT_COUNT	16

#define NV_DYNAMIC_THRESHOLD		4
#define NV_DYNAMIC_MAX_QUIET_COUNT	2048

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
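/*
 * Sanity check on the counts above: struct nv_ethtool_stats holds 33 u64
 * counters, so V3 = 33, V2 = 33 - 3 = 30, and V1 = 30 - 6 = 24, matching
 * the base, "version 2" and "version 3" groups in nv_estats_str[].
 */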
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];	/* -rx */
	char name_tx[IFNAMSIZ + 3];	/* -tx */
	char name_other[IFNAMSIZ + 6];	/* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
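/*
 * Worked example for the formula above (a sketch, using the values quoted
 * in the register comments): a 1 ms period gives
 *	poll_interval = (1000 * 100) / 2^10 = 97.6, i.e. 97,
 * which matches the NVREG_POLL_DEFAULT=97 note near NvRegPollingInterval.
 */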
/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down = 0;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk("%s", msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
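/*
 * Note on the test above: using_multi_irqs() returns 1 only when MSI-X is
 * enabled with more than one vector. With legacy INTx, plain MSI, or a
 * single MSI-X vector, rx/tx/other events all share one interrupt line.
 */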
static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

static void nv_napi_enable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
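/*
 * Usage sketch for mii_rw() (illustrative only, not part of the driver;
 * BMSR_LSTATUS, BMCR_ANENABLE and BMCR_ANRESTART come from <linux/mii.h>):
 */
#if 0
static void example_phy_access(struct net_device *dev, struct fe_priv *np)
{
	int bmsr;

	/* read: MII_READ as the value argument selects a read cycle;
	 * -1 signals a timeout or error */
	bmsr = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (bmsr != -1 && (bmsr & BMSR_LSTATUS))
		dprintk(KERN_DEBUG "%s: link is up\n", dev->name);

	/* write: restart autonegotiation (caller serializes, see above) */
	mii_rw(dev, np->phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
}
#endif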
phy init failed.\n", pci_name(np->pci_dev)); 1227 return PHY_ERROR; 1228 } 1229 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) { 1230 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1231 return PHY_ERROR; 1232 } 1233 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) { 1234 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1235 return PHY_ERROR; 1236 } 1237 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1238 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1239 return PHY_ERROR; 1240 } 1241 } 1242 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1243 np->phy_rev == PHY_REV_REALTEK_8211C) { 1244 u32 powerstate = readl(base + NvRegPowerState2); 1245 1246 /* need to perform hw phy reset */ 1247 powerstate |= NVREG_POWERSTATE2_PHY_RESET; 1248 writel(powerstate, base + NvRegPowerState2); 1249 msleep(25); 1250 1251 powerstate &= ~NVREG_POWERSTATE2_PHY_RESET; 1252 writel(powerstate, base + NvRegPowerState2); 1253 msleep(25); 1254 1255 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); 1256 reg |= PHY_REALTEK_INIT9; 1257 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) { 1258 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1259 return PHY_ERROR; 1260 } 1261 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) { 1262 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1263 return PHY_ERROR; 1264 } 1265 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ); 1266 if (!(reg & PHY_REALTEK_INIT11)) { 1267 reg |= PHY_REALTEK_INIT11; 1268 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) { 1269 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1270 return PHY_ERROR; 1271 } 1272 } 1273 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1274 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1275 return PHY_ERROR; 1276 } 1277 } 1278 if (np->phy_model == PHY_MODEL_REALTEK_8201) { 1279 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) { 1280 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); 1281 phy_reserved |= PHY_REALTEK_INIT7; 1282 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) { 1283 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1284 return PHY_ERROR; 1285 } 1286 } 1287 } 1288 } 1289 1290 /* set advertise register */ 1291 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 1292 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); 1293 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { 1294 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); 1295 return PHY_ERROR; 1296 } 1297 1298 /* get phy interface type */ 1299 phyinterface = readl(base + NvRegPhyInterface); 1300 1301 /* see if gigabit phy */ 1302 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 1303 if (mii_status & PHY_GIGABIT) { 1304 np->gigabit = PHY_GIGABIT; 1305 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 1306 mii_control_1000 &= ~ADVERTISE_1000HALF; 1307 if (phyinterface & PHY_RGMII) 1308 mii_control_1000 |= ADVERTISE_1000FULL; 1309 else 1310 mii_control_1000 &= ~ADVERTISE_1000FULL; 1311 1312 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { 1313 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1314 return PHY_ERROR; 1315 } 1316 } 1317 else 1318 
np->gigabit = 0; 1319 1320 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1321 mii_control |= BMCR_ANENABLE; 1322 1323 if (np->phy_oui == PHY_OUI_REALTEK && 1324 np->phy_model == PHY_MODEL_REALTEK_8211 && 1325 np->phy_rev == PHY_REV_REALTEK_8211C) { 1326 /* start autoneg since we already performed hw reset above */ 1327 mii_control |= BMCR_ANRESTART; 1328 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1329 printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev)); 1330 return PHY_ERROR; 1331 } 1332 } else { 1333 /* reset the phy 1334 * (certain phys need bmcr to be setup with reset) 1335 */ 1336 if (phy_reset(dev, mii_control)) { 1337 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); 1338 return PHY_ERROR; 1339 } 1340 } 1341 1342 /* phy vendor specific configuration */ 1343 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { 1344 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); 1345 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); 1346 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); 1347 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) { 1348 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1349 return PHY_ERROR; 1350 } 1351 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1352 phy_reserved |= PHY_CICADA_INIT5; 1353 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) { 1354 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1355 return PHY_ERROR; 1356 } 1357 } 1358 if (np->phy_oui == PHY_OUI_CICADA) { 1359 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); 1360 phy_reserved |= PHY_CICADA_INIT6; 1361 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) { 1362 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1363 return PHY_ERROR; 1364 } 1365 } 1366 if (np->phy_oui == PHY_OUI_VITESSE) { 1367 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) { 1368 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1369 return PHY_ERROR; 1370 } 1371 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) { 1372 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1373 return PHY_ERROR; 1374 } 1375 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1376 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1377 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1378 return PHY_ERROR; 1379 } 1380 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1381 phy_reserved &= ~PHY_VITESSE_INIT_MSK1; 1382 phy_reserved |= PHY_VITESSE_INIT3; 1383 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1384 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1385 return PHY_ERROR; 1386 } 1387 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) { 1388 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1389 return PHY_ERROR; 1390 } 1391 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) { 1392 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1393 return PHY_ERROR; 1394 } 1395 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1396 phy_reserved &= ~PHY_VITESSE_INIT_MSK1; 1397 phy_reserved |= PHY_VITESSE_INIT3; 1398 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1399 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1400 return PHY_ERROR; 
1401 } 1402 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1403 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1404 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1405 return PHY_ERROR; 1406 } 1407 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) { 1408 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1409 return PHY_ERROR; 1410 } 1411 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) { 1412 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1413 return PHY_ERROR; 1414 } 1415 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1416 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1417 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1418 return PHY_ERROR; 1419 } 1420 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1421 phy_reserved &= ~PHY_VITESSE_INIT_MSK2; 1422 phy_reserved |= PHY_VITESSE_INIT8; 1423 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1424 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1425 return PHY_ERROR; 1426 } 1427 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) { 1428 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1429 return PHY_ERROR; 1430 } 1431 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) { 1432 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1433 return PHY_ERROR; 1434 } 1435 } 1436 if (np->phy_oui == PHY_OUI_REALTEK) { 1437 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1438 np->phy_rev == PHY_REV_REALTEK_8211B) { 1439 /* reset could have cleared these out, set them back */ 1440 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1441 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1442 return PHY_ERROR; 1443 } 1444 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { 1445 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1446 return PHY_ERROR; 1447 } 1448 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1449 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1450 return PHY_ERROR; 1451 } 1452 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { 1453 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1454 return PHY_ERROR; 1455 } 1456 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) { 1457 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1458 return PHY_ERROR; 1459 } 1460 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) { 1461 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1462 return PHY_ERROR; 1463 } 1464 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1465 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1466 return PHY_ERROR; 1467 } 1468 } 1469 if (np->phy_model == PHY_MODEL_REALTEK_8201) { 1470 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) { 1471 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); 1472 phy_reserved |= PHY_REALTEK_INIT7; 1473 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) { 1474 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1475 return PHY_ERROR; 1476 } 1477 } 1478 if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) { 1479 if (mii_rw(dev, 
np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1480 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1481 return PHY_ERROR; 1482 } 1483 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); 1484 phy_reserved &= ~PHY_REALTEK_INIT_MSK1; 1485 phy_reserved |= PHY_REALTEK_INIT3; 1486 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) { 1487 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1488 return PHY_ERROR; 1489 } 1490 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1491 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1492 return PHY_ERROR; 1493 } 1494 } 1495 } 1496 } 1497 1498 /* some phys clear out pause advertisment on reset, set it back */ 1499 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); 1500 1501 /* restart auto negotiation, power down phy */ 1502 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1503 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 1504 if (phy_power_down) { 1505 mii_control |= BMCR_PDOWN; 1506 } 1507 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1508 return PHY_ERROR; 1509 } 1510 1511 return 0; 1512} 1513 1514static void nv_start_rx(struct net_device *dev) 1515{ 1516 struct fe_priv *np = netdev_priv(dev); 1517 u8 __iomem *base = get_hwbase(dev); 1518 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1519 1520 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); 1521 /* Already running? Stop it. */ 1522 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { 1523 rx_ctrl &= ~NVREG_RCVCTL_START; 1524 writel(rx_ctrl, base + NvRegReceiverControl); 1525 pci_push(base); 1526 } 1527 writel(np->linkspeed, base + NvRegLinkSpeed); 1528 pci_push(base); 1529 rx_ctrl |= NVREG_RCVCTL_START; 1530 if (np->mac_in_use) 1531 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; 1532 writel(rx_ctrl, base + NvRegReceiverControl); 1533 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", 1534 dev->name, np->duplex, np->linkspeed); 1535 pci_push(base); 1536} 1537 1538static void nv_stop_rx(struct net_device *dev) 1539{ 1540 struct fe_priv *np = netdev_priv(dev); 1541 u8 __iomem *base = get_hwbase(dev); 1542 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1543 1544 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); 1545 if (!np->mac_in_use) 1546 rx_ctrl &= ~NVREG_RCVCTL_START; 1547 else 1548 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; 1549 writel(rx_ctrl, base + NvRegReceiverControl); 1550 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, 1551 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, 1552 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); 1553 1554 udelay(NV_RXSTOP_DELAY2); 1555 if (!np->mac_in_use) 1556 writel(0, base + NvRegLinkSpeed); 1557} 1558 1559static void nv_start_tx(struct net_device *dev) 1560{ 1561 struct fe_priv *np = netdev_priv(dev); 1562 u8 __iomem *base = get_hwbase(dev); 1563 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1564 1565 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name); 1566 tx_ctrl |= NVREG_XMITCTL_START; 1567 if (np->mac_in_use) 1568 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; 1569 writel(tx_ctrl, base + NvRegTransmitterControl); 1570 pci_push(base); 1571} 1572 1573static void nv_stop_tx(struct net_device *dev) 1574{ 1575 struct fe_priv *np = netdev_priv(dev); 1576 u8 __iomem *base = get_hwbase(dev); 1577 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1578 1579 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); 1580 if (!np->mac_in_use) 1581 tx_ctrl &= ~NVREG_XMITCTL_START; 
1582 else 1583 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; 1584 writel(tx_ctrl, base + NvRegTransmitterControl); 1585 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, 1586 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, 1587 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); 1588 1589 udelay(NV_TXSTOP_DELAY2); 1590 if (!np->mac_in_use) 1591 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, 1592 base + NvRegTransmitPoll); 1593} 1594 1595static void nv_start_rxtx(struct net_device *dev) 1596{ 1597 nv_start_rx(dev); 1598 nv_start_tx(dev); 1599} 1600 1601static void nv_stop_rxtx(struct net_device *dev) 1602{ 1603 nv_stop_rx(dev); 1604 nv_stop_tx(dev); 1605} 1606 1607static void nv_txrx_reset(struct net_device *dev) 1608{ 1609 struct fe_priv *np = netdev_priv(dev); 1610 u8 __iomem *base = get_hwbase(dev); 1611 1612 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name); 1613 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1614 pci_push(base); 1615 udelay(NV_TXRX_RESET_DELAY); 1616 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1617 pci_push(base); 1618} 1619 1620static void nv_mac_reset(struct net_device *dev) 1621{ 1622 struct fe_priv *np = netdev_priv(dev); 1623 u8 __iomem *base = get_hwbase(dev); 1624 u32 temp1, temp2, temp3; 1625 1626 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name); 1627 1628 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1629 pci_push(base); 1630 1631 /* save registers since they will be cleared on reset */ 1632 temp1 = readl(base + NvRegMacAddrA); 1633 temp2 = readl(base + NvRegMacAddrB); 1634 temp3 = readl(base + NvRegTransmitPoll); 1635 1636 writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset); 1637 pci_push(base); 1638 udelay(NV_MAC_RESET_DELAY); 1639 writel(0, base + NvRegMacReset); 1640 pci_push(base); 1641 udelay(NV_MAC_RESET_DELAY); 1642 1643 /* restore saved registers */ 1644 writel(temp1, base + NvRegMacAddrA); 1645 writel(temp2, base + NvRegMacAddrB); 1646 writel(temp3, base + NvRegTransmitPoll); 1647 1648 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1649 pci_push(base); 1650} 1651 1652static void nv_get_hw_stats(struct net_device *dev) 1653{ 1654 struct fe_priv *np = netdev_priv(dev); 1655 u8 __iomem *base = get_hwbase(dev); 1656 1657 np->estats.tx_bytes += readl(base + NvRegTxCnt); 1658 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); 1659 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); 1660 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); 1661 np->estats.tx_late_collision += readl(base + NvRegTxLateCol); 1662 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); 1663 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); 1664 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); 1665 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); 1666 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); 1667 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); 1668 np->estats.rx_late_collision += readl(base + NvRegRxLateCol); 1669 np->estats.rx_runt += readl(base + NvRegRxRunt); 1670 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); 1671 np->estats.rx_over_errors += readl(base + NvRegRxOverflow); 1672 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); 1673 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); 1674 np->estats.rx_length_error += readl(base + NvRegRxLenErr); 1675 
static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);

	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}
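/*
 * Ring refill invariant (a hedged reading of nv_alloc_rx below): the fill
 * loop stops one slot short of the reader position, so the ring is never
 * completely full and "put == get" always means empty. E.g. with a 4-entry
 * ring and get_rx at slot 2, less_rx becomes slot 1 and put_rx advances at
 * most through slot 0.
 */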
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc *less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex *less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

/* If rx bufs are exhausted called after 50ms to attempt to refresh */
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	napi_schedule(&np->napi);
}

static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;

	if (!nv_optimized(np))
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
	np->tx_stop = 0;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}

static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	if (tx_skb->dma) {
		if (tx_skb->dma_single)
			pci_unmap_single(np->pci_dev, tx_skb->dma,
					 tx_skb->dma_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(np->pci_dev, tx_skb->dma,
				       tx_skb->dma_len,
				       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
}

static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	nv_unmap_txskb(np, tx_skb);
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	}
	return 0;
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(np, &np->tx_skb[i]))
			dev->stats.tx_dropped++;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}

static void nv_drain_rxtx(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}
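/*
 * A worked example for nv_get_empty_tx_slots() below (illustrative numbers,
 * not driver constants): with tx_ring_size = 8, put_tx_ctx 5 entries past
 * first_tx_ctx and get_tx_ctx 2 entries past it, (8 + (5 - 2)) % 8 = 3
 * slots are in flight, leaving 8 - 3 = 5 free descriptors. The added
 * "tx_ring_size" term keeps the pointer difference non-negative after
 * put_tx_ctx wraps around the ring.
 */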
static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}

static void nv_legacybackoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	u32 low;
	int tx_status = 0;

	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
	get_random_bytes(&low, sizeof(low));
	reg |= low & NVREG_SLOTTIME_MASK;

	/* Need to stop tx before change takes effect.
	 * Caller has already gained np->lock.
	 */
	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
	if (tx_status)
		nv_stop_tx(dev);
	nv_stop_rx(dev);
	writel(reg, base + NvRegSlotTime);
	if (tx_status)
		nv_start_tx(dev);
	nv_start_rx(dev);
}

/* Gear Backoff Seeds */
#define BACKOFF_SEEDSET_ROWS	8
#define BACKOFF_SEEDSET_LFSRS	15

/* Known Good seed sets */
static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};

static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
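/*
 * Seed construction in nv_gear_backoff_reseed() below, sketched with an
 * illustrative value: each miniseed is a 12-bit random value (forced to
 * 0xabc if zero). The "reversal" swaps the top and bottom nibbles, e.g.
 * 0x123 -> 0x321, and the combined 24-bit seed is
 * ((miniseed1 ^ miniseed2_reversed) << 12) | (miniseed2 ^ miniseed3_reversed).
 */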
static void nv_gear_backoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
	u32 temp, seedset, combinedSeed;
	int i;

	/* Setup seed for free running LFSR */
	/* We read three random values and swizzle bits around to
	   increase randomness */
	get_random_bytes(&miniseed1, sizeof(miniseed1));
	miniseed1 &= 0x0fff;
	if (miniseed1 == 0)
		miniseed1 = 0xabc;

	get_random_bytes(&miniseed2, sizeof(miniseed2));
	miniseed2 &= 0x0fff;
	if (miniseed2 == 0)
		miniseed2 = 0xabc;
	miniseed2_reversed =
		((miniseed2 & 0xF00) >> 8) |
		 (miniseed2 & 0x0F0) |
		 ((miniseed2 & 0x00F) << 8);

	get_random_bytes(&miniseed3, sizeof(miniseed3));
	miniseed3 &= 0x0fff;
	if (miniseed3 == 0)
		miniseed3 = 0xabc;
	miniseed3_reversed =
		((miniseed3 & 0xF00) >> 8) |
		 (miniseed3 & 0x0F0) |
		 ((miniseed3 & 0x00F) << 8);

	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
		       (miniseed2 ^ miniseed3_reversed);

	/* Seeds cannot be zero */
	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
		combinedSeed |= 0x08;
	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
		combinedSeed |= 0x8000;

	/* No need to disable tx here */
	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
	writel(temp, base + NvRegBackOffControl);

	/* Setup seeds for all gear LFSRs. */
	get_random_bytes(&seedset, sizeof(seedset));
	seedset = seedset % BACKOFF_SEEDSET_ROWS;
	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
		temp |= main_seedset[seedset][i-1] & 0x3ff;
		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
		writel(temp, base + NvRegBackOffControl);
	}
}
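/*
 * Descriptor accounting used by both xmit paths below: a buffer of "size"
 * bytes consumes ceil(size / NV_TX2_TSO_MAX_SIZE) descriptors, computed as
 * (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0).
 * The mask arithmetic assumes NV_TX2_TSO_MAX_SIZE == 1 << NV_TX2_TSO_MAX_SHIFT,
 * i.e. a power of two; if the limit were 16384 bytes, for instance, a
 * 20000-byte linear area would need two descriptors.
 */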
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb_headlen(skb);
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc *put_tx;
	struct ring_desc *start_tx;
	struct ring_desc *prev_tx;
	struct nv_skb_map *prev_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.orig;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		np->put_tx_ctx->dma_single = 1;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			np->put_tx_ctx->dma_single = 0;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	spin_lock_irqsave(&np->lock, flags);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.orig = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j = 0; j < 64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char *)skb->data)[j]);
		}
		dprintk("\n");
	}

	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}

static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb_headlen(skb);
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc_ex *put_tx;
	struct ring_desc_ex *start_tx;
	struct ring_desc_ex *prev_tx;
	struct nv_skb_map *prev_tx_ctx;
	struct nv_skb_map *start_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.ex;
	start_tx_ctx = np->put_tx_ctx;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		np->put_tx_ctx->dma_single = 1;
		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			np->put_tx_ctx->dma_single = 0;
			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (vlan_tx_tag_present(skb))
		start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
					       vlan_tx_tag_get(skb));
	else
		start_tx->txvlan = 0;

	spin_lock_irqsave(&np->lock, flags);

	if (np->tx_limit) {
		/* Limit the number of outstanding tx. Setup all fragments, but
		 * do not set the VALID bit on the first descriptor. Save a pointer
		 * to that descriptor and also for next skb_map element.
		 */

		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
			if (!np->tx_change_owner)
				np->tx_change_owner = start_tx_ctx;

			/* remove VALID bit */
			tx_flags &= ~NV_TX2_VALID;
			start_tx_ctx->first_tx_desc = start_tx;
			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
			np->tx_end_flip = np->put_tx_ctx;
		} else {
			np->tx_pkts_in_progress++;
		}
	}

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j = 0; j < 64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char *)skb->data)[j]);
		}
		dprintk("\n");
	}

	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
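/*
 * How the tx_limit throttle above pairs with nv_tx_flip_ownership() below:
 * once NV_TX_LIMIT_COUNT packets are in flight, new packets are queued with
 * the VALID bit cleared on their first descriptor, so the hardware ignores
 * them. Each completion then flips VALID on the oldest deferred packet and
 * kicks the transmitter, keeping at most NV_TX_LIMIT_COUNT packets visible
 * to the NIC at a time.
 */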
tx_flags_extra: %x\n", 2359 dev->name, entries, tx_flags_extra); 2360 { 2361 int j; 2362 for (j=0; j<64; j++) { 2363 if ((j%16) == 0) 2364 dprintk("\n%03x:", j); 2365 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2366 } 2367 dprintk("\n"); 2368 } 2369 2370 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2371 return NETDEV_TX_OK; 2372} 2373 2374static inline void nv_tx_flip_ownership(struct net_device *dev) 2375{ 2376 struct fe_priv *np = netdev_priv(dev); 2377 2378 np->tx_pkts_in_progress--; 2379 if (np->tx_change_owner) { 2380 np->tx_change_owner->first_tx_desc->flaglen |= 2381 cpu_to_le32(NV_TX2_VALID); 2382 np->tx_pkts_in_progress++; 2383 2384 np->tx_change_owner = np->tx_change_owner->next_tx_ctx; 2385 if (np->tx_change_owner == np->tx_end_flip) 2386 np->tx_change_owner = NULL; 2387 2388 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2389 } 2390} 2391 2392/* 2393 * nv_tx_done: check for completed packets, release the skbs. 2394 * 2395 * Caller must own np->lock. 2396 */ 2397static int nv_tx_done(struct net_device *dev, int limit) 2398{ 2399 struct fe_priv *np = netdev_priv(dev); 2400 u32 flags; 2401 int tx_work = 0; 2402 struct ring_desc* orig_get_tx = np->get_tx.orig; 2403 2404 while ((np->get_tx.orig != np->put_tx.orig) && 2405 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && 2406 (tx_work < limit)) { 2407 2408 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n", 2409 dev->name, flags); 2410 2411 nv_unmap_txskb(np, np->get_tx_ctx); 2412 2413 if (np->desc_ver == DESC_VER_1) { 2414 if (flags & NV_TX_LASTPACKET) { 2415 if (flags & NV_TX_ERROR) { 2416 if (flags & NV_TX_UNDERFLOW) 2417 dev->stats.tx_fifo_errors++; 2418 if (flags & NV_TX_CARRIERLOST) 2419 dev->stats.tx_carrier_errors++; 2420 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) 2421 nv_legacybackoff_reseed(dev); 2422 dev->stats.tx_errors++; 2423 } else { 2424 dev->stats.tx_packets++; 2425 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2426 } 2427 dev_kfree_skb_any(np->get_tx_ctx->skb); 2428 np->get_tx_ctx->skb = NULL; 2429 tx_work++; 2430 } 2431 } else { 2432 if (flags & NV_TX2_LASTPACKET) { 2433 if (flags & NV_TX2_ERROR) { 2434 if (flags & NV_TX2_UNDERFLOW) 2435 dev->stats.tx_fifo_errors++; 2436 if (flags & NV_TX2_CARRIERLOST) 2437 dev->stats.tx_carrier_errors++; 2438 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2439 nv_legacybackoff_reseed(dev); 2440 dev->stats.tx_errors++; 2441 } else { 2442 dev->stats.tx_packets++; 2443 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2444 } 2445 dev_kfree_skb_any(np->get_tx_ctx->skb); 2446 np->get_tx_ctx->skb = NULL; 2447 tx_work++; 2448 } 2449 } 2450 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) 2451 np->get_tx.orig = np->first_tx.orig; 2452 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2453 np->get_tx_ctx = np->first_tx_ctx; 2454 } 2455 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { 2456 np->tx_stop = 0; 2457 netif_wake_queue(dev); 2458 } 2459 return tx_work; 2460} 2461 2462static int nv_tx_done_optimized(struct net_device *dev, int limit) 2463{ 2464 struct fe_priv *np = netdev_priv(dev); 2465 u32 flags; 2466 int tx_work = 0; 2467 struct ring_desc_ex* orig_get_tx = np->get_tx.ex; 2468 2469 while ((np->get_tx.ex != np->put_tx.ex) && 2470 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && 2471 (tx_work < limit)) { 2472 2473 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", 2474 dev->name, 
		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
			dev->name, flags);

		nv_unmap_txskb(np, np->get_tx_ctx);

		if (flags & NV_TX2_LASTPACKET) {
			if (!(flags & NV_TX2_ERROR))
				dev->stats.tx_packets++;
			else {
				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
					if (np->driver_data & DEV_HAS_GEAR_MODE)
						nv_gear_backoff_reseed(dev);
					else
						nv_legacybackoff_reseed(dev);
				}
			}

			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;
			tx_work++;

			if (np->tx_limit) {
				nv_tx_flip_ownership(dev);
			}
		}
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}

/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;
	union ring_type put_tx;
	int saved_tx_limit;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx\n",
		       dev->name, (unsigned long)np->ring_addr);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i = 0; i <= np->register_size; i += 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
			       i,
			       readl(base + i + 0), readl(base + i + 4),
			       readl(base + i + 8), readl(base + i + 12),
			       readl(base + i + 16), readl(base + i + 20),
			       readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i = 0; i < np->tx_ring_size; i += 4) {
			if (!nv_optimized(np)) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
	saved_tx_limit = np->tx_limit;
	np->tx_limit = 0; /* prevent giving HW any limited pkts */
	np->tx_stop = 0;  /* prevent waking tx queue */
	if (!nv_optimized(np))
		nv_tx_done(dev, np->tx_ring_size);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	/* save current HW position */
	if (np->tx_change_owner)
		put_tx.ex = np->tx_change_owner->first_tx_desc;
	else
		put_tx = np->put_tx;

	/* 3) clear all tx state */
	nv_drain_tx(dev);
	nv_init_tx(dev);

	/* 4) restore state to current HW position */
	np->get_tx = np->put_tx = put_tx;
	np->tx_limit = saved_tx_limit;

	/* 5) restart tx engine */
	nv_start_tx(dev);
	netif_wake_queue(dev);
	spin_unlock_irq(&np->lock);
}

/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
		dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
				dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
				dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
			dev->name, datalen);
		return datalen;
	}
}
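/*
 * A worked example for nv_getlen() (illustrative numbers): an untagged
 * 802.3 frame of 64 bytes on the wire whose length field holds 28 gives
 * protolen = 28 + ETH_HLEN = 42. Since datalen (64) exceeds ETH_ZLEN and
 * is >= protolen, the padding is trimmed and 42 bytes are accepted; had
 * protolen exceeded datalen, the frame would be discarded as truncated.
 */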
static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.orig != np->put_rx.orig) &&
	       !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
	       (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
			for (j = 0; j < 64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char *)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
							dev->stats.rx_missed_errors++;
						if (flags & NV_RX_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX2_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
			dev->name, len, skb->protocol);
		napi_gro_receive(&np->napi, skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}

static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.ex != np->put_rx.ex) &&
	       !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	       (rx_work < limit)) {

flags 0x%x.\n", 2805 dev->name, flags); 2806 2807 /* 2808 * the packet is for us - immediately tear down the pci mapping. 2809 * TODO: check if a prefetch of the first cacheline improves 2810 * the performance. 2811 */ 2812 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2813 np->get_rx_ctx->dma_len, 2814 PCI_DMA_FROMDEVICE); 2815 skb = np->get_rx_ctx->skb; 2816 np->get_rx_ctx->skb = NULL; 2817 2818 { 2819 int j; 2820 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2821 for (j=0; j<64; j++) { 2822 if ((j%16) == 0) 2823 dprintk("\n%03x:", j); 2824 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2825 } 2826 dprintk("\n"); 2827 } 2828 /* look at what we actually got: */ 2829 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2830 len = flags & LEN_MASK_V2; 2831 if (unlikely(flags & NV_RX2_ERROR)) { 2832 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { 2833 len = nv_getlen(dev, skb->data, len); 2834 if (len < 0) { 2835 dev_kfree_skb(skb); 2836 goto next_pkt; 2837 } 2838 } 2839 /* framing errors are soft errors */ 2840 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2841 if (flags & NV_RX2_SUBSTRACT1) { 2842 len--; 2843 } 2844 } 2845 /* the rest are hard errors */ 2846 else { 2847 dev_kfree_skb(skb); 2848 goto next_pkt; 2849 } 2850 } 2851 2852 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2853 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2854 skb->ip_summed = CHECKSUM_UNNECESSARY; 2855 2856 /* got a valid packet - forward it to the network core */ 2857 skb_put(skb, len); 2858 skb->protocol = eth_type_trans(skb, dev); 2859 prefetch(skb->data); 2860 2861 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n", 2862 dev->name, len, skb->protocol); 2863 2864 if (likely(!np->vlangrp)) { 2865 napi_gro_receive(&np->napi, skb); 2866 } else { 2867 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); 2868 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { 2869 vlan_gro_receive(&np->napi, np->vlangrp, 2870 vlanflags & NV_RX3_VLAN_TAG_MASK, skb); 2871 } else { 2872 napi_gro_receive(&np->napi, skb); 2873 } 2874 } 2875 2876 dev->stats.rx_packets++; 2877 dev->stats.rx_bytes += len; 2878 } else { 2879 dev_kfree_skb(skb); 2880 } 2881next_pkt: 2882 if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) 2883 np->get_rx.ex = np->first_rx.ex; 2884 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2885 np->get_rx_ctx = np->first_rx_ctx; 2886 2887 rx_work++; 2888 } 2889 2890 return rx_work; 2891} 2892 2893static void set_bufsize(struct net_device *dev) 2894{ 2895 struct fe_priv *np = netdev_priv(dev); 2896 2897 if (dev->mtu <= ETH_DATA_LEN) 2898 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; 2899 else 2900 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; 2901} 2902 2903/* 2904 * nv_change_mtu: dev->change_mtu function 2905 * Called with dev_base_lock held for read. 
2906 */ 2907static int nv_change_mtu(struct net_device *dev, int new_mtu) 2908{ 2909 struct fe_priv *np = netdev_priv(dev); 2910 int old_mtu; 2911 2912 if (new_mtu < 64 || new_mtu > np->pkt_limit) 2913 return -EINVAL; 2914 2915 old_mtu = dev->mtu; 2916 dev->mtu = new_mtu; 2917 2918 /* return early if the buffer sizes will not change */ 2919 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) 2920 return 0; 2921 if (old_mtu == new_mtu) 2922 return 0; 2923 2924 /* synchronized against open : rtnl_lock() held by caller */ 2925 if (netif_running(dev)) { 2926 u8 __iomem *base = get_hwbase(dev); 2927 /* 2928 * It seems that the nic preloads valid ring entries into an 2929 * internal buffer. The procedure for flushing everything is 2930 * guessed, there is probably a simpler approach. 2931 * Changing the MTU is a rare event, it shouldn't matter. 2932 */ 2933 nv_disable_irq(dev); 2934 nv_napi_disable(dev); 2935 netif_tx_lock_bh(dev); 2936 netif_addr_lock(dev); 2937 spin_lock(&np->lock); 2938 /* stop engines */ 2939 nv_stop_rxtx(dev); 2940 nv_txrx_reset(dev); 2941 /* drain rx queue */ 2942 nv_drain_rxtx(dev); 2943 /* reinit driver view of the rx queue */ 2944 set_bufsize(dev); 2945 if (nv_init_ring(dev)) { 2946 if (!np->in_shutdown) 2947 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2948 } 2949 /* reinit nic view of the rx queue */ 2950 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2951 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2952 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 2953 base + NvRegRingSizes); 2954 pci_push(base); 2955 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2956 pci_push(base); 2957 2958 /* restart rx engine */ 2959 nv_start_rxtx(dev); 2960 spin_unlock(&np->lock); 2961 netif_addr_unlock(dev); 2962 netif_tx_unlock_bh(dev); 2963 nv_napi_enable(dev); 2964 nv_enable_irq(dev); 2965 } 2966 return 0; 2967} 2968 2969static void nv_copy_mac_to_hw(struct net_device *dev) 2970{ 2971 u8 __iomem *base = get_hwbase(dev); 2972 u32 mac[2]; 2973 2974 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 2975 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 2976 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 2977 2978 writel(mac[0], base + NvRegMacAddrA); 2979 writel(mac[1], base + NvRegMacAddrB); 2980} 2981 2982/* 2983 * nv_set_mac_address: dev->set_mac_address function 2984 * Called with rtnl_lock() held. 2985 */ 2986static int nv_set_mac_address(struct net_device *dev, void *addr) 2987{ 2988 struct fe_priv *np = netdev_priv(dev); 2989 struct sockaddr *macaddr = (struct sockaddr*)addr; 2990 2991 if (!is_valid_ether_addr(macaddr->sa_data)) 2992 return -EADDRNOTAVAIL; 2993 2994 /* synchronized against open : rtnl_lock() held by caller */ 2995 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); 2996 2997 if (netif_running(dev)) { 2998 netif_tx_lock_bh(dev); 2999 netif_addr_lock(dev); 3000 spin_lock_irq(&np->lock); 3001 3002 /* stop rx engine */ 3003 nv_stop_rx(dev); 3004 3005 /* set mac address */ 3006 nv_copy_mac_to_hw(dev); 3007 3008 /* restart rx engine */ 3009 nv_start_rx(dev); 3010 spin_unlock_irq(&np->lock); 3011 netif_addr_unlock(dev); 3012 netif_tx_unlock_bh(dev); 3013 } else { 3014 nv_copy_mac_to_hw(dev); 3015 } 3016 return 0; 3017} 3018 3019/* 3020 * nv_set_multicast: dev->set_multicast function 3021 * Called with netif_tx_lock held. 
3022 */ 3023static void nv_set_multicast(struct net_device *dev) 3024{ 3025 struct fe_priv *np = netdev_priv(dev); 3026 u8 __iomem *base = get_hwbase(dev); 3027 u32 addr[2]; 3028 u32 mask[2]; 3029 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; 3030 3031 memset(addr, 0, sizeof(addr)); 3032 memset(mask, 0, sizeof(mask)); 3033 3034 if (dev->flags & IFF_PROMISC) { 3035 pff |= NVREG_PFF_PROMISC; 3036 } else { 3037 pff |= NVREG_PFF_MYADDR; 3038 3039 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { 3040 u32 alwaysOff[2]; 3041 u32 alwaysOn[2]; 3042 3043 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; 3044 if (dev->flags & IFF_ALLMULTI) { 3045 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 3046 } else { 3047 struct netdev_hw_addr *ha; 3048 3049 netdev_for_each_mc_addr(ha, dev) { 3050 unsigned char *addr = ha->addr; 3051 u32 a, b; 3052 3053 a = le32_to_cpu(*(__le32 *) addr); 3054 b = le16_to_cpu(*(__le16 *) (&addr[4])); 3055 alwaysOn[0] &= a; 3056 alwaysOff[0] &= ~a; 3057 alwaysOn[1] &= b; 3058 alwaysOff[1] &= ~b; 3059 } 3060 } 3061 addr[0] = alwaysOn[0]; 3062 addr[1] = alwaysOn[1]; 3063 mask[0] = alwaysOn[0] | alwaysOff[0]; 3064 mask[1] = alwaysOn[1] | alwaysOff[1]; 3065 } else { 3066 mask[0] = NVREG_MCASTMASKA_NONE; 3067 mask[1] = NVREG_MCASTMASKB_NONE; 3068 } 3069 } 3070 addr[0] |= NVREG_MCASTADDRA_FORCE; 3071 pff |= NVREG_PFF_ALWAYS; 3072 spin_lock_irq(&np->lock); 3073 nv_stop_rx(dev); 3074 writel(addr[0], base + NvRegMulticastAddrA); 3075 writel(addr[1], base + NvRegMulticastAddrB); 3076 writel(mask[0], base + NvRegMulticastMaskA); 3077 writel(mask[1], base + NvRegMulticastMaskB); 3078 writel(pff, base + NvRegPacketFilterFlags); 3079 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", 3080 dev->name); 3081 nv_start_rx(dev); 3082 spin_unlock_irq(&np->lock); 3083} 3084 3085static void nv_update_pause(struct net_device *dev, u32 pause_flags) 3086{ 3087 struct fe_priv *np = netdev_priv(dev); 3088 u8 __iomem *base = get_hwbase(dev); 3089 3090 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); 3091 3092 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { 3093 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; 3094 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { 3095 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); 3096 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3097 } else { 3098 writel(pff, base + NvRegPacketFilterFlags); 3099 } 3100 } 3101 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 3102 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 3103 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 3104 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 3105 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 3106 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 3107 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { 3108 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 3109 /* limit the number of tx pause frames to a default of 8 */ 3110 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit); 3111 } 3112 writel(pause_enable, base + NvRegTxPauseFrame); 3113 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 3114 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3115 } else { 3116 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 3117 writel(regmisc, base + NvRegMisc1); 3118 } 3119 } 3120} 3121 3122/** 3123 * nv_update_linkspeed: Setup the MAC according to the link partner 3124 * @dev: Network device to be 
/**
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
	u32 txrxFlags = 0;
	u32 phy_exp;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
			dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
			dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
		dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
		    (status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
		dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	/* The transmitter and receiver must be restarted for safe update */
	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
		txrxFlags |= NV_RESTART_TX;
		nv_stop_tx(dev);
	}
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		txrxFlags |= NV_RESTART_RX;
		nv_stop_rx(dev);
	}

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegSlotTime);
		phyreg &= ~(0x3FF00);
		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
			phyreg |= NVREG_SLOTTIME_10_100_FULL;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_SLOTTIME_1000_FULL;
		writel(phyreg, base + NvRegSlotTime);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		} else {
			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
				else
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
			} else {
				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
			}
		}
	} else {
		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
		else
			txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
	       base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				break;
			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	if (txrxFlags & NV_RESTART_TX)
		nv_start_tx(dev);
	if (txrxFlags & NV_RESTART_RX)
		nv_start_rx(dev);

	return retval;
}

static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_txrx_gate(dev, false);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_txrx_gate(dev, true);
			nv_stop_rx(dev);
		}
	}
}

static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
3397 */ 3398 if (np->msi_flags & NV_MSI_ENABLED) { 3399 u8 __iomem *base = np->base; 3400 3401 writel(0, base + NvRegMSIIrqMask); 3402 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3403 } 3404} 3405 3406static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work) 3407{ 3408 struct fe_priv *np = netdev_priv(dev); 3409 3410 if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) { 3411 if (total_work > NV_DYNAMIC_THRESHOLD) { 3412 /* transition to poll based interrupts */ 3413 np->quiet_count = 0; 3414 if (np->irqmask != NVREG_IRQMASK_CPU) { 3415 np->irqmask = NVREG_IRQMASK_CPU; 3416 return 1; 3417 } 3418 } else { 3419 if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) { 3420 np->quiet_count++; 3421 } else { 3422 /* reached a period of low activity, switch 3423 to per tx/rx packet interrupts */ 3424 if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) { 3425 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 3426 return 1; 3427 } 3428 } 3429 } 3430 } 3431 return 0; 3432} 3433 3434static irqreturn_t nv_nic_irq(int foo, void *data) 3435{ 3436 struct net_device *dev = (struct net_device *) data; 3437 struct fe_priv *np = netdev_priv(dev); 3438 u8 __iomem *base = get_hwbase(dev); 3439 3440 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 3441 3442 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3443 np->events = readl(base + NvRegIrqStatus); 3444 writel(np->events, base + NvRegIrqStatus); 3445 } else { 3446 np->events = readl(base + NvRegMSIXIrqStatus); 3447 writel(np->events, base + NvRegMSIXIrqStatus); 3448 } 3449 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events); 3450 if (!(np->events & np->irqmask)) 3451 return IRQ_NONE; 3452 3453 nv_msi_workaround(np); 3454 3455 if (napi_schedule_prep(&np->napi)) { 3456 /* 3457 * Disable further irq's (msix not enabled with napi) 3458 */ 3459 writel(0, base + NvRegIrqMask); 3460 __napi_schedule(&np->napi); 3461 } 3462 3463 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 3464 3465 return IRQ_HANDLED; 3466} 3467 3468/** 3469 * All _optimized functions are used to help increase performance 3470 * (reduce CPU and increase throughput). They use descripter version 3, 3471 * compiler directives, and reduce memory accesses. 
3472 */ 3473static irqreturn_t nv_nic_irq_optimized(int foo, void *data) 3474{ 3475 struct net_device *dev = (struct net_device *) data; 3476 struct fe_priv *np = netdev_priv(dev); 3477 u8 __iomem *base = get_hwbase(dev); 3478 3479 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name); 3480 3481 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3482 np->events = readl(base + NvRegIrqStatus); 3483 writel(np->events, base + NvRegIrqStatus); 3484 } else { 3485 np->events = readl(base + NvRegMSIXIrqStatus); 3486 writel(np->events, base + NvRegMSIXIrqStatus); 3487 } 3488 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events); 3489 if (!(np->events & np->irqmask)) 3490 return IRQ_NONE; 3491 3492 nv_msi_workaround(np); 3493 3494 if (napi_schedule_prep(&np->napi)) { 3495 /* 3496 * Disable further irq's (msix not enabled with napi) 3497 */ 3498 writel(0, base + NvRegIrqMask); 3499 __napi_schedule(&np->napi); 3500 } 3501 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name); 3502 3503 return IRQ_HANDLED; 3504} 3505 3506static irqreturn_t nv_nic_irq_tx(int foo, void *data) 3507{ 3508 struct net_device *dev = (struct net_device *) data; 3509 struct fe_priv *np = netdev_priv(dev); 3510 u8 __iomem *base = get_hwbase(dev); 3511 u32 events; 3512 int i; 3513 unsigned long flags; 3514 3515 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 3516 3517 for (i=0; ; i++) { 3518 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3519 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3520 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 3521 if (!(events & np->irqmask)) 3522 break; 3523 3524 spin_lock_irqsave(&np->lock, flags); 3525 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3526 spin_unlock_irqrestore(&np->lock, flags); 3527 3528 if (unlikely(i > max_interrupt_work)) { 3529 spin_lock_irqsave(&np->lock, flags); 3530 /* disable interrupts on the nic */ 3531 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3532 pci_push(base); 3533 3534 if (!np->in_shutdown) { 3535 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 3536 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3537 } 3538 spin_unlock_irqrestore(&np->lock, flags); 3539 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 3540 break; 3541 } 3542 3543 } 3544 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); 3545 3546 return IRQ_RETVAL(i); 3547} 3548 3549static int nv_napi_poll(struct napi_struct *napi, int budget) 3550{ 3551 struct fe_priv *np = container_of(napi, struct fe_priv, napi); 3552 struct net_device *dev = np->dev; 3553 u8 __iomem *base = get_hwbase(dev); 3554 unsigned long flags; 3555 int retcode; 3556 int rx_count, tx_work=0, rx_work=0; 3557 3558 do { 3559 if (!nv_optimized(np)) { 3560 spin_lock_irqsave(&np->lock, flags); 3561 tx_work += nv_tx_done(dev, np->tx_ring_size); 3562 spin_unlock_irqrestore(&np->lock, flags); 3563 3564 rx_count = nv_rx_process(dev, budget - rx_work); 3565 retcode = nv_alloc_rx(dev); 3566 } else { 3567 spin_lock_irqsave(&np->lock, flags); 3568 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size); 3569 spin_unlock_irqrestore(&np->lock, flags); 3570 3571 rx_count = nv_rx_process_optimized(dev, 3572 budget - rx_work); 3573 retcode = nv_alloc_rx_optimized(dev); 3574 } 3575 } while (retcode == 0 && 3576 rx_count > 0 && (rx_work += rx_count) < budget); 3577 3578 if (retcode) { 3579 spin_lock_irqsave(&np->lock, flags); 3580 if (!np->in_shutdown) 3581 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3582 spin_unlock_irqrestore(&np->lock, flags); 3583 } 
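	/* Adapt irq moderation to the load just observed: a busy poll pass
	 * switches the mask to timer-driven CPU mode, a sustained quiet
	 * spell switches back to per-packet throughput mode (see
	 * nv_change_interrupt_mode() above); the updated mask is programmed
	 * when interrupts are re-enabled below. */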
3584 3585 nv_change_interrupt_mode(dev, tx_work + rx_work); 3586 3587 if (unlikely(np->events & NVREG_IRQ_LINK)) { 3588 spin_lock_irqsave(&np->lock, flags); 3589 nv_link_irq(dev); 3590 spin_unlock_irqrestore(&np->lock, flags); 3591 } 3592 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3593 spin_lock_irqsave(&np->lock, flags); 3594 nv_linkchange(dev); 3595 spin_unlock_irqrestore(&np->lock, flags); 3596 np->link_timeout = jiffies + LINK_TIMEOUT; 3597 } 3598 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { 3599 spin_lock_irqsave(&np->lock, flags); 3600 if (!np->in_shutdown) { 3601 np->nic_poll_irq = np->irqmask; 3602 np->recover_error = 1; 3603 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3604 } 3605 spin_unlock_irqrestore(&np->lock, flags); 3606 napi_complete(napi); 3607 return rx_work; 3608 } 3609 3610 if (rx_work < budget) { 3611 /* re-enable interrupts 3612 (msix not enabled in napi) */ 3613 napi_complete(napi); 3614 3615 writel(np->irqmask, base + NvRegIrqMask); 3616 } 3617 return rx_work; 3618} 3619 3620static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3621{ 3622 struct net_device *dev = (struct net_device *) data; 3623 struct fe_priv *np = netdev_priv(dev); 3624 u8 __iomem *base = get_hwbase(dev); 3625 u32 events; 3626 int i; 3627 unsigned long flags; 3628 3629 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 3630 3631 for (i=0; ; i++) { 3632 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3633 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3634 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 3635 if (!(events & np->irqmask)) 3636 break; 3637 3638 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { 3639 if (unlikely(nv_alloc_rx_optimized(dev))) { 3640 spin_lock_irqsave(&np->lock, flags); 3641 if (!np->in_shutdown) 3642 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3643 spin_unlock_irqrestore(&np->lock, flags); 3644 } 3645 } 3646 3647 if (unlikely(i > max_interrupt_work)) { 3648 spin_lock_irqsave(&np->lock, flags); 3649 /* disable interrupts on the nic */ 3650 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3651 pci_push(base); 3652 3653 if (!np->in_shutdown) { 3654 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 3655 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3656 } 3657 spin_unlock_irqrestore(&np->lock, flags); 3658 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 3659 break; 3660 } 3661 } 3662 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); 3663 3664 return IRQ_RETVAL(i); 3665} 3666 3667static irqreturn_t nv_nic_irq_other(int foo, void *data) 3668{ 3669 struct net_device *dev = (struct net_device *) data; 3670 struct fe_priv *np = netdev_priv(dev); 3671 u8 __iomem *base = get_hwbase(dev); 3672 u32 events; 3673 int i; 3674 unsigned long flags; 3675 3676 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 3677 3678 for (i=0; ; i++) { 3679 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3680 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3681 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3682 if (!(events & np->irqmask)) 3683 break; 3684 3685 /* check tx in case we reached max loop limit in tx isr */ 3686 spin_lock_irqsave(&np->lock, flags); 3687 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3688 spin_unlock_irqrestore(&np->lock, flags); 3689 3690 if (events & NVREG_IRQ_LINK) { 3691 spin_lock_irqsave(&np->lock, flags); 3692 nv_link_irq(dev); 3693 spin_unlock_irqrestore(&np->lock, flags); 3694 } 3695 if (np->need_linktimer && 
time_after(jiffies, np->link_timeout)) { 3696 spin_lock_irqsave(&np->lock, flags); 3697 nv_linkchange(dev); 3698 spin_unlock_irqrestore(&np->lock, flags); 3699 np->link_timeout = jiffies + LINK_TIMEOUT; 3700 } 3701 if (events & NVREG_IRQ_RECOVER_ERROR) { 3702 spin_lock_irq(&np->lock); 3703 /* disable interrupts on the nic */ 3704 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3705 pci_push(base); 3706 3707 if (!np->in_shutdown) { 3708 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3709 np->recover_error = 1; 3710 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3711 } 3712 spin_unlock_irq(&np->lock); 3713 break; 3714 } 3715 if (unlikely(i > max_interrupt_work)) { 3716 spin_lock_irqsave(&np->lock, flags); 3717 /* disable interrupts on the nic */ 3718 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3719 pci_push(base); 3720 3721 if (!np->in_shutdown) { 3722 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3723 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3724 } 3725 spin_unlock_irqrestore(&np->lock, flags); 3726 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 3727 break; 3728 } 3729 3730 } 3731 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); 3732 3733 return IRQ_RETVAL(i); 3734} 3735 3736static irqreturn_t nv_nic_irq_test(int foo, void *data) 3737{ 3738 struct net_device *dev = (struct net_device *) data; 3739 struct fe_priv *np = netdev_priv(dev); 3740 u8 __iomem *base = get_hwbase(dev); 3741 u32 events; 3742 3743 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name); 3744 3745 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3746 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3747 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3748 } else { 3749 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3750 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3751 } 3752 pci_push(base); 3753 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3754 if (!(events & NVREG_IRQ_TIMER)) 3755 return IRQ_RETVAL(0); 3756 3757 nv_msi_workaround(np); 3758 3759 spin_lock(&np->lock); 3760 np->intr_test = 1; 3761 spin_unlock(&np->lock); 3762 3763 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); 3764 3765 return IRQ_RETVAL(1); 3766} 3767 3768static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) 3769{ 3770 u8 __iomem *base = get_hwbase(dev); 3771 int i; 3772 u32 msixmap = 0; 3773 3774 /* Each interrupt bit can be mapped to a MSIX vector (4 bits). 3775 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents 3776 * the remaining 8 interrupts. 
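 * Worked example (per the nibble loop below): mapping irqmask 0x0087 --
 * bits 0, 1, 2 and 7 -- to vector 1 sets nibbles 0, 1, 2 and 7 of
 * MSIXMap0, i.e. msixmap = 0x10000111; bits 8..15 of the irqmask are
 * placed into MSIXMap1 the same way.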
3777 */ 3778 for (i = 0; i < 8; i++) { 3779 if ((irqmask >> i) & 0x1) { 3780 msixmap |= vector << (i << 2); 3781 } 3782 } 3783 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3784 3785 msixmap = 0; 3786 for (i = 0; i < 8; i++) { 3787 if ((irqmask >> (i + 8)) & 0x1) { 3788 msixmap |= vector << (i << 2); 3789 } 3790 } 3791 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3792} 3793 3794static int nv_request_irq(struct net_device *dev, int intr_test) 3795{ 3796 struct fe_priv *np = get_nvpriv(dev); 3797 u8 __iomem *base = get_hwbase(dev); 3798 int ret = 1; 3799 int i; 3800 irqreturn_t (*handler)(int foo, void *data); 3801 3802 if (intr_test) { 3803 handler = nv_nic_irq_test; 3804 } else { 3805 if (nv_optimized(np)) 3806 handler = nv_nic_irq_optimized; 3807 else 3808 handler = nv_nic_irq; 3809 } 3810 3811 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3812 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3813 np->msi_x_entry[i].entry = i; 3814 } 3815 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 3816 np->msi_flags |= NV_MSI_X_ENABLED; 3817 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3818 /* Request irq for rx handling */ 3819 sprintf(np->name_rx, "%s-rx", dev->name); 3820 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, 3821 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) { 3822 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 3823 pci_disable_msix(np->pci_dev); 3824 np->msi_flags &= ~NV_MSI_X_ENABLED; 3825 goto out_err; 3826 } 3827 /* Request irq for tx handling */ 3828 sprintf(np->name_tx, "%s-tx", dev->name); 3829 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, 3830 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) { 3831 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 3832 pci_disable_msix(np->pci_dev); 3833 np->msi_flags &= ~NV_MSI_X_ENABLED; 3834 goto out_free_rx; 3835 } 3836 /* Request irq for link and timer handling */ 3837 sprintf(np->name_other, "%s-other", dev->name); 3838 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, 3839 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) { 3840 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 3841 pci_disable_msix(np->pci_dev); 3842 np->msi_flags &= ~NV_MSI_X_ENABLED; 3843 goto out_free_tx; 3844 } 3845 /* map interrupts to their respective vector */ 3846 writel(0, base + NvRegMSIXMap0); 3847 writel(0, base + NvRegMSIXMap1); 3848 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 3849 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 3850 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 3851 } else { 3852 /* Request irq for all interrupts */ 3853 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 3854 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3855 pci_disable_msix(np->pci_dev); 3856 np->msi_flags &= ~NV_MSI_X_ENABLED; 3857 goto out_err; 3858 } 3859 3860 /* map interrupts to vector 0 */ 3861 writel(0, base + NvRegMSIXMap0); 3862 writel(0, base + NvRegMSIXMap1); 3863 } 3864 } 3865 } 3866 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3867 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3868 np->msi_flags |= NV_MSI_ENABLED; 3869 dev->irq = np->pci_dev->irq; 3870 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3871 printk(KERN_INFO "forcedeth: 
request_irq failed %d\n", ret); 3872 pci_disable_msi(np->pci_dev); 3873 np->msi_flags &= ~NV_MSI_ENABLED; 3874 dev->irq = np->pci_dev->irq; 3875 goto out_err; 3876 } 3877 3878 /* map interrupts to vector 0 */ 3879 writel(0, base + NvRegMSIMap0); 3880 writel(0, base + NvRegMSIMap1); 3881 /* enable msi vector 0 */ 3882 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3883 } 3884 } 3885 if (ret != 0) { 3886 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) 3887 goto out_err; 3888 3889 } 3890 3891 return 0; 3892out_free_tx: 3893 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); 3894out_free_rx: 3895 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); 3896out_err: 3897 return 1; 3898} 3899 3900static void nv_free_irq(struct net_device *dev) 3901{ 3902 struct fe_priv *np = get_nvpriv(dev); 3903 int i; 3904 3905 if (np->msi_flags & NV_MSI_X_ENABLED) { 3906 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3907 free_irq(np->msi_x_entry[i].vector, dev); 3908 } 3909 pci_disable_msix(np->pci_dev); 3910 np->msi_flags &= ~NV_MSI_X_ENABLED; 3911 } else { 3912 free_irq(np->pci_dev->irq, dev); 3913 if (np->msi_flags & NV_MSI_ENABLED) { 3914 pci_disable_msi(np->pci_dev); 3915 np->msi_flags &= ~NV_MSI_ENABLED; 3916 } 3917 } 3918} 3919 3920static void nv_do_nic_poll(unsigned long data) 3921{ 3922 struct net_device *dev = (struct net_device *) data; 3923 struct fe_priv *np = netdev_priv(dev); 3924 u8 __iomem *base = get_hwbase(dev); 3925 u32 mask = 0; 3926 3927 /* 3928 * First disable irq(s) and then 3929 * reenable interrupts on the nic, we have to do this before calling 3930 * nv_nic_irq because that may decide to do otherwise 3931 */ 3932 3933 if (!using_multi_irqs(dev)) { 3934 if (np->msi_flags & NV_MSI_X_ENABLED) 3935 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 3936 else 3937 disable_irq_lockdep(np->pci_dev->irq); 3938 mask = np->irqmask; 3939 } else { 3940 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 3941 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 3942 mask |= NVREG_IRQ_RX_ALL; 3943 } 3944 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 3945 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 3946 mask |= NVREG_IRQ_TX_ALL; 3947 } 3948 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 3949 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 3950 mask |= NVREG_IRQ_OTHER; 3951 } 3952 } 3953 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ 3954 3955 if (np->recover_error) { 3956 np->recover_error = 0; 3957 printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name); 3958 if (netif_running(dev)) { 3959 netif_tx_lock_bh(dev); 3960 netif_addr_lock(dev); 3961 spin_lock(&np->lock); 3962 /* stop engines */ 3963 nv_stop_rxtx(dev); 3964 if (np->driver_data & DEV_HAS_POWER_CNTRL) 3965 nv_mac_reset(dev); 3966 nv_txrx_reset(dev); 3967 /* drain rx queue */ 3968 nv_drain_rxtx(dev); 3969 /* reinit driver view of the rx queue */ 3970 set_bufsize(dev); 3971 if (nv_init_ring(dev)) { 3972 if (!np->in_shutdown) 3973 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3974 } 3975 /* reinit nic view of the rx queue */ 3976 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3977 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3978 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3979 base + NvRegRingSizes); 3980 pci_push(base); 3981 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + 
NvRegTxRxControl); 3982 pci_push(base); 3983 /* clear interrupts */ 3984 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3985 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3986 else 3987 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 3988 3989 /* restart rx engine */ 3990 nv_start_rxtx(dev); 3991 spin_unlock(&np->lock); 3992 netif_addr_unlock(dev); 3993 netif_tx_unlock_bh(dev); 3994 } 3995 } 3996 3997 writel(mask, base + NvRegIrqMask); 3998 pci_push(base); 3999 4000 if (!using_multi_irqs(dev)) { 4001 np->nic_poll_irq = 0; 4002 if (nv_optimized(np)) 4003 nv_nic_irq_optimized(0, dev); 4004 else 4005 nv_nic_irq(0, dev); 4006 if (np->msi_flags & NV_MSI_X_ENABLED) 4007 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 4008 else 4009 enable_irq_lockdep(np->pci_dev->irq); 4010 } else { 4011 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4012 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; 4013 nv_nic_irq_rx(0, dev); 4014 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 4015 } 4016 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4017 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; 4018 nv_nic_irq_tx(0, dev); 4019 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 4020 } 4021 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4022 np->nic_poll_irq &= ~NVREG_IRQ_OTHER; 4023 nv_nic_irq_other(0, dev); 4024 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 4025 } 4026 } 4027 4028} 4029 4030#ifdef CONFIG_NET_POLL_CONTROLLER 4031static void nv_poll_controller(struct net_device *dev) 4032{ 4033 nv_do_nic_poll((unsigned long) dev); 4034} 4035#endif 4036 4037static void nv_do_stats_poll(unsigned long data) 4038{ 4039 struct net_device *dev = (struct net_device *) data; 4040 struct fe_priv *np = netdev_priv(dev); 4041 4042 nv_get_hw_stats(dev); 4043 4044 if (!np->in_shutdown) 4045 mod_timer(&np->stats_poll, 4046 round_jiffies(jiffies + STATS_INTERVAL)); 4047} 4048 4049static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 4050{ 4051 struct fe_priv *np = netdev_priv(dev); 4052 strcpy(info->driver, DRV_NAME); 4053 strcpy(info->version, FORCEDETH_VERSION); 4054 strcpy(info->bus_info, pci_name(np->pci_dev)); 4055} 4056 4057static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4058{ 4059 struct fe_priv *np = netdev_priv(dev); 4060 wolinfo->supported = WAKE_MAGIC; 4061 4062 spin_lock_irq(&np->lock); 4063 if (np->wolenabled) 4064 wolinfo->wolopts = WAKE_MAGIC; 4065 spin_unlock_irq(&np->lock); 4066} 4067 4068static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4069{ 4070 struct fe_priv *np = netdev_priv(dev); 4071 u8 __iomem *base = get_hwbase(dev); 4072 u32 flags = 0; 4073 4074 if (wolinfo->wolopts == 0) { 4075 np->wolenabled = 0; 4076 } else if (wolinfo->wolopts & WAKE_MAGIC) { 4077 np->wolenabled = 1; 4078 flags = NVREG_WAKEUPFLAGS_ENABLE; 4079 } 4080 if (netif_running(dev)) { 4081 spin_lock_irq(&np->lock); 4082 writel(flags, base + NvRegWakeUpFlags); 4083 spin_unlock_irq(&np->lock); 4084 } 4085 return 0; 4086} 4087 4088static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 4089{ 4090 struct fe_priv *np = netdev_priv(dev); 4091 int adv; 4092 4093 spin_lock_irq(&np->lock); 4094 ecmd->port = PORT_MII; 4095 if (!netif_running(dev)) { 4096 /* We do not track link speed / duplex setting if the 4097 * interface is disabled. 
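	 * Calling nv_update_linkspeed() refreshes the link state on demand,
	 * so an accurate carrier state can still be reported while the
	 * device is down.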
Force a link check */ 4098 if (nv_update_linkspeed(dev)) { 4099 if (!netif_carrier_ok(dev)) 4100 netif_carrier_on(dev); 4101 } else { 4102 if (netif_carrier_ok(dev)) 4103 netif_carrier_off(dev); 4104 } 4105 } 4106 4107 if (netif_carrier_ok(dev)) { 4108 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 4109 case NVREG_LINKSPEED_10: 4110 ecmd->speed = SPEED_10; 4111 break; 4112 case NVREG_LINKSPEED_100: 4113 ecmd->speed = SPEED_100; 4114 break; 4115 case NVREG_LINKSPEED_1000: 4116 ecmd->speed = SPEED_1000; 4117 break; 4118 } 4119 ecmd->duplex = DUPLEX_HALF; 4120 if (np->duplex) 4121 ecmd->duplex = DUPLEX_FULL; 4122 } else { 4123 ecmd->speed = -1; 4124 ecmd->duplex = -1; 4125 } 4126 4127 ecmd->autoneg = np->autoneg; 4128 4129 ecmd->advertising = ADVERTISED_MII; 4130 if (np->autoneg) { 4131 ecmd->advertising |= ADVERTISED_Autoneg; 4132 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4133 if (adv & ADVERTISE_10HALF) 4134 ecmd->advertising |= ADVERTISED_10baseT_Half; 4135 if (adv & ADVERTISE_10FULL) 4136 ecmd->advertising |= ADVERTISED_10baseT_Full; 4137 if (adv & ADVERTISE_100HALF) 4138 ecmd->advertising |= ADVERTISED_100baseT_Half; 4139 if (adv & ADVERTISE_100FULL) 4140 ecmd->advertising |= ADVERTISED_100baseT_Full; 4141 if (np->gigabit == PHY_GIGABIT) { 4142 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4143 if (adv & ADVERTISE_1000FULL) 4144 ecmd->advertising |= ADVERTISED_1000baseT_Full; 4145 } 4146 } 4147 ecmd->supported = (SUPPORTED_Autoneg | 4148 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 4149 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 4150 SUPPORTED_MII); 4151 if (np->gigabit == PHY_GIGABIT) 4152 ecmd->supported |= SUPPORTED_1000baseT_Full; 4153 4154 ecmd->phy_address = np->phyaddr; 4155 ecmd->transceiver = XCVR_EXTERNAL; 4156 4157 /* ignore maxtxpkt, maxrxpkt for now */ 4158 spin_unlock_irq(&np->lock); 4159 return 0; 4160} 4161 4162static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 4163{ 4164 struct fe_priv *np = netdev_priv(dev); 4165 4166 if (ecmd->port != PORT_MII) 4167 return -EINVAL; 4168 if (ecmd->transceiver != XCVR_EXTERNAL) 4169 return -EINVAL; 4170 if (ecmd->phy_address != np->phyaddr) { 4171 /* TODO: support switching between multiple phys. Should be 4172 * trivial, but not enabled due to lack of test hardware. */ 4173 return -EINVAL; 4174 } 4175 if (ecmd->autoneg == AUTONEG_ENABLE) { 4176 u32 mask; 4177 4178 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 4179 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 4180 if (np->gigabit == PHY_GIGABIT) 4181 mask |= ADVERTISED_1000baseT_Full; 4182 4183 if ((ecmd->advertising & mask) == 0) 4184 return -EINVAL; 4185 4186 } else if (ecmd->autoneg == AUTONEG_DISABLE) { 4187 /* Note: autonegotiation disable, speed 1000 intentionally 4188 * forbidden - no one should need that. */ 4189 4190 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) 4191 return -EINVAL; 4192 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 4193 return -EINVAL; 4194 } else { 4195 return -EINVAL; 4196 } 4197 4198 netif_carrier_off(dev); 4199 if (netif_running(dev)) { 4200 unsigned long flags; 4201 4202 nv_disable_irq(dev); 4203 netif_tx_lock_bh(dev); 4204 netif_addr_lock(dev); 4205 /* with plain spinlock lockdep complains */ 4206 spin_lock_irqsave(&np->lock, flags); 4207 /* stop engines */ 4208 /* FIXME: 4209 * this can take some time, and interrupts are disabled 4210 * due to spin_lock_irqsave, but let's hope no daemon 4211 * is going to change the settings very often...
4212 * Worst case: 4213 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX 4214 * + some minor delays, which adds up to approximately a second 4215 */ 4216 nv_stop_rxtx(dev); 4217 spin_unlock_irqrestore(&np->lock, flags); 4218 netif_addr_unlock(dev); 4219 netif_tx_unlock_bh(dev); 4220 } 4221 4222 if (ecmd->autoneg == AUTONEG_ENABLE) { 4223 int adv, bmcr; 4224 4225 np->autoneg = 1; 4226 4227 /* advertise only what has been requested */ 4228 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4229 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4230 if (ecmd->advertising & ADVERTISED_10baseT_Half) 4231 adv |= ADVERTISE_10HALF; 4232 if (ecmd->advertising & ADVERTISED_10baseT_Full) 4233 adv |= ADVERTISE_10FULL; 4234 if (ecmd->advertising & ADVERTISED_100baseT_Half) 4235 adv |= ADVERTISE_100HALF; 4236 if (ecmd->advertising & ADVERTISED_100baseT_Full) 4237 adv |= ADVERTISE_100FULL; 4238 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 4239 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4240 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4241 adv |= ADVERTISE_PAUSE_ASYM; 4242 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4243 4244 if (np->gigabit == PHY_GIGABIT) { 4245 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4246 adv &= ~ADVERTISE_1000FULL; 4247 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 4248 adv |= ADVERTISE_1000FULL; 4249 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4250 } 4251 4252 if (netif_running(dev)) 4253 printk(KERN_INFO "%s: link down.\n", dev->name); 4254 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4255 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4256 bmcr |= BMCR_ANENABLE; 4257 /* reset the phy in order for settings to stick, 4258 * and cause autoneg to start */ 4259 if (phy_reset(dev, bmcr)) { 4260 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4261 return -EINVAL; 4262 } 4263 } else { 4264 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4265 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4266 } 4267 } else { 4268 int adv, bmcr; 4269 4270 np->autoneg = 0; 4271 4272 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4273 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4274 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) 4275 adv |= ADVERTISE_10HALF; 4276 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) 4277 adv |= ADVERTISE_10FULL; 4278 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) 4279 adv |= ADVERTISE_100HALF; 4280 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) 4281 adv |= ADVERTISE_100FULL; 4282 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4283 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ 4284 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4285 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4286 } 4287 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { 4288 adv |= ADVERTISE_PAUSE_ASYM; 4289 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4290 } 4291 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4292 np->fixed_mode = adv; 4293 4294 if (np->gigabit == PHY_GIGABIT) { 4295 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4296 adv &= ~ADVERTISE_1000FULL; 4297 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4298 } 4299 4300 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4301 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); 4302 if
(np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) 4303 bmcr |= BMCR_FULLDPLX; 4304 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) 4305 bmcr |= BMCR_SPEED100; 4306 if (np->phy_oui == PHY_OUI_MARVELL) { 4307 /* reset the phy in order for forced mode settings to stick */ 4308 if (phy_reset(dev, bmcr)) { 4309 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4310 return -EINVAL; 4311 } 4312 } else { 4313 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4314 if (netif_running(dev)) { 4315 /* Wait a bit and then reconfigure the nic. */ 4316 udelay(10); 4317 nv_linkchange(dev); 4318 } 4319 } 4320 } 4321 4322 if (netif_running(dev)) { 4323 nv_start_rxtx(dev); 4324 nv_enable_irq(dev); 4325 } 4326 4327 return 0; 4328} 4329 4330#define FORCEDETH_REGS_VER 1 4331 4332static int nv_get_regs_len(struct net_device *dev) 4333{ 4334 struct fe_priv *np = netdev_priv(dev); 4335 return np->register_size; 4336} 4337 4338static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 4339{ 4340 struct fe_priv *np = netdev_priv(dev); 4341 u8 __iomem *base = get_hwbase(dev); 4342 u32 *rbuf = buf; 4343 int i; 4344 4345 regs->version = FORCEDETH_REGS_VER; 4346 spin_lock_irq(&np->lock); 4347 for (i = 0; i < np->register_size/sizeof(u32); i++) /* stay within the register_size buffer */ 4348 rbuf[i] = readl(base + i*sizeof(u32)); 4349 spin_unlock_irq(&np->lock); 4350} 4351 4352static int nv_nway_reset(struct net_device *dev) 4353{ 4354 struct fe_priv *np = netdev_priv(dev); 4355 int ret; 4356 4357 if (np->autoneg) { 4358 int bmcr; 4359 4360 netif_carrier_off(dev); 4361 if (netif_running(dev)) { 4362 nv_disable_irq(dev); 4363 netif_tx_lock_bh(dev); 4364 netif_addr_lock(dev); 4365 spin_lock(&np->lock); 4366 /* stop engines */ 4367 nv_stop_rxtx(dev); 4368 spin_unlock(&np->lock); 4369 netif_addr_unlock(dev); 4370 netif_tx_unlock_bh(dev); 4371 printk(KERN_INFO "%s: link down.\n", dev->name); 4372 } 4373 4374 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4375 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4376 bmcr |= BMCR_ANENABLE; 4377 /* reset the phy in order for settings to stick */ 4378 if (phy_reset(dev, bmcr)) { 4379 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4380 return -EINVAL; 4381 } 4382 } else { 4383 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4384 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4385 } 4386 4387 if (netif_running(dev)) { 4388 nv_start_rxtx(dev); 4389 nv_enable_irq(dev); 4390 } 4391 ret = 0; 4392 } else { 4393 ret = -EINVAL; 4394 } 4395 4396 return ret; 4397} 4398 4399static int nv_set_tso(struct net_device *dev, u32 value) 4400{ 4401 struct fe_priv *np = netdev_priv(dev); 4402 4403 if ((np->driver_data & DEV_HAS_CHECKSUM)) 4404 return ethtool_op_set_tso(dev, value); 4405 else 4406 return -EOPNOTSUPP; 4407} 4408 4409static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4410{ 4411 struct fe_priv *np = netdev_priv(dev); 4412 4413 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4414 ring->rx_mini_max_pending = 0; 4415 ring->rx_jumbo_max_pending = 0; 4416 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ?
RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4417 4418 ring->rx_pending = np->rx_ring_size; 4419 ring->rx_mini_pending = 0; 4420 ring->rx_jumbo_pending = 0; 4421 ring->tx_pending = np->tx_ring_size; 4422} 4423 4424static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4425{ 4426 struct fe_priv *np = netdev_priv(dev); 4427 u8 __iomem *base = get_hwbase(dev); 4428 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; 4429 dma_addr_t ring_addr; 4430 4431 if (ring->rx_pending < RX_RING_MIN || 4432 ring->tx_pending < TX_RING_MIN || 4433 ring->rx_mini_pending != 0 || 4434 ring->rx_jumbo_pending != 0 || 4435 (np->desc_ver == DESC_VER_1 && 4436 (ring->rx_pending > RING_MAX_DESC_VER_1 || 4437 ring->tx_pending > RING_MAX_DESC_VER_1)) || 4438 (np->desc_ver != DESC_VER_1 && 4439 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 4440 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { 4441 return -EINVAL; 4442 } 4443 4444 /* allocate new rings */ 4445 if (!nv_optimized(np)) { 4446 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4447 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4448 &ring_addr); 4449 } else { 4450 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4451 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4452 &ring_addr); 4453 } 4454 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); 4455 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); 4456 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { 4457 /* fall back to old rings */ 4458 if (!nv_optimized(np)) { 4459 if (rxtx_ring) 4460 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4461 rxtx_ring, ring_addr); 4462 } else { 4463 if (rxtx_ring) 4464 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4465 rxtx_ring, ring_addr); 4466 } 4467 if (rx_skbuff) 4468 kfree(rx_skbuff); 4469 if (tx_skbuff) 4470 kfree(tx_skbuff); 4471 goto exit; 4472 } 4473 4474 if (netif_running(dev)) { 4475 nv_disable_irq(dev); 4476 nv_napi_disable(dev); 4477 netif_tx_lock_bh(dev); 4478 netif_addr_lock(dev); 4479 spin_lock(&np->lock); 4480 /* stop engines */ 4481 nv_stop_rxtx(dev); 4482 nv_txrx_reset(dev); 4483 /* drain queues */ 4484 nv_drain_rxtx(dev); 4485 /* delete queues */ 4486 free_rings(dev); 4487 } 4488 4489 /* set new values */ 4490 np->rx_ring_size = ring->rx_pending; 4491 np->tx_ring_size = ring->tx_pending; 4492 4493 if (!nv_optimized(np)) { 4494 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4495 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4496 } else { 4497 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4498 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4499 } 4500 np->rx_skb = (struct nv_skb_map*)rx_skbuff; 4501 np->tx_skb = (struct nv_skb_map*)tx_skbuff; 4502 np->ring_addr = ring_addr; 4503 4504 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4505 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); 4506 4507 if (netif_running(dev)) { 4508 /* reinit driver view of the queues */ 4509 set_bufsize(dev); 4510 if (nv_init_ring(dev)) { 4511 if (!np->in_shutdown) 4512 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4513 } 4514 4515 /* reinit nic view of the queues */ 4516 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4517 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4518 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4519 base + 
NvRegRingSizes); 4520 pci_push(base); 4521 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4522 pci_push(base); 4523 4524 /* restart engines */ 4525 nv_start_rxtx(dev); 4526 spin_unlock(&np->lock); 4527 netif_addr_unlock(dev); 4528 netif_tx_unlock_bh(dev); 4529 nv_napi_enable(dev); 4530 nv_enable_irq(dev); 4531 } 4532 return 0; 4533exit: 4534 return -ENOMEM; 4535} 4536 4537static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4538{ 4539 struct fe_priv *np = netdev_priv(dev); 4540 4541 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; 4542 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; 4543 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; 4544} 4545 4546static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4547{ 4548 struct fe_priv *np = netdev_priv(dev); 4549 int adv, bmcr; 4550 4551 if ((!np->autoneg && np->duplex == 0) || 4552 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4553 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", 4554 dev->name); 4555 return -EINVAL; 4556 } 4557 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4558 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 4559 return -EINVAL; 4560 } 4561 4562 netif_carrier_off(dev); 4563 if (netif_running(dev)) { 4564 nv_disable_irq(dev); 4565 netif_tx_lock_bh(dev); 4566 netif_addr_lock(dev); 4567 spin_lock(&np->lock); 4568 /* stop engines */ 4569 nv_stop_rxtx(dev); 4570 spin_unlock(&np->lock); 4571 netif_addr_unlock(dev); 4572 netif_tx_unlock_bh(dev); 4573 } 4574 4575 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); 4576 if (pause->rx_pause) 4577 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; 4578 if (pause->tx_pause) 4579 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; 4580 4581 if (np->autoneg && pause->autoneg) { 4582 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; 4583 4584 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4585 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4586 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 4587 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4588 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4589 adv |= ADVERTISE_PAUSE_ASYM; 4590 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4591 4592 if (netif_running(dev)) 4593 printk(KERN_INFO "%s: link down.\n", dev->name); 4594 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4595 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4596 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4597 } else { 4598 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4599 if (pause->rx_pause) 4600 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4601 if (pause->tx_pause) 4602 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4603 4604 if (!netif_running(dev)) 4605 nv_update_linkspeed(dev); 4606 else 4607 nv_update_pause(dev, np->pause_flags); 4608 } 4609 4610 if (netif_running(dev)) { 4611 nv_start_rxtx(dev); 4612 nv_enable_irq(dev); 4613 } 4614 return 0; 4615} 4616 4617static u32 nv_get_rx_csum(struct net_device *dev) 4618{ 4619 struct fe_priv *np = netdev_priv(dev); 4620 return np->rx_csum != 0; 4621} 4622 4623static int nv_set_rx_csum(struct net_device *dev, u32 data) 4624{ 4625 struct fe_priv *np = netdev_priv(dev); 4626 u8 __iomem *base = get_hwbase(dev); 4627 int retcode = 0; 4628 4629 if (np->driver_data & DEV_HAS_CHECKSUM)
{ 4630 if (data) { 4631 np->rx_csum = 1; 4632 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4633 } else { 4634 np->rx_csum = 0; 4635 /* vlan is dependent on rx checksum offload */ 4636 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) 4637 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; 4638 } 4639 if (netif_running(dev)) { 4640 spin_lock_irq(&np->lock); 4641 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4642 spin_unlock_irq(&np->lock); 4643 } 4644 } else { 4645 return -EINVAL; 4646 } 4647 4648 return retcode; 4649} 4650 4651static int nv_set_tx_csum(struct net_device *dev, u32 data) 4652{ 4653 struct fe_priv *np = netdev_priv(dev); 4654 4655 if (np->driver_data & DEV_HAS_CHECKSUM) 4656 return ethtool_op_set_tx_csum(dev, data); 4657 else 4658 return -EOPNOTSUPP; 4659} 4660 4661static int nv_set_sg(struct net_device *dev, u32 data) 4662{ 4663 struct fe_priv *np = netdev_priv(dev); 4664 4665 if (np->driver_data & DEV_HAS_CHECKSUM) 4666 return ethtool_op_set_sg(dev, data); 4667 else 4668 return -EOPNOTSUPP; 4669} 4670 4671static int nv_get_sset_count(struct net_device *dev, int sset) 4672{ 4673 struct fe_priv *np = netdev_priv(dev); 4674 4675 switch (sset) { 4676 case ETH_SS_TEST: 4677 if (np->driver_data & DEV_HAS_TEST_EXTENDED) 4678 return NV_TEST_COUNT_EXTENDED; 4679 else 4680 return NV_TEST_COUNT_BASE; 4681 case ETH_SS_STATS: 4682 if (np->driver_data & DEV_HAS_STATISTICS_V3) 4683 return NV_DEV_STATISTICS_V3_COUNT; 4684 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4685 return NV_DEV_STATISTICS_V2_COUNT; 4686 else if (np->driver_data & DEV_HAS_STATISTICS_V1) 4687 return NV_DEV_STATISTICS_V1_COUNT; 4688 else 4689 return 0; 4690 default: 4691 return -EOPNOTSUPP; 4692 } 4693} 4694 4695static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) 4696{ 4697 struct fe_priv *np = netdev_priv(dev); 4698 4699 /* update stats */ 4700 nv_do_stats_poll((unsigned long)dev); 4701 4702 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); 4703} 4704 4705static int nv_link_test(struct net_device *dev) 4706{ 4707 struct fe_priv *np = netdev_priv(dev); 4708 int mii_status; 4709 4710 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4711 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4712 4713 /* check phy link status */ 4714 if (!(mii_status & BMSR_LSTATUS)) 4715 return 0; 4716 else 4717 return 1; 4718} 4719 4720static int nv_register_test(struct net_device *dev) 4721{ 4722 u8 __iomem *base = get_hwbase(dev); 4723 int i = 0; 4724 u32 orig_read, new_read; 4725 4726 do { 4727 orig_read = readl(base + nv_registers_test[i].reg); 4728 4729 /* xor with mask to toggle bits */ 4730 orig_read ^= nv_registers_test[i].mask; 4731 4732 writel(orig_read, base + nv_registers_test[i].reg); 4733 4734 new_read = readl(base + nv_registers_test[i].reg); 4735 4736 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) 4737 return 0; 4738 4739 /* restore original value */ 4740 orig_read ^= nv_registers_test[i].mask; 4741 writel(orig_read, base + nv_registers_test[i].reg); 4742 4743 } while (nv_registers_test[++i].reg != 0); 4744 4745 return 1; 4746} 4747 4748static int nv_interrupt_test(struct net_device *dev) 4749{ 4750 struct fe_priv *np = netdev_priv(dev); 4751 u8 __iomem *base = get_hwbase(dev); 4752 int ret = 1; 4753 int testcnt; 4754 u32 save_msi_flags, save_poll_interval = 0; 4755 4756 if (netif_running(dev)) { 4757 /* free current irq */ 4758 nv_free_irq(dev); 4759 save_poll_interval = 
readl(base+NvRegPollingInterval); 4760 } 4761 4762 /* flag to test interrupt handler */ 4763 np->intr_test = 0; 4764 4765 /* setup test irq */ 4766 save_msi_flags = np->msi_flags; 4767 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; 4768 np->msi_flags |= 0x001; /* setup 1 vector */ 4769 if (nv_request_irq(dev, 1)) 4770 return 0; 4771 4772 /* setup timer interrupt */ 4773 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 4774 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4775 4776 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4777 4778 /* wait for at least one interrupt */ 4779 msleep(100); 4780 4781 spin_lock_irq(&np->lock); 4782 4783 /* flag should be set within ISR */ 4784 testcnt = np->intr_test; 4785 if (!testcnt) 4786 ret = 2; 4787 4788 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4789 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 4790 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4791 else 4792 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4793 4794 spin_unlock_irq(&np->lock); 4795 4796 nv_free_irq(dev); 4797 4798 np->msi_flags = save_msi_flags; 4799 4800 if (netif_running(dev)) { 4801 writel(save_poll_interval, base + NvRegPollingInterval); 4802 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4803 /* restore original irq */ 4804 if (nv_request_irq(dev, 0)) 4805 return 0; 4806 } 4807 4808 return ret; 4809} 4810 4811static int nv_loopback_test(struct net_device *dev) 4812{ 4813 struct fe_priv *np = netdev_priv(dev); 4814 u8 __iomem *base = get_hwbase(dev); 4815 struct sk_buff *tx_skb, *rx_skb; 4816 dma_addr_t test_dma_addr; 4817 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 4818 u32 flags; 4819 int len, i, pkt_len; 4820 u8 *pkt_data; 4821 u32 filter_flags = 0; 4822 u32 misc1_flags = 0; 4823 int ret = 1; 4824 4825 if (netif_running(dev)) { 4826 nv_disable_irq(dev); 4827 filter_flags = readl(base + NvRegPacketFilterFlags); 4828 misc1_flags = readl(base + NvRegMisc1); 4829 } else { 4830 nv_txrx_reset(dev); 4831 } 4832 4833 /* reinit driver view of the rx queue */ 4834 set_bufsize(dev); 4835 nv_init_ring(dev); 4836 4837 /* setup hardware for loopback */ 4838 writel(NVREG_MISC1_FORCE, base + NvRegMisc1); 4839 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); 4840 4841 /* reinit nic view of the rx queue */ 4842 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4843 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4844 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4845 base + NvRegRingSizes); 4846 pci_push(base); 4847 4848 /* restart rx engine */ 4849 nv_start_rxtx(dev); 4850 4851 /* setup packet for tx */ 4852 pkt_len = ETH_DATA_LEN; 4853 tx_skb = dev_alloc_skb(pkt_len); 4854 if (!tx_skb) { 4855 printk(KERN_ERR "dev_alloc_skb() failed during loopback test" 4856 " of %s\n", dev->name); 4857 ret = 0; 4858 goto out; 4859 } 4860 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, 4861 skb_tailroom(tx_skb), 4862 PCI_DMA_TODEVICE); /* tx buffer: map towards the device, matching the unmap below */ 4863 pkt_data = skb_put(tx_skb, pkt_len); 4864 for (i = 0; i < pkt_len; i++) 4865 pkt_data[i] = (u8)(i & 0xff); 4866 4867 if (!nv_optimized(np)) { 4868 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 4869 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 4870 } else { 4871 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); 4872 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); 4873 np->tx_ring.ex[0].flaglen =
cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 4874 } 4875 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4876 pci_push(get_hwbase(dev)); 4877 4878 msleep(500); 4879 4880 /* check for rx of the packet */ 4881 if (!nv_optimized(np)) { 4882 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 4883 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 4884 4885 } else { 4886 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); 4887 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 4888 } 4889 4890 if (flags & NV_RX_AVAIL) { 4891 ret = 0; 4892 } else if (np->desc_ver == DESC_VER_1) { 4893 if (flags & NV_RX_ERROR) 4894 ret = 0; 4895 } else { 4896 if (flags & NV_RX2_ERROR) { 4897 ret = 0; 4898 } 4899 } 4900 4901 if (ret) { 4902 if (len != pkt_len) { 4903 ret = 0; 4904 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 4905 dev->name, len, pkt_len); 4906 } else { 4907 rx_skb = np->rx_skb[0].skb; 4908 for (i = 0; i < pkt_len; i++) { 4909 if (rx_skb->data[i] != (u8)(i & 0xff)) { 4910 ret = 0; 4911 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", 4912 dev->name, i); 4913 break; 4914 } 4915 } 4916 } 4917 } else { 4918 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); 4919 } 4920 4921 pci_unmap_single(np->pci_dev, test_dma_addr, 4922 (skb_end_pointer(tx_skb) - tx_skb->data), 4923 PCI_DMA_TODEVICE); 4924 dev_kfree_skb_any(tx_skb); 4925 out: 4926 /* stop engines */ 4927 nv_stop_rxtx(dev); 4928 nv_txrx_reset(dev); 4929 /* drain rx queue */ 4930 nv_drain_rxtx(dev); 4931 4932 if (netif_running(dev)) { 4933 writel(misc1_flags, base + NvRegMisc1); 4934 writel(filter_flags, base + NvRegPacketFilterFlags); 4935 nv_enable_irq(dev); 4936 } 4937 4938 return ret; 4939} 4940 4941static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) 4942{ 4943 struct fe_priv *np = netdev_priv(dev); 4944 u8 __iomem *base = get_hwbase(dev); 4945 int result; 4946 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); 4947 4948 if (!nv_link_test(dev)) { 4949 test->flags |= ETH_TEST_FL_FAILED; 4950 buffer[0] = 1; 4951 } 4952 4953 if (test->flags & ETH_TEST_FL_OFFLINE) { 4954 if (netif_running(dev)) { 4955 netif_stop_queue(dev); 4956 nv_napi_disable(dev); 4957 netif_tx_lock_bh(dev); 4958 netif_addr_lock(dev); 4959 spin_lock_irq(&np->lock); 4960 nv_disable_hw_interrupts(dev, np->irqmask); 4961 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 4962 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4963 } else { 4964 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4965 } 4966 /* stop engines */ 4967 nv_stop_rxtx(dev); 4968 nv_txrx_reset(dev); 4969 /* drain rx queue */ 4970 nv_drain_rxtx(dev); 4971 spin_unlock_irq(&np->lock); 4972 netif_addr_unlock(dev); 4973 netif_tx_unlock_bh(dev); 4974 } 4975 4976 if (!nv_register_test(dev)) { 4977 test->flags |= ETH_TEST_FL_FAILED; 4978 buffer[1] = 1; 4979 } 4980 4981 result = nv_interrupt_test(dev); 4982 if (result != 1) { 4983 test->flags |= ETH_TEST_FL_FAILED; 4984 buffer[2] = 1; 4985 } 4986 if (result == 0) { 4987 /* bail out */ 4988 return; 4989 } 4990 4991 if (!nv_loopback_test(dev)) { 4992 test->flags |= ETH_TEST_FL_FAILED; 4993 buffer[3] = 1; 4994 } 4995 4996 if (netif_running(dev)) { 4997 /* reinit driver view of the rx queue */ 4998 set_bufsize(dev); 4999 if (nv_init_ring(dev)) { 5000 if (!np->in_shutdown) 5001 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5002 } 5003 /* reinit nic view of the rx queue */ 5004 writel(np->rx_buf_sz, 
base + NvRegOffloadConfig); 5005 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5006 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5007 base + NvRegRingSizes); 5008 pci_push(base); 5009 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5010 pci_push(base); 5011 /* restart rx engine */ 5012 nv_start_rxtx(dev); 5013 netif_start_queue(dev); 5014 nv_napi_enable(dev); 5015 nv_enable_hw_interrupts(dev, np->irqmask); 5016 } 5017 } 5018} 5019 5020static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) 5021{ 5022 switch (stringset) { 5023 case ETH_SS_STATS: 5024 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); 5025 break; 5026 case ETH_SS_TEST: 5027 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); 5028 break; 5029 } 5030} 5031 5032static const struct ethtool_ops ops = { 5033 .get_drvinfo = nv_get_drvinfo, 5034 .get_link = ethtool_op_get_link, 5035 .get_wol = nv_get_wol, 5036 .set_wol = nv_set_wol, 5037 .get_settings = nv_get_settings, 5038 .set_settings = nv_set_settings, 5039 .get_regs_len = nv_get_regs_len, 5040 .get_regs = nv_get_regs, 5041 .nway_reset = nv_nway_reset, 5042 .set_tso = nv_set_tso, 5043 .get_ringparam = nv_get_ringparam, 5044 .set_ringparam = nv_set_ringparam, 5045 .get_pauseparam = nv_get_pauseparam, 5046 .set_pauseparam = nv_set_pauseparam, 5047 .get_rx_csum = nv_get_rx_csum, 5048 .set_rx_csum = nv_set_rx_csum, 5049 .set_tx_csum = nv_set_tx_csum, 5050 .set_sg = nv_set_sg, 5051 .get_strings = nv_get_strings, 5052 .get_ethtool_stats = nv_get_ethtool_stats, 5053 .get_sset_count = nv_get_sset_count, 5054 .self_test = nv_self_test, 5055}; 5056 5057static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 5058{ 5059 struct fe_priv *np = get_nvpriv(dev); 5060 5061 spin_lock_irq(&np->lock); 5062 5063 /* save vlan group */ 5064 np->vlangrp = grp; 5065 5066 if (grp) { 5067 /* enable vlan on MAC */ 5068 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; 5069 } else { 5070 /* disable vlan on MAC */ 5071 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 5072 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 5073 } 5074 5075 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5076 5077 spin_unlock_irq(&np->lock); 5078} 5079 5080/* The mgmt unit and driver use a semaphore to access the phy during init */ 5081static int nv_mgmt_acquire_sema(struct net_device *dev) 5082{ 5083 struct fe_priv *np = netdev_priv(dev); 5084 u8 __iomem *base = get_hwbase(dev); 5085 int i; 5086 u32 tx_ctrl, mgmt_sema; 5087 5088 for (i = 0; i < 10; i++) { 5089 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; 5090 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) 5091 break; 5092 msleep(500); 5093 } 5094 5095 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) 5096 return 0; 5097 5098 for (i = 0; i < 2; i++) { 5099 tx_ctrl = readl(base + NvRegTransmitterControl); 5100 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; 5101 writel(tx_ctrl, base + NvRegTransmitterControl); 5102 5103 /* verify that semaphore was acquired */ 5104 tx_ctrl = readl(base + NvRegTransmitterControl); 5105 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && 5106 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { 5107 np->mgmt_sema = 1; 5108 return 1; 5109 } 5110 else 5111 udelay(50); 5112 } 5113 5114 return 0; 5115} 5116 
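/* Counterpart to nv_mgmt_acquire_sema(): clear the host semaphore bit so
 * the management unit can take the phy back. Only done if we actually
 * hold the semaphore (np->mgmt_sema). */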
5117static void nv_mgmt_release_sema(struct net_device *dev) 5118{ 5119 struct fe_priv *np = netdev_priv(dev); 5120 u8 __iomem *base = get_hwbase(dev); 5121 u32 tx_ctrl; 5122 5123 if (np->driver_data & DEV_HAS_MGMT_UNIT) { 5124 if (np->mgmt_sema) { 5125 tx_ctrl = readl(base + NvRegTransmitterControl); 5126 tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ; 5127 writel(tx_ctrl, base + NvRegTransmitterControl); 5128 } 5129 } 5130} 5131 5132 5133static int nv_mgmt_get_version(struct net_device *dev) 5134{ 5135 struct fe_priv *np = netdev_priv(dev); 5136 u8 __iomem *base = get_hwbase(dev); 5137 u32 data_ready = readl(base + NvRegTransmitterControl); 5138 u32 data_ready2 = 0; 5139 unsigned long start; 5140 int ready = 0; 5141 5142 writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion); 5143 writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl); 5144 start = jiffies; 5145 while (time_before(jiffies, start + 5*HZ)) { 5146 data_ready2 = readl(base + NvRegTransmitterControl); 5147 if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) { 5148 ready = 1; 5149 break; 5150 } 5151 schedule_timeout_uninterruptible(1); 5152 } 5153 5154 if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR)) 5155 return 0; 5156 5157 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION; 5158 5159 return 1; 5160} 5161 5162static int nv_open(struct net_device *dev) 5163{ 5164 struct fe_priv *np = netdev_priv(dev); 5165 u8 __iomem *base = get_hwbase(dev); 5166 int ret = 1; 5167 int oom, i; 5168 u32 low; 5169 5170 dprintk(KERN_DEBUG "nv_open: begin\n"); 5171 5172 /* power up phy */ 5173 mii_rw(dev, np->phyaddr, MII_BMCR, 5174 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); 5175 5176 nv_txrx_gate(dev, false); 5177 /* erase previous misconfiguration */ 5178 if (np->driver_data & DEV_HAS_POWER_CNTRL) 5179 nv_mac_reset(dev); 5180 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 5181 writel(0, base + NvRegMulticastAddrB); 5182 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 5183 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 5184 writel(0, base + NvRegPacketFilterFlags); 5185 5186 writel(0, base + NvRegTransmitterControl); 5187 writel(0, base + NvRegReceiverControl); 5188 5189 writel(0, base + NvRegAdapterControl); 5190 5191 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) 5192 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 5193 5194 /* initialize descriptor rings */ 5195 set_bufsize(dev); 5196 oom = nv_init_ring(dev); 5197 5198 writel(0, base + NvRegLinkSpeed); 5199 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5200 nv_txrx_reset(dev); 5201 writel(0, base + NvRegUnknownSetupReg6); 5202 5203 np->in_shutdown = 0; 5204 5205 /* give hw rings */ 5206 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5207 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5208 base + NvRegRingSizes); 5209 5210 writel(np->linkspeed, base + NvRegLinkSpeed); 5211 if (np->desc_ver == DESC_VER_1) 5212 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); 5213 else 5214 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); 5215 writel(np->txrxctl_bits, base + NvRegTxRxControl); 5216 writel(np->vlanctl_bits, base + NvRegVlanControl); 5217 pci_push(base); 5218 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 5219 reg_delay(dev, NvRegUnknownSetupReg5, 
NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 5220 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 5221 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 5222 5223 writel(0, base + NvRegMIIMask); 5224 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5225 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5226 5227 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 5228 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 5229 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 5230 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5231 5232 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 5233 5234 get_random_bytes(&low, sizeof(low)); 5235 low &= NVREG_SLOTTIME_MASK; 5236 if (np->desc_ver == DESC_VER_1) { 5237 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime); 5238 } else { 5239 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) { 5240 /* setup legacy backoff */ 5241 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime); 5242 } else { 5243 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime); 5244 nv_gear_backoff_reseed(dev); 5245 } 5246 } 5247 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); 5248 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); 5249 if (poll_interval == -1) { 5250 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 5251 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 5252 else 5253 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 5254 } 5255 else 5256 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 5257 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5258 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 5259 base + NvRegAdapterControl); 5260 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); 5261 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); 5262 if (np->wolenabled) 5263 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 5264 5265 i = readl(base + NvRegPowerState); 5266 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 5267 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 5268 5269 pci_push(base); 5270 udelay(10); 5271 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); 5272 5273 nv_disable_hw_interrupts(dev, np->irqmask); 5274 pci_push(base); 5275 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5276 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5277 pci_push(base); 5278 5279 if (nv_request_irq(dev, 0)) { 5280 goto out_drain; 5281 } 5282 5283 /* ask for interrupts */ 5284 nv_enable_hw_interrupts(dev, np->irqmask); 5285 5286 spin_lock_irq(&np->lock); 5287 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 5288 writel(0, base + NvRegMulticastAddrB); 5289 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 5290 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 5291 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5292 /* One manual link speed update: Interrupts are enabled, future link 5293 * speed changes cause interrupts and are handled by nv_link_irq(). 
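	 * The read-and-acknowledge of NvRegMIIStatus just below clears any
	 * link event that was latched before the mask was armed, so the
	 * manual update does not race with a stale interrupt.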
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rxtx(dev);
	netif_start_queue(dev);
	nv_napi_enable(dev);

	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		mod_timer(&np->stats_poll,
			  round_jiffies(jiffies + STATS_INTERVAL));

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	nv_drain_rxtx(dev);
	return ret;
}

static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	nv_napi_disable(dev);
	synchronize_irq(np->pci_dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	nv_drain_rxtx(dev);

	if (np->wolenabled || !phy_power_down) {
		nv_txrx_gate(dev, false);
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	} else {
		/* power down phy */
		mii_rw(dev, np->phyaddr, MII_BMCR,
		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
		nv_txrx_gate(dev, true);
	}

	/* FIXME: power down nic */

	return 0;
}

static const struct net_device_ops nv_netdev_ops = {
	.ndo_open		= nv_open,
	.ndo_stop		= nv_close,
	.ndo_get_stats		= nv_get_stats,
	.ndo_start_xmit		= nv_start_xmit,
	.ndo_tx_timeout		= nv_tx_timeout,
	.ndo_change_mtu		= nv_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= nv_set_mac_address,
	.ndo_set_multicast_list	= nv_set_multicast,
	.ndo_vlan_rx_register	= nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= nv_poll_controller,
#endif
};

static const struct net_device_ops nv_netdev_ops_optimized = {
	.ndo_open		= nv_open,
	.ndo_stop		= nv_close,
	.ndo_get_stats		= nv_get_stats,
	.ndo_start_xmit		= nv_start_xmit_optimized,
	.ndo_tx_timeout		= nv_tx_timeout,
	.ndo_change_mtu		= nv_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= nv_set_mac_address,
	.ndo_set_multicast_list	= nv_set_multicast,
	.ndo_vlan_rx_register	= nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= nv_poll_controller,
#endif
};
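/*
 * Editorial note (not in the original source): the two net_device_ops
 * tables above are identical except for .ndo_start_xmit; nv_probe()
 * below selects one or the other based on the descriptor format, via
 * the nv_optimized() helper defined earlier in this file:
 *
 *	if (!nv_optimized(np))
 *		dev->netdev_ops = &nv_netdev_ops;
 *	else
 *		dev->netdev_ops = &nv_netdev_ops_optimized;
 */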
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;
	static int printed_version;

	if (!printed_version++)
		printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
		       " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->dev = dev;
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err)
		goto out_free;

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		np->register_size = NV_PCI_REGSZ_VER3;
	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
			pci_name(pci_dev), i, (void *)pci_resource_start(pci_dev, i),
			pci_resource_len(pci_dev, i),
			pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
		    pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "Couldn't find register window\n");
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;
	/* copy of device id */
	np->device_id = id->device;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
				dev_printk(KERN_INFO, &pci_dev->dev,
					   "64-bit DMA failed, using 32-bit addressing\n");
			else
				dev->features |= NETIF_F_HIGHDMA;
			if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
				dev_printk(KERN_INFO, &pci_dev->dev,
					   "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;
	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->rx_csum = 1;
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
		dev->features |= NETIF_F_GRO;
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;

	if (!nv_optimized(np))
		dev->netdev_ops = &nv_netdev_ops;
	else
		dev->netdev_ops = &nv_netdev_ops_optimized;

	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
	SET_ETHTOOL_OPS(dev, &ops);
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
		/*
		 * Set orig mac address back to the reversed version.
		 * This flag will be cleared during low power transition.
		 * Therefore, we should always put back the reversed address.
		 */
		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
				  (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
		printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		dev_printk(KERN_ERR, &pci_dev->dev,
			   "Invalid Mac address detected: %pM\n",
			   dev->dev_addr);
		dev_printk(KERN_ERR, &pci_dev->dev,
			   "Please complain to your hardware vendor. Switching to a random MAC.\n");
		random_ether_addr(dev->dev_addr);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
		pci_name(pci_dev), dev->dev_addr);

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* Workaround current PCI init glitch: wakeup bits aren't
	 * being set from PCI PM capability.
	 */
	device_init_wakeup(&pci_dev->dev, 1);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1)
		np->tx_flags = NV_TX_VALID;
	else
		np->tx_flags = NV_TX2_VALID;

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi)
		np->msi_flags |= NV_MSI_CAPABLE;
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		/* msi-x has had reported issues when the irqmask is
		 * modified (as napi does), therefore it is disabled
		 * for now
		 */
#if 0
		np->msi_flags |= NV_MSI_X_CAPABLE;
#endif
	}

	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
		/* start off in throughput mode */
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		/* remove support for msix mode */
		np->msi_flags &= ~NV_MSI_X_CAPABLE;
	} else {
		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
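	/*
	 * Editorial note (not in the original source): the low bits OR-ed
	 * into np->msi_flags above appear to encode the number of MSI-X
	 * vectors the driver would request - one vector in CPU mode, three
	 * (rx, tx, other) in throughput mode - alongside the
	 * NV_MSI*_CAPABLE capability bits.
	 */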
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* Limit the number of tx's outstanding for hw bug */
	if (id->driver_data & DEV_NEED_TX_LIMIT) {
		np->tx_limit = 1;
		if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
		    pci_dev->revision >= 0xA2)
			np->tx_limit = 0;
	}

	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
		    nv_mgmt_acquire_sema(dev) &&
		    nv_mgmt_get_version(dev)) {
			np->mac_in_use = 1;
			if (np->mgmt_version > 0) {
				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
			}
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
				pci_name(pci_dev), np->mac_in_use);
			/* did the management unit set up the phy already? */
			if (np->mac_in_use &&
			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
				/* phy is inited by mgmt unit */
				phyinitialized = 1;
				dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
					pci_name(pci_dev));
			} else {
				/* we need to init the phy */
			}
		}
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;

		/* Realtek hardcoded phy id1 to all zeros on certain phys */
		if (np->phy_oui == PHY_OUI_REALTEK2)
			np->phy_oui = PHY_OUI_REALTEK;
		/* Setup phy revision for Realtek */
		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;

		break;
	}
	if (i == 33) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "open: Could not find a valid PHY.\n");
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}
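	/*
	 * Editorial note (not in the original source): because the
	 * discovery loop above runs i from 1 to 32 and masks with 0x1F,
	 * PHY addresses are probed in the order 1..31 and finally 0, so
	 * address 0 is only tried as a last resort; i == 33 means all 32
	 * addresses failed.
	 */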
	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "unable to register netdev: %d\n", err);
		goto out_error;
	}

	dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
		   "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		   dev->name,
		   np->phy_oui,
		   np->phyaddr,
		   dev->dev_addr[0],
		   dev->dev_addr[1],
		   dev->dev_addr[2],
		   dev->dev_addr[3],
		   dev->dev_addr[4],
		   dev->dev_addr[5]);

	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		   dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
			"csum " : "",
		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
			"vlan " : "",
		   id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		   id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		   id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		   np->gigabit == PHY_GIGABIT ? "gbit " : "",
		   np->need_linktimer ? "lnktim " : "",
		   np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		   np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		   np->desc_ver);

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}

static void nv_restore_phy(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u16 phy_reserved, mii_control;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT8;
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);

		/* restart auto negotiation */
		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
	}
}
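/*
 * Editorial note (not in the original source): nv_restore_mac_addr()
 * below undoes the byte-order workaround applied in nv_probe(): it
 * writes the (possibly reversed) MAC the BIOS programmed back into the
 * address registers and clears the workaround bit, so that a later
 * probe - or a kernel started via kexec - reads a sane address. It is
 * called from both nv_remove() and nv_shutdown().
 */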
static void nv_restore_mac_addr(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);
}

static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	unregister_netdev(dev);

	nv_restore_mac_addr(pci_dev);

	/* restore any phy related changes */
	nv_restore_phy(dev);

	nv_mgmt_release_sema(dev);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}

#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;

	if (netif_running(dev)) {
		/* Gross. */
		nv_close(dev);
	}
	netif_device_detach(dev);

	/* save non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		np->saved_config_space[i] = readl(base + i*sizeof(u32));

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i, rc = 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* restore non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		writel(np->saved_config_space[i], base + i*sizeof(u32));

	if (np->driver_data & DEV_NEED_MSI_FIX)
		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);

	/* restore phy state, including autoneg */
	phy_init(dev);

	netif_device_attach(dev);
	if (netif_running(dev)) {
		rc = nv_open(dev);
		nv_set_multicast(dev);
	}
	return rc;
}

static void nv_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev))
		nv_close(dev);

	/*
	 * Restore the MAC so a kernel started by kexec won't get confused.
	 * If we really go for poweroff, we must not restore the MAC,
	 * otherwise the MAC for WOL will be reversed at least on some boards.
	 */
	if (system_state != SYSTEM_POWER_OFF)
		nv_restore_mac_addr(pdev);

	pci_disable_device(pdev);
	/*
	 * Apparently it is not possible to reinitialise from D3 hot,
	 * only put the device into D3 if we really go for poweroff.
	 */
	if (system_state == SYSTEM_POWER_OFF) {
		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
			pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#else
#define nv_suspend NULL
#define nv_shutdown NULL
#define nv_resume NULL
#endif /* CONFIG_PM */

static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x01C3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0066),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00D6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0086),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x008C),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00E6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00DF),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0056),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0057),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0037),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0038),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0268),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0269),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0372),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0373),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E5),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E6),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0450),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0451),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0452),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0453),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054C),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054E),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054F),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DC),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DD),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0760),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0761),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0762),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0763),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB0),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB1),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB2),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB3),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP89 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0D7D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
	},
	{0,},
};

static struct pci_driver driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= __devexit_p(nv_remove),
	.suspend	= nv_suspend,
	.resume		= nv_resume,
	.shutdown	= nv_shutdown,
};

static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "The interval determines how frequently the timer interrupt is generated: value = (time_in_micro_secs * 100) / (2^10). Min is 0 and Max is 65535.");
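/*
 * Editorial note (not in the original source): working the formula
 * above in reverse, a poll_interval value of 97 corresponds to roughly
 * 97 * 2^10 / 100 = 993 microseconds, i.e. about one timer interrupt
 * per millisecond.
 */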
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);
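/*
 * Example usage (illustrative, not part of the original source):
 * loading the module with explicit parameters from a shell, e.g.
 *
 *	modprobe forcedeth optimization_mode=2 msi=1 poll_interval=97
 *
 * Note that dynamic optimization_mode (2) only takes effect on devices
 * that do not need the timer IRQ, as handled in nv_probe() above.
 */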