Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.27-rc6 2494 lines 73 kB view raw
1/* 2 * tc35815.c: A TOSHIBA TC35815CF PCI 10/100Mbps ethernet driver for linux. 3 * 4 * Based on skelton.c by Donald Becker. 5 * 6 * This driver is a replacement of older and less maintained version. 7 * This is a header of the older version: 8 * -----<snip>----- 9 * Copyright 2001 MontaVista Software Inc. 10 * Author: MontaVista Software, Inc. 11 * ahennessy@mvista.com 12 * Copyright (C) 2000-2001 Toshiba Corporation 13 * static const char *version = 14 * "tc35815.c:v0.00 26/07/2000 by Toshiba Corporation\n"; 15 * -----<snip>----- 16 * 17 * This file is subject to the terms and conditions of the GNU General Public 18 * License. See the file "COPYING" in the main directory of this archive 19 * for more details. 20 * 21 * (C) Copyright TOSHIBA CORPORATION 2004-2005 22 * All Rights Reserved. 23 */ 24 25#ifdef TC35815_NAPI 26#define DRV_VERSION "1.37-NAPI" 27#else 28#define DRV_VERSION "1.37" 29#endif 30static const char *version = "tc35815.c:v" DRV_VERSION "\n"; 31#define MODNAME "tc35815" 32 33#include <linux/module.h> 34#include <linux/kernel.h> 35#include <linux/types.h> 36#include <linux/fcntl.h> 37#include <linux/interrupt.h> 38#include <linux/ioport.h> 39#include <linux/in.h> 40#include <linux/slab.h> 41#include <linux/string.h> 42#include <linux/spinlock.h> 43#include <linux/errno.h> 44#include <linux/init.h> 45#include <linux/netdevice.h> 46#include <linux/etherdevice.h> 47#include <linux/skbuff.h> 48#include <linux/delay.h> 49#include <linux/pci.h> 50#include <linux/phy.h> 51#include <linux/workqueue.h> 52#include <linux/platform_device.h> 53#include <asm/io.h> 54#include <asm/byteorder.h> 55 56/* First, a few definitions that the brave might change. */ 57 58#define GATHER_TXINT /* On-Demand Tx Interrupt */ 59#define WORKAROUND_LOSTCAR 60#define WORKAROUND_100HALF_PROMISC 61/* #define TC35815_USE_PACKEDBUFFER */ 62 63enum tc35815_chiptype { 64 TC35815CF = 0, 65 TC35815_NWU, 66 TC35815_TX4939, 67}; 68 69/* indexed by tc35815_chiptype, above */ 70static const struct { 71 const char *name; 72} chip_info[] __devinitdata = { 73 { "TOSHIBA TC35815CF 10/100BaseTX" }, 74 { "TOSHIBA TC35815 with Wake on LAN" }, 75 { "TOSHIBA TC35815/TX4939" }, 76}; 77 78static const struct pci_device_id tc35815_pci_tbl[] = { 79 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF }, 80 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU }, 81 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 }, 82 {0,} 83}; 84MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl); 85 86/* see MODULE_PARM_DESC */ 87static struct tc35815_options { 88 int speed; 89 int duplex; 90} options; 91 92/* 93 * Registers 94 */ 95struct tc35815_regs { 96 __u32 DMA_Ctl; /* 0x00 */ 97 __u32 TxFrmPtr; 98 __u32 TxThrsh; 99 __u32 TxPollCtr; 100 __u32 BLFrmPtr; 101 __u32 RxFragSize; 102 __u32 Int_En; 103 __u32 FDA_Bas; 104 __u32 FDA_Lim; /* 0x20 */ 105 __u32 Int_Src; 106 __u32 unused0[2]; 107 __u32 PauseCnt; 108 __u32 RemPauCnt; 109 __u32 TxCtlFrmStat; 110 __u32 unused1; 111 __u32 MAC_Ctl; /* 0x40 */ 112 __u32 CAM_Ctl; 113 __u32 Tx_Ctl; 114 __u32 Tx_Stat; 115 __u32 Rx_Ctl; 116 __u32 Rx_Stat; 117 __u32 MD_Data; 118 __u32 MD_CA; 119 __u32 CAM_Adr; /* 0x60 */ 120 __u32 CAM_Data; 121 __u32 CAM_Ena; 122 __u32 PROM_Ctl; 123 __u32 PROM_Data; 124 __u32 Algn_Cnt; 125 __u32 CRC_Cnt; 126 __u32 Miss_Cnt; 127}; 128 129/* 130 * Bit assignments 131 */ 132/* DMA_Ctl bit asign ------------------------------------------------------- */ 133#define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */ 134#define DMA_RxAlign_1 0x00400000 135#define DMA_RxAlign_2 0x00800000 136#define DMA_RxAlign_3 0x00c00000 137#define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */ 138#define DMA_IntMask 0x00040000 /* 1:Interupt mask */ 139#define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */ 140#define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */ 141#define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */ 142#define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */ 143#define DMA_TestMode 0x00002000 /* 1:Test Mode */ 144#define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */ 145#define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */ 146 147/* RxFragSize bit asign ---------------------------------------------------- */ 148#define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */ 149#define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */ 150 151/* MAC_Ctl bit asign ------------------------------------------------------- */ 152#define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */ 153#define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */ 154#define MAC_MissRoll 0x00000400 /* 1:Missed Roll */ 155#define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */ 156#define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */ 157#define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/ 158#define MAC_Conn_Mll 0x00000040 /*10: (Mll clock) */ 159#define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */ 160#define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */ 161#define MAC_Reset 0x00000004 /* 1:Software Reset */ 162#define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */ 163#define MAC_HaltReq 0x00000001 /* 1:Halt request */ 164 165/* PROM_Ctl bit asign ------------------------------------------------------ */ 166#define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */ 167#define PROM_Read 0x00004000 /*10:Read operation */ 168#define PROM_Write 0x00002000 /*01:Write operation */ 169#define PROM_Erase 0x00006000 /*11:Erase operation */ 170 /*00:Enable or Disable Writting, */ 171 /* as specified in PROM_Addr. */ 172#define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */ 173 /*00xxxx: disable */ 174 175/* CAM_Ctl bit asign ------------------------------------------------------- */ 176#define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */ 177#define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/ 178 /* accept other */ 179#define CAM_BroadAcc 0x00000004 /* 1:Broadcast assept */ 180#define CAM_GroupAcc 0x00000002 /* 1:Multicast assept */ 181#define CAM_StationAcc 0x00000001 /* 1:unicast accept */ 182 183/* CAM_Ena bit asign ------------------------------------------------------- */ 184#define CAM_ENTRY_MAX 21 /* CAM Data entry max count */ 185#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */ 186#define CAM_Ena_Bit(index) (1 << (index)) 187#define CAM_ENTRY_DESTINATION 0 188#define CAM_ENTRY_SOURCE 1 189#define CAM_ENTRY_MACCTL 20 190 191/* Tx_Ctl bit asign -------------------------------------------------------- */ 192#define Tx_En 0x00000001 /* 1:Transmit enable */ 193#define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */ 194#define Tx_NoPad 0x00000004 /* 1:Suppress Padding */ 195#define Tx_NoCRC 0x00000008 /* 1:Suppress Padding */ 196#define Tx_FBack 0x00000010 /* 1:Fast Back-off */ 197#define Tx_EnUnder 0x00000100 /* 1:Enable Underrun */ 198#define Tx_EnExDefer 0x00000200 /* 1:Enable Excessive Deferral */ 199#define Tx_EnLCarr 0x00000400 /* 1:Enable Lost Carrier */ 200#define Tx_EnExColl 0x00000800 /* 1:Enable Excessive Collision */ 201#define Tx_EnLateColl 0x00001000 /* 1:Enable Late Collision */ 202#define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */ 203#define Tx_EnComp 0x00004000 /* 1:Enable Completion */ 204 205/* Tx_Stat bit asign ------------------------------------------------------- */ 206#define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */ 207#define Tx_ExColl 0x00000010 /* Excessive Collision */ 208#define Tx_TXDefer 0x00000020 /* Transmit Defered */ 209#define Tx_Paused 0x00000040 /* Transmit Paused */ 210#define Tx_IntTx 0x00000080 /* Interrupt on Tx */ 211#define Tx_Under 0x00000100 /* Underrun */ 212#define Tx_Defer 0x00000200 /* Deferral */ 213#define Tx_NCarr 0x00000400 /* No Carrier */ 214#define Tx_10Stat 0x00000800 /* 10Mbps Status */ 215#define Tx_LateColl 0x00001000 /* Late Collision */ 216#define Tx_TxPar 0x00002000 /* Tx Parity Error */ 217#define Tx_Comp 0x00004000 /* Completion */ 218#define Tx_Halted 0x00008000 /* Tx Halted */ 219#define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */ 220 221/* Rx_Ctl bit asign -------------------------------------------------------- */ 222#define Rx_EnGood 0x00004000 /* 1:Enable Good */ 223#define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */ 224#define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */ 225#define Rx_EnOver 0x00000400 /* 1:Enable OverFlow */ 226#define Rx_EnCRCErr 0x00000200 /* 1:Enable CRC Error */ 227#define Rx_EnAlign 0x00000100 /* 1:Enable Alignment */ 228#define Rx_IgnoreCRC 0x00000040 /* 1:Ignore CRC Value */ 229#define Rx_StripCRC 0x00000010 /* 1:Strip CRC Value */ 230#define Rx_ShortEn 0x00000008 /* 1:Short Enable */ 231#define Rx_LongEn 0x00000004 /* 1:Long Enable */ 232#define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */ 233#define Rx_RxEn 0x00000001 /* 1:Receive Intrrupt Enable */ 234 235/* Rx_Stat bit asign ------------------------------------------------------- */ 236#define Rx_Halted 0x00008000 /* Rx Halted */ 237#define Rx_Good 0x00004000 /* Rx Good */ 238#define Rx_RxPar 0x00002000 /* Rx Parity Error */ 239 /* 0x00001000 not use */ 240#define Rx_LongErr 0x00000800 /* Rx Long Error */ 241#define Rx_Over 0x00000400 /* Rx Overflow */ 242#define Rx_CRCErr 0x00000200 /* Rx CRC Error */ 243#define Rx_Align 0x00000100 /* Rx Alignment Error */ 244#define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */ 245#define Rx_IntRx 0x00000040 /* Rx Interrupt */ 246#define Rx_CtlRecd 0x00000020 /* Rx Control Receive */ 247 248#define Rx_Stat_Mask 0x0000EFC0 /* Rx All Status Mask */ 249 250/* Int_En bit asign -------------------------------------------------------- */ 251#define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */ 252#define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Ctl Complete Enable */ 253#define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */ 254#define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */ 255#define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */ 256#define Int_DParErrEn 0x00000040 /* 1:Detected Parity Error Enable */ 257#define Int_SSysErrEn 0x00000020 /* 1:Signalled System Error Enable */ 258#define Int_RMasAbtEn 0x00000010 /* 1:Received Master Abort Enable */ 259#define Int_RTargAbtEn 0x00000008 /* 1:Received Target Abort Enable */ 260#define Int_STargAbtEn 0x00000004 /* 1:Signalled Target Abort Enable */ 261#define Int_BLExEn 0x00000002 /* 1:Buffer List Exhausted Enable */ 262#define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */ 263 /* Exhausted Enable */ 264 265/* Int_Src bit asign ------------------------------------------------------- */ 266#define Int_NRabt 0x00004000 /* 1:Non Recoverable error */ 267#define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */ 268#define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */ 269#define Int_FDAEx 0x00000800 /* 1:FDA Empty & Clear */ 270#define Int_IntNRAbt 0x00000400 /* 1:Non Recoverable Abort */ 271#define Int_IntCmp 0x00000200 /* 1:MAC control packet complete */ 272#define Int_IntExBD 0x00000100 /* 1:Interrupt Extra BD & Clear */ 273#define Int_DmParErr 0x00000080 /* 1:DMA Parity Error & Clear */ 274#define Int_IntEarNot 0x00000040 /* 1:Receive Data write & Clear */ 275#define Int_SWInt 0x00000020 /* 1:Software request & Clear */ 276#define Int_IntBLEx 0x00000010 /* 1:Buffer List Empty & Clear */ 277#define Int_IntFDAEx 0x00000008 /* 1:FDA Empty & Clear */ 278#define Int_IntPCI 0x00000004 /* 1:PCI controller & Clear */ 279#define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */ 280#define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */ 281 282/* MD_CA bit asign --------------------------------------------------------- */ 283#define MD_CA_PreSup 0x00001000 /* 1:Preamble Supress */ 284#define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */ 285#define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */ 286 287 288/* 289 * Descriptors 290 */ 291 292/* Frame descripter */ 293struct FDesc { 294 volatile __u32 FDNext; 295 volatile __u32 FDSystem; 296 volatile __u32 FDStat; 297 volatile __u32 FDCtl; 298}; 299 300/* Buffer descripter */ 301struct BDesc { 302 volatile __u32 BuffData; 303 volatile __u32 BDCtl; 304}; 305 306#define FD_ALIGN 16 307 308/* Frame Descripter bit asign ---------------------------------------------- */ 309#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */ 310#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */ 311#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */ 312#define FD_FrmOpt_BigEndian 0x40000000 /* Tx/Rx */ 313#define FD_FrmOpt_IntTx 0x20000000 /* Tx only */ 314#define FD_FrmOpt_NoCRC 0x10000000 /* Tx only */ 315#define FD_FrmOpt_NoPadding 0x08000000 /* Tx only */ 316#define FD_FrmOpt_Packing 0x04000000 /* Rx only */ 317#define FD_CownsFD 0x80000000 /* FD Controller owner bit */ 318#define FD_Next_EOL 0x00000001 /* FD EOL indicator */ 319#define FD_BDCnt_SHIFT 16 320 321/* Buffer Descripter bit asign --------------------------------------------- */ 322#define BD_BuffLength_MASK 0x0000FFFF /* Recieve Data Size */ 323#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */ 324#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */ 325#define BD_CownsBD 0x80000000 /* BD Controller owner bit */ 326#define BD_RxBDID_SHIFT 16 327#define BD_RxBDSeqN_SHIFT 24 328 329 330/* Some useful constants. */ 331#undef NO_CHECK_CARRIER /* Does not check No-Carrier with TP */ 332 333#ifdef NO_CHECK_CARRIER 334#define TX_CTL_CMD (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \ 335 Tx_EnExColl | Tx_EnExDefer | Tx_EnUnder | \ 336 Tx_En) /* maybe 0x7b01 */ 337#else 338#define TX_CTL_CMD (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \ 339 Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \ 340 Tx_En) /* maybe 0x7b01 */ 341#endif 342#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \ 343 | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */ 344#define INT_EN_CMD (Int_NRAbtEn | \ 345 Int_DmParErrEn | Int_DParDEn | Int_DParErrEn | \ 346 Int_SSysErrEn | Int_RMasAbtEn | Int_RTargAbtEn | \ 347 Int_STargAbtEn | \ 348 Int_BLExEn | Int_FDAExEn) /* maybe 0xb7f*/ 349#define DMA_CTL_CMD DMA_BURST_SIZE 350#define HAVE_DMA_RXALIGN(lp) likely((lp)->chiptype != TC35815CF) 351 352/* Tuning parameters */ 353#define DMA_BURST_SIZE 32 354#define TX_THRESHOLD 1024 355/* used threshold with packet max byte for low pci transfer ability.*/ 356#define TX_THRESHOLD_MAX 1536 357/* setting threshold max value when overrun error occured this count. */ 358#define TX_THRESHOLD_KEEP_LIMIT 10 359 360/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */ 361#ifdef TC35815_USE_PACKEDBUFFER 362#define FD_PAGE_NUM 2 363#define RX_BUF_NUM 8 /* >= 2 */ 364#define RX_FD_NUM 250 /* >= 32 */ 365#define TX_FD_NUM 128 366#define RX_BUF_SIZE PAGE_SIZE 367#else /* TC35815_USE_PACKEDBUFFER */ 368#define FD_PAGE_NUM 4 369#define RX_BUF_NUM 128 /* < 256 */ 370#define RX_FD_NUM 256 /* >= 32 */ 371#define TX_FD_NUM 128 372#if RX_CTL_CMD & Rx_LongEn 373#define RX_BUF_SIZE PAGE_SIZE 374#elif RX_CTL_CMD & Rx_StripCRC 375#define RX_BUF_SIZE ALIGN(ETH_FRAME_LEN + 4 + 2, 32) /* +2: reserve */ 376#else 377#define RX_BUF_SIZE ALIGN(ETH_FRAME_LEN + 2, 32) /* +2: reserve */ 378#endif 379#endif /* TC35815_USE_PACKEDBUFFER */ 380#define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */ 381#define NAPI_WEIGHT 16 382 383struct TxFD { 384 struct FDesc fd; 385 struct BDesc bd; 386 struct BDesc unused; 387}; 388 389struct RxFD { 390 struct FDesc fd; 391 struct BDesc bd[0]; /* variable length */ 392}; 393 394struct FrFD { 395 struct FDesc fd; 396 struct BDesc bd[RX_BUF_NUM]; 397}; 398 399 400#define tc_readl(addr) ioread32(addr) 401#define tc_writel(d, addr) iowrite32(d, addr) 402 403#define TC35815_TX_TIMEOUT msecs_to_jiffies(400) 404 405/* Information that need to be kept for each controller. */ 406struct tc35815_local { 407 struct pci_dev *pci_dev; 408 409 struct net_device *dev; 410 struct napi_struct napi; 411 412 /* statistics */ 413 struct { 414 int max_tx_qlen; 415 int tx_ints; 416 int rx_ints; 417 int tx_underrun; 418 } lstats; 419 420 /* Tx control lock. This protects the transmit buffer ring 421 * state along with the "tx full" state of the driver. This 422 * means all netif_queue flow control actions are protected 423 * by this lock as well. 424 */ 425 spinlock_t lock; 426 427 struct mii_bus mii_bus; 428 struct phy_device *phy_dev; 429 int duplex; 430 int speed; 431 int link; 432 struct work_struct restart_work; 433 434 /* 435 * Transmitting: Batch Mode. 436 * 1 BD in 1 TxFD. 437 * Receiving: Packing Mode. (TC35815_USE_PACKEDBUFFER) 438 * 1 circular FD for Free Buffer List. 439 * RX_BUF_NUM BD in Free Buffer FD. 440 * One Free Buffer BD has PAGE_SIZE data buffer. 441 * Or Non-Packing Mode. 442 * 1 circular FD for Free Buffer List. 443 * RX_BUF_NUM BD in Free Buffer FD. 444 * One Free Buffer BD has ETH_FRAME_LEN data buffer. 445 */ 446 void *fd_buf; /* for TxFD, RxFD, FrFD */ 447 dma_addr_t fd_buf_dma; 448 struct TxFD *tfd_base; 449 unsigned int tfd_start; 450 unsigned int tfd_end; 451 struct RxFD *rfd_base; 452 struct RxFD *rfd_limit; 453 struct RxFD *rfd_cur; 454 struct FrFD *fbl_ptr; 455#ifdef TC35815_USE_PACKEDBUFFER 456 unsigned char fbl_curid; 457 void *data_buf[RX_BUF_NUM]; /* packing */ 458 dma_addr_t data_buf_dma[RX_BUF_NUM]; 459 struct { 460 struct sk_buff *skb; 461 dma_addr_t skb_dma; 462 } tx_skbs[TX_FD_NUM]; 463#else 464 unsigned int fbl_count; 465 struct { 466 struct sk_buff *skb; 467 dma_addr_t skb_dma; 468 } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM]; 469#endif 470 u32 msg_enable; 471 enum tc35815_chiptype chiptype; 472}; 473 474static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt) 475{ 476 return lp->fd_buf_dma + ((u8 *)virt - (u8 *)lp->fd_buf); 477} 478#ifdef DEBUG 479static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus) 480{ 481 return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma)); 482} 483#endif 484#ifdef TC35815_USE_PACKEDBUFFER 485static inline void *rxbuf_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus) 486{ 487 int i; 488 for (i = 0; i < RX_BUF_NUM; i++) { 489 if (bus >= lp->data_buf_dma[i] && 490 bus < lp->data_buf_dma[i] + PAGE_SIZE) 491 return (void *)((u8 *)lp->data_buf[i] + 492 (bus - lp->data_buf_dma[i])); 493 } 494 return NULL; 495} 496 497#define TC35815_DMA_SYNC_ONDEMAND 498static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle) 499{ 500#ifdef TC35815_DMA_SYNC_ONDEMAND 501 void *buf; 502 /* pci_map + pci_dma_sync will be more effective than 503 * pci_alloc_consistent on some archs. */ 504 buf = (void *)__get_free_page(GFP_ATOMIC); 505 if (!buf) 506 return NULL; 507 *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, 508 PCI_DMA_FROMDEVICE); 509 if (pci_dma_mapping_error(hwdev, *dma_handle)) { 510 free_page((unsigned long)buf); 511 return NULL; 512 } 513 return buf; 514#else 515 return pci_alloc_consistent(hwdev, PAGE_SIZE, dma_handle); 516#endif 517} 518 519static void free_rxbuf_page(struct pci_dev *hwdev, void *buf, dma_addr_t dma_handle) 520{ 521#ifdef TC35815_DMA_SYNC_ONDEMAND 522 pci_unmap_single(hwdev, dma_handle, PAGE_SIZE, PCI_DMA_FROMDEVICE); 523 free_page((unsigned long)buf); 524#else 525 pci_free_consistent(hwdev, PAGE_SIZE, buf, dma_handle); 526#endif 527} 528#else /* TC35815_USE_PACKEDBUFFER */ 529static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, 530 struct pci_dev *hwdev, 531 dma_addr_t *dma_handle) 532{ 533 struct sk_buff *skb; 534 skb = dev_alloc_skb(RX_BUF_SIZE); 535 if (!skb) 536 return NULL; 537 *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE, 538 PCI_DMA_FROMDEVICE); 539 if (pci_dma_mapping_error(hwdev, *dma_handle)) { 540 dev_kfree_skb_any(skb); 541 return NULL; 542 } 543 skb_reserve(skb, 2); /* make IP header 4byte aligned */ 544 return skb; 545} 546 547static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle) 548{ 549 pci_unmap_single(hwdev, dma_handle, RX_BUF_SIZE, 550 PCI_DMA_FROMDEVICE); 551 dev_kfree_skb_any(skb); 552} 553#endif /* TC35815_USE_PACKEDBUFFER */ 554 555/* Index to functions, as function prototypes. */ 556 557static int tc35815_open(struct net_device *dev); 558static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev); 559static irqreturn_t tc35815_interrupt(int irq, void *dev_id); 560#ifdef TC35815_NAPI 561static int tc35815_rx(struct net_device *dev, int limit); 562static int tc35815_poll(struct napi_struct *napi, int budget); 563#else 564static void tc35815_rx(struct net_device *dev); 565#endif 566static void tc35815_txdone(struct net_device *dev); 567static int tc35815_close(struct net_device *dev); 568static struct net_device_stats *tc35815_get_stats(struct net_device *dev); 569static void tc35815_set_multicast_list(struct net_device *dev); 570static void tc35815_tx_timeout(struct net_device *dev); 571static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 572#ifdef CONFIG_NET_POLL_CONTROLLER 573static void tc35815_poll_controller(struct net_device *dev); 574#endif 575static const struct ethtool_ops tc35815_ethtool_ops; 576 577/* Example routines you must write ;->. */ 578static void tc35815_chip_reset(struct net_device *dev); 579static void tc35815_chip_init(struct net_device *dev); 580 581#ifdef DEBUG 582static void panic_queues(struct net_device *dev); 583#endif 584 585static void tc35815_restart_work(struct work_struct *work); 586 587static int tc_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 588{ 589 struct net_device *dev = bus->priv; 590 struct tc35815_regs __iomem *tr = 591 (struct tc35815_regs __iomem *)dev->base_addr; 592 unsigned long timeout = jiffies + 10; 593 594 tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA); 595 while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { 596 if (time_after(jiffies, timeout)) 597 return -EIO; 598 cpu_relax(); 599 } 600 return tc_readl(&tr->MD_Data) & 0xffff; 601} 602 603static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val) 604{ 605 struct net_device *dev = bus->priv; 606 struct tc35815_regs __iomem *tr = 607 (struct tc35815_regs __iomem *)dev->base_addr; 608 unsigned long timeout = jiffies + 10; 609 610 tc_writel(val, &tr->MD_Data); 611 tc_writel(MD_CA_Busy | MD_CA_Wr | (mii_id << 5) | (regnum & 0x1f), 612 &tr->MD_CA); 613 while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { 614 if (time_after(jiffies, timeout)) 615 return -EIO; 616 cpu_relax(); 617 } 618 return 0; 619} 620 621static void tc_handle_link_change(struct net_device *dev) 622{ 623 struct tc35815_local *lp = netdev_priv(dev); 624 struct phy_device *phydev = lp->phy_dev; 625 unsigned long flags; 626 int status_change = 0; 627 628 spin_lock_irqsave(&lp->lock, flags); 629 if (phydev->link && 630 (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) { 631 struct tc35815_regs __iomem *tr = 632 (struct tc35815_regs __iomem *)dev->base_addr; 633 u32 reg; 634 635 reg = tc_readl(&tr->MAC_Ctl); 636 reg |= MAC_HaltReq; 637 tc_writel(reg, &tr->MAC_Ctl); 638 if (phydev->duplex == DUPLEX_FULL) 639 reg |= MAC_FullDup; 640 else 641 reg &= ~MAC_FullDup; 642 tc_writel(reg, &tr->MAC_Ctl); 643 reg &= ~MAC_HaltReq; 644 tc_writel(reg, &tr->MAC_Ctl); 645 646 /* 647 * TX4939 PCFG.SPEEDn bit will be changed on 648 * NETDEV_CHANGE event. 649 */ 650 651#if !defined(NO_CHECK_CARRIER) && defined(WORKAROUND_LOSTCAR) 652 /* 653 * WORKAROUND: enable LostCrS only if half duplex 654 * operation. 655 * (TX4939 does not have EnLCarr) 656 */ 657 if (phydev->duplex == DUPLEX_HALF && 658 lp->chiptype != TC35815_TX4939) 659 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, 660 &tr->Tx_Ctl); 661#endif 662 663 lp->speed = phydev->speed; 664 lp->duplex = phydev->duplex; 665 status_change = 1; 666 } 667 668 if (phydev->link != lp->link) { 669 if (phydev->link) { 670#ifdef WORKAROUND_100HALF_PROMISC 671 /* delayed promiscuous enabling */ 672 if (dev->flags & IFF_PROMISC) 673 tc35815_set_multicast_list(dev); 674#endif 675 } else { 676 lp->speed = 0; 677 lp->duplex = -1; 678 } 679 lp->link = phydev->link; 680 681 status_change = 1; 682 } 683 spin_unlock_irqrestore(&lp->lock, flags); 684 685 if (status_change && netif_msg_link(lp)) { 686 phy_print_status(phydev); 687#ifdef DEBUG 688 printk(KERN_DEBUG 689 "%s: MII BMCR %04x BMSR %04x LPA %04x\n", 690 dev->name, 691 phy_read(phydev, MII_BMCR), 692 phy_read(phydev, MII_BMSR), 693 phy_read(phydev, MII_LPA)); 694#endif 695 } 696} 697 698static int tc_mii_probe(struct net_device *dev) 699{ 700 struct tc35815_local *lp = netdev_priv(dev); 701 struct phy_device *phydev = NULL; 702 int phy_addr; 703 u32 dropmask; 704 705 /* find the first phy */ 706 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { 707 if (lp->mii_bus.phy_map[phy_addr]) { 708 if (phydev) { 709 printk(KERN_ERR "%s: multiple PHYs found\n", 710 dev->name); 711 return -EINVAL; 712 } 713 phydev = lp->mii_bus.phy_map[phy_addr]; 714 break; 715 } 716 } 717 718 if (!phydev) { 719 printk(KERN_ERR "%s: no PHY found\n", dev->name); 720 return -ENODEV; 721 } 722 723 /* attach the mac to the phy */ 724 phydev = phy_connect(dev, phydev->dev.bus_id, 725 &tc_handle_link_change, 0, 726 lp->chiptype == TC35815_TX4939 ? 727 PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); 728 if (IS_ERR(phydev)) { 729 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 730 return PTR_ERR(phydev); 731 } 732 printk(KERN_INFO "%s: attached PHY driver [%s] " 733 "(mii_bus:phy_addr=%s, id=%x)\n", 734 dev->name, phydev->drv->name, phydev->dev.bus_id, 735 phydev->phy_id); 736 737 /* mask with MAC supported features */ 738 phydev->supported &= PHY_BASIC_FEATURES; 739 dropmask = 0; 740 if (options.speed == 10) 741 dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; 742 else if (options.speed == 100) 743 dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full; 744 if (options.duplex == 1) 745 dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full; 746 else if (options.duplex == 2) 747 dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half; 748 phydev->supported &= ~dropmask; 749 phydev->advertising = phydev->supported; 750 751 lp->link = 0; 752 lp->speed = 0; 753 lp->duplex = -1; 754 lp->phy_dev = phydev; 755 756 return 0; 757} 758 759static int tc_mii_init(struct net_device *dev) 760{ 761 struct tc35815_local *lp = netdev_priv(dev); 762 int err; 763 int i; 764 765 lp->mii_bus.name = "tc35815_mii_bus"; 766 lp->mii_bus.read = tc_mdio_read; 767 lp->mii_bus.write = tc_mdio_write; 768 snprintf(lp->mii_bus.id, MII_BUS_ID_SIZE, "%x", 769 (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn); 770 lp->mii_bus.priv = dev; 771 lp->mii_bus.dev = &lp->pci_dev->dev; 772 lp->mii_bus.irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); 773 if (!lp->mii_bus.irq) { 774 err = -ENOMEM; 775 goto err_out; 776 } 777 778 for (i = 0; i < PHY_MAX_ADDR; i++) 779 lp->mii_bus.irq[i] = PHY_POLL; 780 781 err = mdiobus_register(&lp->mii_bus); 782 if (err) 783 goto err_out_free_mdio_irq; 784 err = tc_mii_probe(dev); 785 if (err) 786 goto err_out_unregister_bus; 787 return 0; 788 789err_out_unregister_bus: 790 mdiobus_unregister(&lp->mii_bus); 791err_out_free_mdio_irq: 792 kfree(lp->mii_bus.irq); 793err_out: 794 return err; 795} 796 797#ifdef CONFIG_CPU_TX49XX 798/* 799 * Find a platform_device providing a MAC address. The platform code 800 * should provide a "tc35815-mac" device with a MAC address in its 801 * platform_data. 802 */ 803static int __devinit tc35815_mac_match(struct device *dev, void *data) 804{ 805 struct platform_device *plat_dev = to_platform_device(dev); 806 struct pci_dev *pci_dev = data; 807 unsigned int id = pci_dev->irq; 808 return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id; 809} 810 811static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) 812{ 813 struct tc35815_local *lp = netdev_priv(dev); 814 struct device *pd = bus_find_device(&platform_bus_type, NULL, 815 lp->pci_dev, tc35815_mac_match); 816 if (pd) { 817 if (pd->platform_data) 818 memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN); 819 put_device(pd); 820 return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV; 821 } 822 return -ENODEV; 823} 824#else 825static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) 826{ 827 return -ENODEV; 828} 829#endif 830 831static int __devinit tc35815_init_dev_addr(struct net_device *dev) 832{ 833 struct tc35815_regs __iomem *tr = 834 (struct tc35815_regs __iomem *)dev->base_addr; 835 int i; 836 837 while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) 838 ; 839 for (i = 0; i < 6; i += 2) { 840 unsigned short data; 841 tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl); 842 while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) 843 ; 844 data = tc_readl(&tr->PROM_Data); 845 dev->dev_addr[i] = data & 0xff; 846 dev->dev_addr[i+1] = data >> 8; 847 } 848 if (!is_valid_ether_addr(dev->dev_addr)) 849 return tc35815_read_plat_dev_addr(dev); 850 return 0; 851} 852 853static int __devinit tc35815_init_one(struct pci_dev *pdev, 854 const struct pci_device_id *ent) 855{ 856 void __iomem *ioaddr = NULL; 857 struct net_device *dev; 858 struct tc35815_local *lp; 859 int rc; 860 DECLARE_MAC_BUF(mac); 861 862 static int printed_version; 863 if (!printed_version++) { 864 printk(version); 865 dev_printk(KERN_DEBUG, &pdev->dev, 866 "speed:%d duplex:%d\n", 867 options.speed, options.duplex); 868 } 869 870 if (!pdev->irq) { 871 dev_warn(&pdev->dev, "no IRQ assigned.\n"); 872 return -ENODEV; 873 } 874 875 /* dev zeroed in alloc_etherdev */ 876 dev = alloc_etherdev(sizeof(*lp)); 877 if (dev == NULL) { 878 dev_err(&pdev->dev, "unable to alloc new ethernet\n"); 879 return -ENOMEM; 880 } 881 SET_NETDEV_DEV(dev, &pdev->dev); 882 lp = netdev_priv(dev); 883 lp->dev = dev; 884 885 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 886 rc = pcim_enable_device(pdev); 887 if (rc) 888 goto err_out; 889 rc = pcim_iomap_regions(pdev, 1 << 1, MODNAME); 890 if (rc) 891 goto err_out; 892 pci_set_master(pdev); 893 ioaddr = pcim_iomap_table(pdev)[1]; 894 895 /* Initialize the device structure. */ 896 dev->open = tc35815_open; 897 dev->hard_start_xmit = tc35815_send_packet; 898 dev->stop = tc35815_close; 899 dev->get_stats = tc35815_get_stats; 900 dev->set_multicast_list = tc35815_set_multicast_list; 901 dev->do_ioctl = tc35815_ioctl; 902 dev->ethtool_ops = &tc35815_ethtool_ops; 903 dev->tx_timeout = tc35815_tx_timeout; 904 dev->watchdog_timeo = TC35815_TX_TIMEOUT; 905#ifdef TC35815_NAPI 906 netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); 907#endif 908#ifdef CONFIG_NET_POLL_CONTROLLER 909 dev->poll_controller = tc35815_poll_controller; 910#endif 911 912 dev->irq = pdev->irq; 913 dev->base_addr = (unsigned long)ioaddr; 914 915 INIT_WORK(&lp->restart_work, tc35815_restart_work); 916 spin_lock_init(&lp->lock); 917 lp->pci_dev = pdev; 918 lp->chiptype = ent->driver_data; 919 920 lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK; 921 pci_set_drvdata(pdev, dev); 922 923 /* Soft reset the chip. */ 924 tc35815_chip_reset(dev); 925 926 /* Retrieve the ethernet address. */ 927 if (tc35815_init_dev_addr(dev)) { 928 dev_warn(&pdev->dev, "not valid ether addr\n"); 929 random_ether_addr(dev->dev_addr); 930 } 931 932 rc = register_netdev(dev); 933 if (rc) 934 goto err_out; 935 936 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 937 printk(KERN_INFO "%s: %s at 0x%lx, %s, IRQ %d\n", 938 dev->name, 939 chip_info[ent->driver_data].name, 940 dev->base_addr, 941 print_mac(mac, dev->dev_addr), 942 dev->irq); 943 944 rc = tc_mii_init(dev); 945 if (rc) 946 goto err_out_unregister; 947 948 return 0; 949 950err_out_unregister: 951 unregister_netdev(dev); 952err_out: 953 free_netdev(dev); 954 return rc; 955} 956 957 958static void __devexit tc35815_remove_one(struct pci_dev *pdev) 959{ 960 struct net_device *dev = pci_get_drvdata(pdev); 961 struct tc35815_local *lp = netdev_priv(dev); 962 963 phy_disconnect(lp->phy_dev); 964 mdiobus_unregister(&lp->mii_bus); 965 kfree(lp->mii_bus.irq); 966 unregister_netdev(dev); 967 free_netdev(dev); 968 pci_set_drvdata(pdev, NULL); 969} 970 971static int 972tc35815_init_queues(struct net_device *dev) 973{ 974 struct tc35815_local *lp = netdev_priv(dev); 975 int i; 976 unsigned long fd_addr; 977 978 if (!lp->fd_buf) { 979 BUG_ON(sizeof(struct FDesc) + 980 sizeof(struct BDesc) * RX_BUF_NUM + 981 sizeof(struct FDesc) * RX_FD_NUM + 982 sizeof(struct TxFD) * TX_FD_NUM > 983 PAGE_SIZE * FD_PAGE_NUM); 984 985 lp->fd_buf = pci_alloc_consistent(lp->pci_dev, 986 PAGE_SIZE * FD_PAGE_NUM, 987 &lp->fd_buf_dma); 988 if (!lp->fd_buf) 989 return -ENOMEM; 990 for (i = 0; i < RX_BUF_NUM; i++) { 991#ifdef TC35815_USE_PACKEDBUFFER 992 lp->data_buf[i] = 993 alloc_rxbuf_page(lp->pci_dev, 994 &lp->data_buf_dma[i]); 995 if (!lp->data_buf[i]) { 996 while (--i >= 0) { 997 free_rxbuf_page(lp->pci_dev, 998 lp->data_buf[i], 999 lp->data_buf_dma[i]); 1000 lp->data_buf[i] = NULL; 1001 } 1002 pci_free_consistent(lp->pci_dev, 1003 PAGE_SIZE * FD_PAGE_NUM, 1004 lp->fd_buf, 1005 lp->fd_buf_dma); 1006 lp->fd_buf = NULL; 1007 return -ENOMEM; 1008 } 1009#else 1010 lp->rx_skbs[i].skb = 1011 alloc_rxbuf_skb(dev, lp->pci_dev, 1012 &lp->rx_skbs[i].skb_dma); 1013 if (!lp->rx_skbs[i].skb) { 1014 while (--i >= 0) { 1015 free_rxbuf_skb(lp->pci_dev, 1016 lp->rx_skbs[i].skb, 1017 lp->rx_skbs[i].skb_dma); 1018 lp->rx_skbs[i].skb = NULL; 1019 } 1020 pci_free_consistent(lp->pci_dev, 1021 PAGE_SIZE * FD_PAGE_NUM, 1022 lp->fd_buf, 1023 lp->fd_buf_dma); 1024 lp->fd_buf = NULL; 1025 return -ENOMEM; 1026 } 1027#endif 1028 } 1029 printk(KERN_DEBUG "%s: FD buf %p DataBuf", 1030 dev->name, lp->fd_buf); 1031#ifdef TC35815_USE_PACKEDBUFFER 1032 printk(" DataBuf"); 1033 for (i = 0; i < RX_BUF_NUM; i++) 1034 printk(" %p", lp->data_buf[i]); 1035#endif 1036 printk("\n"); 1037 } else { 1038 for (i = 0; i < FD_PAGE_NUM; i++) 1039 clear_page((void *)((unsigned long)lp->fd_buf + 1040 i * PAGE_SIZE)); 1041 } 1042 fd_addr = (unsigned long)lp->fd_buf; 1043 1044 /* Free Descriptors (for Receive) */ 1045 lp->rfd_base = (struct RxFD *)fd_addr; 1046 fd_addr += sizeof(struct RxFD) * RX_FD_NUM; 1047 for (i = 0; i < RX_FD_NUM; i++) 1048 lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD); 1049 lp->rfd_cur = lp->rfd_base; 1050 lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1); 1051 1052 /* Transmit Descriptors */ 1053 lp->tfd_base = (struct TxFD *)fd_addr; 1054 fd_addr += sizeof(struct TxFD) * TX_FD_NUM; 1055 for (i = 0; i < TX_FD_NUM; i++) { 1056 lp->tfd_base[i].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1])); 1057 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); 1058 lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0); 1059 } 1060 lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0])); 1061 lp->tfd_start = 0; 1062 lp->tfd_end = 0; 1063 1064 /* Buffer List (for Receive) */ 1065 lp->fbl_ptr = (struct FrFD *)fd_addr; 1066 lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr)); 1067 lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD); 1068#ifndef TC35815_USE_PACKEDBUFFER 1069 /* 1070 * move all allocated skbs to head of rx_skbs[] array. 1071 * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in 1072 * tc35815_rx() had failed. 1073 */ 1074 lp->fbl_count = 0; 1075 for (i = 0; i < RX_BUF_NUM; i++) { 1076 if (lp->rx_skbs[i].skb) { 1077 if (i != lp->fbl_count) { 1078 lp->rx_skbs[lp->fbl_count].skb = 1079 lp->rx_skbs[i].skb; 1080 lp->rx_skbs[lp->fbl_count].skb_dma = 1081 lp->rx_skbs[i].skb_dma; 1082 } 1083 lp->fbl_count++; 1084 } 1085 } 1086#endif 1087 for (i = 0; i < RX_BUF_NUM; i++) { 1088#ifdef TC35815_USE_PACKEDBUFFER 1089 lp->fbl_ptr->bd[i].BuffData = cpu_to_le32(lp->data_buf_dma[i]); 1090#else 1091 if (i >= lp->fbl_count) { 1092 lp->fbl_ptr->bd[i].BuffData = 0; 1093 lp->fbl_ptr->bd[i].BDCtl = 0; 1094 continue; 1095 } 1096 lp->fbl_ptr->bd[i].BuffData = 1097 cpu_to_le32(lp->rx_skbs[i].skb_dma); 1098#endif 1099 /* BDID is index of FrFD.bd[] */ 1100 lp->fbl_ptr->bd[i].BDCtl = 1101 cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) | 1102 RX_BUF_SIZE); 1103 } 1104#ifdef TC35815_USE_PACKEDBUFFER 1105 lp->fbl_curid = 0; 1106#endif 1107 1108 printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n", 1109 dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr); 1110 return 0; 1111} 1112 1113static void 1114tc35815_clear_queues(struct net_device *dev) 1115{ 1116 struct tc35815_local *lp = netdev_priv(dev); 1117 int i; 1118 1119 for (i = 0; i < TX_FD_NUM; i++) { 1120 u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem); 1121 struct sk_buff *skb = 1122 fdsystem != 0xffffffff ? 1123 lp->tx_skbs[fdsystem].skb : NULL; 1124#ifdef DEBUG 1125 if (lp->tx_skbs[i].skb != skb) { 1126 printk("%s: tx_skbs mismatch(%d).\n", dev->name, i); 1127 panic_queues(dev); 1128 } 1129#else 1130 BUG_ON(lp->tx_skbs[i].skb != skb); 1131#endif 1132 if (skb) { 1133 pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE); 1134 lp->tx_skbs[i].skb = NULL; 1135 lp->tx_skbs[i].skb_dma = 0; 1136 dev_kfree_skb_any(skb); 1137 } 1138 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); 1139 } 1140 1141 tc35815_init_queues(dev); 1142} 1143 1144static void 1145tc35815_free_queues(struct net_device *dev) 1146{ 1147 struct tc35815_local *lp = netdev_priv(dev); 1148 int i; 1149 1150 if (lp->tfd_base) { 1151 for (i = 0; i < TX_FD_NUM; i++) { 1152 u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem); 1153 struct sk_buff *skb = 1154 fdsystem != 0xffffffff ? 1155 lp->tx_skbs[fdsystem].skb : NULL; 1156#ifdef DEBUG 1157 if (lp->tx_skbs[i].skb != skb) { 1158 printk("%s: tx_skbs mismatch(%d).\n", dev->name, i); 1159 panic_queues(dev); 1160 } 1161#else 1162 BUG_ON(lp->tx_skbs[i].skb != skb); 1163#endif 1164 if (skb) { 1165 dev_kfree_skb(skb); 1166 pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE); 1167 lp->tx_skbs[i].skb = NULL; 1168 lp->tx_skbs[i].skb_dma = 0; 1169 } 1170 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); 1171 } 1172 } 1173 1174 lp->rfd_base = NULL; 1175 lp->rfd_limit = NULL; 1176 lp->rfd_cur = NULL; 1177 lp->fbl_ptr = NULL; 1178 1179 for (i = 0; i < RX_BUF_NUM; i++) { 1180#ifdef TC35815_USE_PACKEDBUFFER 1181 if (lp->data_buf[i]) { 1182 free_rxbuf_page(lp->pci_dev, 1183 lp->data_buf[i], lp->data_buf_dma[i]); 1184 lp->data_buf[i] = NULL; 1185 } 1186#else 1187 if (lp->rx_skbs[i].skb) { 1188 free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb, 1189 lp->rx_skbs[i].skb_dma); 1190 lp->rx_skbs[i].skb = NULL; 1191 } 1192#endif 1193 } 1194 if (lp->fd_buf) { 1195 pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, 1196 lp->fd_buf, lp->fd_buf_dma); 1197 lp->fd_buf = NULL; 1198 } 1199} 1200 1201static void 1202dump_txfd(struct TxFD *fd) 1203{ 1204 printk("TxFD(%p): %08x %08x %08x %08x\n", fd, 1205 le32_to_cpu(fd->fd.FDNext), 1206 le32_to_cpu(fd->fd.FDSystem), 1207 le32_to_cpu(fd->fd.FDStat), 1208 le32_to_cpu(fd->fd.FDCtl)); 1209 printk("BD: "); 1210 printk(" %08x %08x", 1211 le32_to_cpu(fd->bd.BuffData), 1212 le32_to_cpu(fd->bd.BDCtl)); 1213 printk("\n"); 1214} 1215 1216static int 1217dump_rxfd(struct RxFD *fd) 1218{ 1219 int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT; 1220 if (bd_count > 8) 1221 bd_count = 8; 1222 printk("RxFD(%p): %08x %08x %08x %08x\n", fd, 1223 le32_to_cpu(fd->fd.FDNext), 1224 le32_to_cpu(fd->fd.FDSystem), 1225 le32_to_cpu(fd->fd.FDStat), 1226 le32_to_cpu(fd->fd.FDCtl)); 1227 if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD) 1228 return 0; 1229 printk("BD: "); 1230 for (i = 0; i < bd_count; i++) 1231 printk(" %08x %08x", 1232 le32_to_cpu(fd->bd[i].BuffData), 1233 le32_to_cpu(fd->bd[i].BDCtl)); 1234 printk("\n"); 1235 return bd_count; 1236} 1237 1238#if defined(DEBUG) || defined(TC35815_USE_PACKEDBUFFER) 1239static void 1240dump_frfd(struct FrFD *fd) 1241{ 1242 int i; 1243 printk("FrFD(%p): %08x %08x %08x %08x\n", fd, 1244 le32_to_cpu(fd->fd.FDNext), 1245 le32_to_cpu(fd->fd.FDSystem), 1246 le32_to_cpu(fd->fd.FDStat), 1247 le32_to_cpu(fd->fd.FDCtl)); 1248 printk("BD: "); 1249 for (i = 0; i < RX_BUF_NUM; i++) 1250 printk(" %08x %08x", 1251 le32_to_cpu(fd->bd[i].BuffData), 1252 le32_to_cpu(fd->bd[i].BDCtl)); 1253 printk("\n"); 1254} 1255#endif 1256 1257#ifdef DEBUG 1258static void 1259panic_queues(struct net_device *dev) 1260{ 1261 struct tc35815_local *lp = netdev_priv(dev); 1262 int i; 1263 1264 printk("TxFD base %p, start %u, end %u\n", 1265 lp->tfd_base, lp->tfd_start, lp->tfd_end); 1266 printk("RxFD base %p limit %p cur %p\n", 1267 lp->rfd_base, lp->rfd_limit, lp->rfd_cur); 1268 printk("FrFD %p\n", lp->fbl_ptr); 1269 for (i = 0; i < TX_FD_NUM; i++) 1270 dump_txfd(&lp->tfd_base[i]); 1271 for (i = 0; i < RX_FD_NUM; i++) { 1272 int bd_count = dump_rxfd(&lp->rfd_base[i]); 1273 i += (bd_count + 1) / 2; /* skip BDs */ 1274 } 1275 dump_frfd(lp->fbl_ptr); 1276 panic("%s: Illegal queue state.", dev->name); 1277} 1278#endif 1279 1280static void print_eth(const u8 *add) 1281{ 1282 DECLARE_MAC_BUF(mac); 1283 1284 printk(KERN_DEBUG "print_eth(%p)\n", add); 1285 printk(KERN_DEBUG " %s =>", print_mac(mac, add + 6)); 1286 printk(KERN_CONT " %s : %02x%02x\n", 1287 print_mac(mac, add), add[12], add[13]); 1288} 1289 1290static int tc35815_tx_full(struct net_device *dev) 1291{ 1292 struct tc35815_local *lp = netdev_priv(dev); 1293 return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end); 1294} 1295 1296static void tc35815_restart(struct net_device *dev) 1297{ 1298 struct tc35815_local *lp = netdev_priv(dev); 1299 1300 if (lp->phy_dev) { 1301 int timeout; 1302 1303 phy_write(lp->phy_dev, MII_BMCR, BMCR_RESET); 1304 timeout = 100; 1305 while (--timeout) { 1306 if (!(phy_read(lp->phy_dev, MII_BMCR) & BMCR_RESET)) 1307 break; 1308 udelay(1); 1309 } 1310 if (!timeout) 1311 printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name); 1312 } 1313 1314 spin_lock_irq(&lp->lock); 1315 tc35815_chip_reset(dev); 1316 tc35815_clear_queues(dev); 1317 tc35815_chip_init(dev); 1318 /* Reconfigure CAM again since tc35815_chip_init() initialize it. */ 1319 tc35815_set_multicast_list(dev); 1320 spin_unlock_irq(&lp->lock); 1321 1322 netif_wake_queue(dev); 1323} 1324 1325static void tc35815_restart_work(struct work_struct *work) 1326{ 1327 struct tc35815_local *lp = 1328 container_of(work, struct tc35815_local, restart_work); 1329 struct net_device *dev = lp->dev; 1330 1331 tc35815_restart(dev); 1332} 1333 1334static void tc35815_schedule_restart(struct net_device *dev) 1335{ 1336 struct tc35815_local *lp = netdev_priv(dev); 1337 struct tc35815_regs __iomem *tr = 1338 (struct tc35815_regs __iomem *)dev->base_addr; 1339 1340 /* disable interrupts */ 1341 tc_writel(0, &tr->Int_En); 1342 tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl); 1343 schedule_work(&lp->restart_work); 1344} 1345 1346static void tc35815_tx_timeout(struct net_device *dev) 1347{ 1348 struct tc35815_regs __iomem *tr = 1349 (struct tc35815_regs __iomem *)dev->base_addr; 1350 1351 printk(KERN_WARNING "%s: transmit timed out, status %#x\n", 1352 dev->name, tc_readl(&tr->Tx_Stat)); 1353 1354 /* Try to restart the adaptor. */ 1355 tc35815_schedule_restart(dev); 1356 dev->stats.tx_errors++; 1357} 1358 1359/* 1360 * Open/initialize the controller. This is called (in the current kernel) 1361 * sometime after booting when the 'ifconfig' program is run. 1362 * 1363 * This routine should set everything up anew at each open, even 1364 * registers that "should" only need to be set once at boot, so that 1365 * there is non-reboot way to recover if something goes wrong. 1366 */ 1367static int 1368tc35815_open(struct net_device *dev) 1369{ 1370 struct tc35815_local *lp = netdev_priv(dev); 1371 1372 /* 1373 * This is used if the interrupt line can turned off (shared). 1374 * See 3c503.c for an example of selecting the IRQ at config-time. 1375 */ 1376 if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED, 1377 dev->name, dev)) 1378 return -EAGAIN; 1379 1380 tc35815_chip_reset(dev); 1381 1382 if (tc35815_init_queues(dev) != 0) { 1383 free_irq(dev->irq, dev); 1384 return -EAGAIN; 1385 } 1386 1387#ifdef TC35815_NAPI 1388 napi_enable(&lp->napi); 1389#endif 1390 1391 /* Reset the hardware here. Don't forget to set the station address. */ 1392 spin_lock_irq(&lp->lock); 1393 tc35815_chip_init(dev); 1394 spin_unlock_irq(&lp->lock); 1395 1396 netif_carrier_off(dev); 1397 /* schedule a link state check */ 1398 phy_start(lp->phy_dev); 1399 1400 /* We are now ready to accept transmit requeusts from 1401 * the queueing layer of the networking. 1402 */ 1403 netif_start_queue(dev); 1404 1405 return 0; 1406} 1407 1408/* This will only be invoked if your driver is _not_ in XOFF state. 1409 * What this means is that you need not check it, and that this 1410 * invariant will hold if you make sure that the netif_*_queue() 1411 * calls are done at the proper times. 1412 */ 1413static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) 1414{ 1415 struct tc35815_local *lp = netdev_priv(dev); 1416 struct TxFD *txfd; 1417 unsigned long flags; 1418 1419 /* If some error occurs while trying to transmit this 1420 * packet, you should return '1' from this function. 1421 * In such a case you _may not_ do anything to the 1422 * SKB, it is still owned by the network queueing 1423 * layer when an error is returned. This means you 1424 * may not modify any SKB fields, you may not free 1425 * the SKB, etc. 1426 */ 1427 1428 /* This is the most common case for modern hardware. 1429 * The spinlock protects this code from the TX complete 1430 * hardware interrupt handler. Queue flow control is 1431 * thus managed under this lock as well. 1432 */ 1433 spin_lock_irqsave(&lp->lock, flags); 1434 1435 /* failsafe... (handle txdone now if half of FDs are used) */ 1436 if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM > 1437 TX_FD_NUM / 2) 1438 tc35815_txdone(dev); 1439 1440 if (netif_msg_pktdata(lp)) 1441 print_eth(skb->data); 1442#ifdef DEBUG 1443 if (lp->tx_skbs[lp->tfd_start].skb) { 1444 printk("%s: tx_skbs conflict.\n", dev->name); 1445 panic_queues(dev); 1446 } 1447#else 1448 BUG_ON(lp->tx_skbs[lp->tfd_start].skb); 1449#endif 1450 lp->tx_skbs[lp->tfd_start].skb = skb; 1451 lp->tx_skbs[lp->tfd_start].skb_dma = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); 1452 1453 /*add to ring */ 1454 txfd = &lp->tfd_base[lp->tfd_start]; 1455 txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma); 1456 txfd->bd.BDCtl = cpu_to_le32(skb->len); 1457 txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start); 1458 txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT)); 1459 1460 if (lp->tfd_start == lp->tfd_end) { 1461 struct tc35815_regs __iomem *tr = 1462 (struct tc35815_regs __iomem *)dev->base_addr; 1463 /* Start DMA Transmitter. */ 1464 txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL); 1465#ifdef GATHER_TXINT 1466 txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); 1467#endif 1468 if (netif_msg_tx_queued(lp)) { 1469 printk("%s: starting TxFD.\n", dev->name); 1470 dump_txfd(txfd); 1471 } 1472 tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr); 1473 } else { 1474 txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL); 1475 if (netif_msg_tx_queued(lp)) { 1476 printk("%s: queueing TxFD.\n", dev->name); 1477 dump_txfd(txfd); 1478 } 1479 } 1480 lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM; 1481 1482 dev->trans_start = jiffies; 1483 1484 /* If we just used up the very last entry in the 1485 * TX ring on this device, tell the queueing 1486 * layer to send no more. 1487 */ 1488 if (tc35815_tx_full(dev)) { 1489 if (netif_msg_tx_queued(lp)) 1490 printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name); 1491 netif_stop_queue(dev); 1492 } 1493 1494 /* When the TX completion hw interrupt arrives, this 1495 * is when the transmit statistics are updated. 1496 */ 1497 1498 spin_unlock_irqrestore(&lp->lock, flags); 1499 return 0; 1500} 1501 1502#define FATAL_ERROR_INT \ 1503 (Int_IntPCI | Int_DmParErr | Int_IntNRAbt) 1504static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status) 1505{ 1506 static int count; 1507 printk(KERN_WARNING "%s: Fatal Error Intterrupt (%#x):", 1508 dev->name, status); 1509 if (status & Int_IntPCI) 1510 printk(" IntPCI"); 1511 if (status & Int_DmParErr) 1512 printk(" DmParErr"); 1513 if (status & Int_IntNRAbt) 1514 printk(" IntNRAbt"); 1515 printk("\n"); 1516 if (count++ > 100) 1517 panic("%s: Too many fatal errors.", dev->name); 1518 printk(KERN_WARNING "%s: Resetting ...\n", dev->name); 1519 /* Try to restart the adaptor. */ 1520 tc35815_schedule_restart(dev); 1521} 1522 1523#ifdef TC35815_NAPI 1524static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit) 1525#else 1526static int tc35815_do_interrupt(struct net_device *dev, u32 status) 1527#endif 1528{ 1529 struct tc35815_local *lp = netdev_priv(dev); 1530 struct tc35815_regs __iomem *tr = 1531 (struct tc35815_regs __iomem *)dev->base_addr; 1532 int ret = -1; 1533 1534 /* Fatal errors... */ 1535 if (status & FATAL_ERROR_INT) { 1536 tc35815_fatal_error_interrupt(dev, status); 1537 return 0; 1538 } 1539 /* recoverable errors */ 1540 if (status & Int_IntFDAEx) { 1541 /* disable FDAEx int. (until we make rooms...) */ 1542 tc_writel(tc_readl(&tr->Int_En) & ~Int_FDAExEn, &tr->Int_En); 1543 printk(KERN_WARNING 1544 "%s: Free Descriptor Area Exhausted (%#x).\n", 1545 dev->name, status); 1546 dev->stats.rx_dropped++; 1547 ret = 0; 1548 } 1549 if (status & Int_IntBLEx) { 1550 /* disable BLEx int. (until we make rooms...) */ 1551 tc_writel(tc_readl(&tr->Int_En) & ~Int_BLExEn, &tr->Int_En); 1552 printk(KERN_WARNING 1553 "%s: Buffer List Exhausted (%#x).\n", 1554 dev->name, status); 1555 dev->stats.rx_dropped++; 1556 ret = 0; 1557 } 1558 if (status & Int_IntExBD) { 1559 printk(KERN_WARNING 1560 "%s: Excessive Buffer Descriptiors (%#x).\n", 1561 dev->name, status); 1562 dev->stats.rx_length_errors++; 1563 ret = 0; 1564 } 1565 1566 /* normal notification */ 1567 if (status & Int_IntMacRx) { 1568 /* Got a packet(s). */ 1569#ifdef TC35815_NAPI 1570 ret = tc35815_rx(dev, limit); 1571#else 1572 tc35815_rx(dev); 1573 ret = 0; 1574#endif 1575 lp->lstats.rx_ints++; 1576 } 1577 if (status & Int_IntMacTx) { 1578 /* Transmit complete. */ 1579 lp->lstats.tx_ints++; 1580 tc35815_txdone(dev); 1581 netif_wake_queue(dev); 1582 ret = 0; 1583 } 1584 return ret; 1585} 1586 1587/* 1588 * The typical workload of the driver: 1589 * Handle the network interface interrupts. 1590 */ 1591static irqreturn_t tc35815_interrupt(int irq, void *dev_id) 1592{ 1593 struct net_device *dev = dev_id; 1594 struct tc35815_local *lp = netdev_priv(dev); 1595 struct tc35815_regs __iomem *tr = 1596 (struct tc35815_regs __iomem *)dev->base_addr; 1597#ifdef TC35815_NAPI 1598 u32 dmactl = tc_readl(&tr->DMA_Ctl); 1599 1600 if (!(dmactl & DMA_IntMask)) { 1601 /* disable interrupts */ 1602 tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); 1603 if (netif_rx_schedule_prep(dev, &lp->napi)) 1604 __netif_rx_schedule(dev, &lp->napi); 1605 else { 1606 printk(KERN_ERR "%s: interrupt taken in poll\n", 1607 dev->name); 1608 BUG(); 1609 } 1610 (void)tc_readl(&tr->Int_Src); /* flush */ 1611 return IRQ_HANDLED; 1612 } 1613 return IRQ_NONE; 1614#else 1615 int handled; 1616 u32 status; 1617 1618 spin_lock(&lp->lock); 1619 status = tc_readl(&tr->Int_Src); 1620 tc_writel(status, &tr->Int_Src); /* write to clear */ 1621 handled = tc35815_do_interrupt(dev, status); 1622 (void)tc_readl(&tr->Int_Src); /* flush */ 1623 spin_unlock(&lp->lock); 1624 return IRQ_RETVAL(handled >= 0); 1625#endif /* TC35815_NAPI */ 1626} 1627 1628#ifdef CONFIG_NET_POLL_CONTROLLER 1629static void tc35815_poll_controller(struct net_device *dev) 1630{ 1631 disable_irq(dev->irq); 1632 tc35815_interrupt(dev->irq, dev); 1633 enable_irq(dev->irq); 1634} 1635#endif 1636 1637/* We have a good packet(s), get it/them out of the buffers. */ 1638#ifdef TC35815_NAPI 1639static int 1640tc35815_rx(struct net_device *dev, int limit) 1641#else 1642static void 1643tc35815_rx(struct net_device *dev) 1644#endif 1645{ 1646 struct tc35815_local *lp = netdev_priv(dev); 1647 unsigned int fdctl; 1648 int i; 1649 int buf_free_count = 0; 1650 int fd_free_count = 0; 1651#ifdef TC35815_NAPI 1652 int received = 0; 1653#endif 1654 1655 while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) { 1656 int status = le32_to_cpu(lp->rfd_cur->fd.FDStat); 1657 int pkt_len = fdctl & FD_FDLength_MASK; 1658 int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT; 1659#ifdef DEBUG 1660 struct RxFD *next_rfd; 1661#endif 1662#if (RX_CTL_CMD & Rx_StripCRC) == 0 1663 pkt_len -= 4; 1664#endif 1665 1666 if (netif_msg_rx_status(lp)) 1667 dump_rxfd(lp->rfd_cur); 1668 if (status & Rx_Good) { 1669 struct sk_buff *skb; 1670 unsigned char *data; 1671 int cur_bd; 1672#ifdef TC35815_USE_PACKEDBUFFER 1673 int offset; 1674#endif 1675 1676#ifdef TC35815_NAPI 1677 if (--limit < 0) 1678 break; 1679#endif 1680#ifdef TC35815_USE_PACKEDBUFFER 1681 BUG_ON(bd_count > 2); 1682 skb = dev_alloc_skb(pkt_len + 2); /* +2: for reserve */ 1683 if (skb == NULL) { 1684 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", 1685 dev->name); 1686 dev->stats.rx_dropped++; 1687 break; 1688 } 1689 skb_reserve(skb, 2); /* 16 bit alignment */ 1690 1691 data = skb_put(skb, pkt_len); 1692 1693 /* copy from receive buffer */ 1694 cur_bd = 0; 1695 offset = 0; 1696 while (offset < pkt_len && cur_bd < bd_count) { 1697 int len = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BDCtl) & 1698 BD_BuffLength_MASK; 1699 dma_addr_t dma = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BuffData); 1700 void *rxbuf = rxbuf_bus_to_virt(lp, dma); 1701 if (offset + len > pkt_len) 1702 len = pkt_len - offset; 1703#ifdef TC35815_DMA_SYNC_ONDEMAND 1704 pci_dma_sync_single_for_cpu(lp->pci_dev, 1705 dma, len, 1706 PCI_DMA_FROMDEVICE); 1707#endif 1708 memcpy(data + offset, rxbuf, len); 1709#ifdef TC35815_DMA_SYNC_ONDEMAND 1710 pci_dma_sync_single_for_device(lp->pci_dev, 1711 dma, len, 1712 PCI_DMA_FROMDEVICE); 1713#endif 1714 offset += len; 1715 cur_bd++; 1716 } 1717#else /* TC35815_USE_PACKEDBUFFER */ 1718 BUG_ON(bd_count > 1); 1719 cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl) 1720 & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT; 1721#ifdef DEBUG 1722 if (cur_bd >= RX_BUF_NUM) { 1723 printk("%s: invalid BDID.\n", dev->name); 1724 panic_queues(dev); 1725 } 1726 BUG_ON(lp->rx_skbs[cur_bd].skb_dma != 1727 (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3)); 1728 if (!lp->rx_skbs[cur_bd].skb) { 1729 printk("%s: NULL skb.\n", dev->name); 1730 panic_queues(dev); 1731 } 1732#else 1733 BUG_ON(cur_bd >= RX_BUF_NUM); 1734#endif 1735 skb = lp->rx_skbs[cur_bd].skb; 1736 prefetch(skb->data); 1737 lp->rx_skbs[cur_bd].skb = NULL; 1738 pci_unmap_single(lp->pci_dev, 1739 lp->rx_skbs[cur_bd].skb_dma, 1740 RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 1741 if (!HAVE_DMA_RXALIGN(lp)) 1742 memmove(skb->data, skb->data - 2, pkt_len); 1743 data = skb_put(skb, pkt_len); 1744#endif /* TC35815_USE_PACKEDBUFFER */ 1745 if (netif_msg_pktdata(lp)) 1746 print_eth(data); 1747 skb->protocol = eth_type_trans(skb, dev); 1748#ifdef TC35815_NAPI 1749 netif_receive_skb(skb); 1750 received++; 1751#else 1752 netif_rx(skb); 1753#endif 1754 dev->last_rx = jiffies; 1755 dev->stats.rx_packets++; 1756 dev->stats.rx_bytes += pkt_len; 1757 } else { 1758 dev->stats.rx_errors++; 1759 printk(KERN_DEBUG "%s: Rx error (status %x)\n", 1760 dev->name, status & Rx_Stat_Mask); 1761 /* WORKAROUND: LongErr and CRCErr means Overflow. */ 1762 if ((status & Rx_LongErr) && (status & Rx_CRCErr)) { 1763 status &= ~(Rx_LongErr|Rx_CRCErr); 1764 status |= Rx_Over; 1765 } 1766 if (status & Rx_LongErr) 1767 dev->stats.rx_length_errors++; 1768 if (status & Rx_Over) 1769 dev->stats.rx_fifo_errors++; 1770 if (status & Rx_CRCErr) 1771 dev->stats.rx_crc_errors++; 1772 if (status & Rx_Align) 1773 dev->stats.rx_frame_errors++; 1774 } 1775 1776 if (bd_count > 0) { 1777 /* put Free Buffer back to controller */ 1778 int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl); 1779 unsigned char id = 1780 (bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT; 1781#ifdef DEBUG 1782 if (id >= RX_BUF_NUM) { 1783 printk("%s: invalid BDID.\n", dev->name); 1784 panic_queues(dev); 1785 } 1786#else 1787 BUG_ON(id >= RX_BUF_NUM); 1788#endif 1789 /* free old buffers */ 1790#ifdef TC35815_USE_PACKEDBUFFER 1791 while (lp->fbl_curid != id) 1792#else 1793 lp->fbl_count--; 1794 while (lp->fbl_count < RX_BUF_NUM) 1795#endif 1796 { 1797#ifdef TC35815_USE_PACKEDBUFFER 1798 unsigned char curid = lp->fbl_curid; 1799#else 1800 unsigned char curid = 1801 (id + 1 + lp->fbl_count) % RX_BUF_NUM; 1802#endif 1803 struct BDesc *bd = &lp->fbl_ptr->bd[curid]; 1804#ifdef DEBUG 1805 bdctl = le32_to_cpu(bd->BDCtl); 1806 if (bdctl & BD_CownsBD) { 1807 printk("%s: Freeing invalid BD.\n", 1808 dev->name); 1809 panic_queues(dev); 1810 } 1811#endif 1812 /* pass BD to controller */ 1813#ifndef TC35815_USE_PACKEDBUFFER 1814 if (!lp->rx_skbs[curid].skb) { 1815 lp->rx_skbs[curid].skb = 1816 alloc_rxbuf_skb(dev, 1817 lp->pci_dev, 1818 &lp->rx_skbs[curid].skb_dma); 1819 if (!lp->rx_skbs[curid].skb) 1820 break; /* try on next reception */ 1821 bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma); 1822 } 1823#endif /* TC35815_USE_PACKEDBUFFER */ 1824 /* Note: BDLength was modified by chip. */ 1825 bd->BDCtl = cpu_to_le32(BD_CownsBD | 1826 (curid << BD_RxBDID_SHIFT) | 1827 RX_BUF_SIZE); 1828#ifdef TC35815_USE_PACKEDBUFFER 1829 lp->fbl_curid = (curid + 1) % RX_BUF_NUM; 1830 if (netif_msg_rx_status(lp)) { 1831 printk("%s: Entering new FBD %d\n", 1832 dev->name, lp->fbl_curid); 1833 dump_frfd(lp->fbl_ptr); 1834 } 1835#else 1836 lp->fbl_count++; 1837#endif 1838 buf_free_count++; 1839 } 1840 } 1841 1842 /* put RxFD back to controller */ 1843#ifdef DEBUG 1844 next_rfd = fd_bus_to_virt(lp, 1845 le32_to_cpu(lp->rfd_cur->fd.FDNext)); 1846 if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) { 1847 printk("%s: RxFD FDNext invalid.\n", dev->name); 1848 panic_queues(dev); 1849 } 1850#endif 1851 for (i = 0; i < (bd_count + 1) / 2 + 1; i++) { 1852 /* pass FD to controller */ 1853#ifdef DEBUG 1854 lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead); 1855#else 1856 lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL); 1857#endif 1858 lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD); 1859 lp->rfd_cur++; 1860 fd_free_count++; 1861 } 1862 if (lp->rfd_cur > lp->rfd_limit) 1863 lp->rfd_cur = lp->rfd_base; 1864#ifdef DEBUG 1865 if (lp->rfd_cur != next_rfd) 1866 printk("rfd_cur = %p, next_rfd %p\n", 1867 lp->rfd_cur, next_rfd); 1868#endif 1869 } 1870 1871 /* re-enable BL/FDA Exhaust interrupts. */ 1872 if (fd_free_count) { 1873 struct tc35815_regs __iomem *tr = 1874 (struct tc35815_regs __iomem *)dev->base_addr; 1875 u32 en, en_old = tc_readl(&tr->Int_En); 1876 en = en_old | Int_FDAExEn; 1877 if (buf_free_count) 1878 en |= Int_BLExEn; 1879 if (en != en_old) 1880 tc_writel(en, &tr->Int_En); 1881 } 1882#ifdef TC35815_NAPI 1883 return received; 1884#endif 1885} 1886 1887#ifdef TC35815_NAPI 1888static int tc35815_poll(struct napi_struct *napi, int budget) 1889{ 1890 struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi); 1891 struct net_device *dev = lp->dev; 1892 struct tc35815_regs __iomem *tr = 1893 (struct tc35815_regs __iomem *)dev->base_addr; 1894 int received = 0, handled; 1895 u32 status; 1896 1897 spin_lock(&lp->lock); 1898 status = tc_readl(&tr->Int_Src); 1899 do { 1900 tc_writel(status, &tr->Int_Src); /* write to clear */ 1901 1902 handled = tc35815_do_interrupt(dev, status, limit); 1903 if (handled >= 0) { 1904 received += handled; 1905 if (received >= budget) 1906 break; 1907 } 1908 status = tc_readl(&tr->Int_Src); 1909 } while (status); 1910 spin_unlock(&lp->lock); 1911 1912 if (received < budget) { 1913 netif_rx_complete(dev, napi); 1914 /* enable interrupts */ 1915 tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); 1916 } 1917 return received; 1918} 1919#endif 1920 1921#ifdef NO_CHECK_CARRIER 1922#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_LateColl|Tx_TxPar|Tx_SQErr) 1923#else 1924#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr) 1925#endif 1926 1927static void 1928tc35815_check_tx_stat(struct net_device *dev, int status) 1929{ 1930 struct tc35815_local *lp = netdev_priv(dev); 1931 const char *msg = NULL; 1932 1933 /* count collisions */ 1934 if (status & Tx_ExColl) 1935 dev->stats.collisions += 16; 1936 if (status & Tx_TxColl_MASK) 1937 dev->stats.collisions += status & Tx_TxColl_MASK; 1938 1939#ifndef NO_CHECK_CARRIER 1940 /* TX4939 does not have NCarr */ 1941 if (lp->chiptype == TC35815_TX4939) 1942 status &= ~Tx_NCarr; 1943#ifdef WORKAROUND_LOSTCAR 1944 /* WORKAROUND: ignore LostCrS in full duplex operation */ 1945 if (!lp->link || lp->duplex == DUPLEX_FULL) 1946 status &= ~Tx_NCarr; 1947#endif 1948#endif 1949 1950 if (!(status & TX_STA_ERR)) { 1951 /* no error. */ 1952 dev->stats.tx_packets++; 1953 return; 1954 } 1955 1956 dev->stats.tx_errors++; 1957 if (status & Tx_ExColl) { 1958 dev->stats.tx_aborted_errors++; 1959 msg = "Excessive Collision."; 1960 } 1961 if (status & Tx_Under) { 1962 dev->stats.tx_fifo_errors++; 1963 msg = "Tx FIFO Underrun."; 1964 if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) { 1965 lp->lstats.tx_underrun++; 1966 if (lp->lstats.tx_underrun >= TX_THRESHOLD_KEEP_LIMIT) { 1967 struct tc35815_regs __iomem *tr = 1968 (struct tc35815_regs __iomem *)dev->base_addr; 1969 tc_writel(TX_THRESHOLD_MAX, &tr->TxThrsh); 1970 msg = "Tx FIFO Underrun.Change Tx threshold to max."; 1971 } 1972 } 1973 } 1974 if (status & Tx_Defer) { 1975 dev->stats.tx_fifo_errors++; 1976 msg = "Excessive Deferral."; 1977 } 1978#ifndef NO_CHECK_CARRIER 1979 if (status & Tx_NCarr) { 1980 dev->stats.tx_carrier_errors++; 1981 msg = "Lost Carrier Sense."; 1982 } 1983#endif 1984 if (status & Tx_LateColl) { 1985 dev->stats.tx_aborted_errors++; 1986 msg = "Late Collision."; 1987 } 1988 if (status & Tx_TxPar) { 1989 dev->stats.tx_fifo_errors++; 1990 msg = "Transmit Parity Error."; 1991 } 1992 if (status & Tx_SQErr) { 1993 dev->stats.tx_heartbeat_errors++; 1994 msg = "Signal Quality Error."; 1995 } 1996 if (msg && netif_msg_tx_err(lp)) 1997 printk(KERN_WARNING "%s: %s (%#x)\n", dev->name, msg, status); 1998} 1999 2000/* This handles TX complete events posted by the device 2001 * via interrupts. 2002 */ 2003static void 2004tc35815_txdone(struct net_device *dev) 2005{ 2006 struct tc35815_local *lp = netdev_priv(dev); 2007 struct TxFD *txfd; 2008 unsigned int fdctl; 2009 2010 txfd = &lp->tfd_base[lp->tfd_end]; 2011 while (lp->tfd_start != lp->tfd_end && 2012 !((fdctl = le32_to_cpu(txfd->fd.FDCtl)) & FD_CownsFD)) { 2013 int status = le32_to_cpu(txfd->fd.FDStat); 2014 struct sk_buff *skb; 2015 unsigned long fdnext = le32_to_cpu(txfd->fd.FDNext); 2016 u32 fdsystem = le32_to_cpu(txfd->fd.FDSystem); 2017 2018 if (netif_msg_tx_done(lp)) { 2019 printk("%s: complete TxFD.\n", dev->name); 2020 dump_txfd(txfd); 2021 } 2022 tc35815_check_tx_stat(dev, status); 2023 2024 skb = fdsystem != 0xffffffff ? 2025 lp->tx_skbs[fdsystem].skb : NULL; 2026#ifdef DEBUG 2027 if (lp->tx_skbs[lp->tfd_end].skb != skb) { 2028 printk("%s: tx_skbs mismatch.\n", dev->name); 2029 panic_queues(dev); 2030 } 2031#else 2032 BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb); 2033#endif 2034 if (skb) { 2035 dev->stats.tx_bytes += skb->len; 2036 pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE); 2037 lp->tx_skbs[lp->tfd_end].skb = NULL; 2038 lp->tx_skbs[lp->tfd_end].skb_dma = 0; 2039#ifdef TC35815_NAPI 2040 dev_kfree_skb_any(skb); 2041#else 2042 dev_kfree_skb_irq(skb); 2043#endif 2044 } 2045 txfd->fd.FDSystem = cpu_to_le32(0xffffffff); 2046 2047 lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM; 2048 txfd = &lp->tfd_base[lp->tfd_end]; 2049#ifdef DEBUG 2050 if ((fdnext & ~FD_Next_EOL) != fd_virt_to_bus(lp, txfd)) { 2051 printk("%s: TxFD FDNext invalid.\n", dev->name); 2052 panic_queues(dev); 2053 } 2054#endif 2055 if (fdnext & FD_Next_EOL) { 2056 /* DMA Transmitter has been stopping... */ 2057 if (lp->tfd_end != lp->tfd_start) { 2058 struct tc35815_regs __iomem *tr = 2059 (struct tc35815_regs __iomem *)dev->base_addr; 2060 int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM; 2061 struct TxFD *txhead = &lp->tfd_base[head]; 2062 int qlen = (lp->tfd_start + TX_FD_NUM 2063 - lp->tfd_end) % TX_FD_NUM; 2064 2065#ifdef DEBUG 2066 if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) { 2067 printk("%s: TxFD FDCtl invalid.\n", dev->name); 2068 panic_queues(dev); 2069 } 2070#endif 2071 /* log max queue length */ 2072 if (lp->lstats.max_tx_qlen < qlen) 2073 lp->lstats.max_tx_qlen = qlen; 2074 2075 2076 /* start DMA Transmitter again */ 2077 txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL); 2078#ifdef GATHER_TXINT 2079 txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); 2080#endif 2081 if (netif_msg_tx_queued(lp)) { 2082 printk("%s: start TxFD on queue.\n", 2083 dev->name); 2084 dump_txfd(txfd); 2085 } 2086 tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr); 2087 } 2088 break; 2089 } 2090 } 2091 2092 /* If we had stopped the queue due to a "tx full" 2093 * condition, and space has now been made available, 2094 * wake up the queue. 2095 */ 2096 if (netif_queue_stopped(dev) && !tc35815_tx_full(dev)) 2097 netif_wake_queue(dev); 2098} 2099 2100/* The inverse routine to tc35815_open(). */ 2101static int 2102tc35815_close(struct net_device *dev) 2103{ 2104 struct tc35815_local *lp = netdev_priv(dev); 2105 2106 netif_stop_queue(dev); 2107#ifdef TC35815_NAPI 2108 napi_disable(&lp->napi); 2109#endif 2110 if (lp->phy_dev) 2111 phy_stop(lp->phy_dev); 2112 cancel_work_sync(&lp->restart_work); 2113 2114 /* Flush the Tx and disable Rx here. */ 2115 tc35815_chip_reset(dev); 2116 free_irq(dev->irq, dev); 2117 2118 tc35815_free_queues(dev); 2119 2120 return 0; 2121 2122} 2123 2124/* 2125 * Get the current statistics. 2126 * This may be called with the card open or closed. 2127 */ 2128static struct net_device_stats *tc35815_get_stats(struct net_device *dev) 2129{ 2130 struct tc35815_regs __iomem *tr = 2131 (struct tc35815_regs __iomem *)dev->base_addr; 2132 if (netif_running(dev)) 2133 /* Update the statistics from the device registers. */ 2134 dev->stats.rx_missed_errors = tc_readl(&tr->Miss_Cnt); 2135 2136 return &dev->stats; 2137} 2138 2139static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr) 2140{ 2141 struct tc35815_local *lp = netdev_priv(dev); 2142 struct tc35815_regs __iomem *tr = 2143 (struct tc35815_regs __iomem *)dev->base_addr; 2144 int cam_index = index * 6; 2145 u32 cam_data; 2146 u32 saved_addr; 2147 DECLARE_MAC_BUF(mac); 2148 2149 saved_addr = tc_readl(&tr->CAM_Adr); 2150 2151 if (netif_msg_hw(lp)) 2152 printk(KERN_DEBUG "%s: CAM %d: %s\n", 2153 dev->name, index, print_mac(mac, addr)); 2154 if (index & 1) { 2155 /* read modify write */ 2156 tc_writel(cam_index - 2, &tr->CAM_Adr); 2157 cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000; 2158 cam_data |= addr[0] << 8 | addr[1]; 2159 tc_writel(cam_data, &tr->CAM_Data); 2160 /* write whole word */ 2161 tc_writel(cam_index + 2, &tr->CAM_Adr); 2162 cam_data = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]; 2163 tc_writel(cam_data, &tr->CAM_Data); 2164 } else { 2165 /* write whole word */ 2166 tc_writel(cam_index, &tr->CAM_Adr); 2167 cam_data = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]; 2168 tc_writel(cam_data, &tr->CAM_Data); 2169 /* read modify write */ 2170 tc_writel(cam_index + 4, &tr->CAM_Adr); 2171 cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff; 2172 cam_data |= addr[4] << 24 | (addr[5] << 16); 2173 tc_writel(cam_data, &tr->CAM_Data); 2174 } 2175 2176 tc_writel(saved_addr, &tr->CAM_Adr); 2177} 2178 2179 2180/* 2181 * Set or clear the multicast filter for this adaptor. 2182 * num_addrs == -1 Promiscuous mode, receive all packets 2183 * num_addrs == 0 Normal mode, clear multicast list 2184 * num_addrs > 0 Multicast mode, receive normal and MC packets, 2185 * and do best-effort filtering. 2186 */ 2187static void 2188tc35815_set_multicast_list(struct net_device *dev) 2189{ 2190 struct tc35815_regs __iomem *tr = 2191 (struct tc35815_regs __iomem *)dev->base_addr; 2192 2193 if (dev->flags & IFF_PROMISC) { 2194#ifdef WORKAROUND_100HALF_PROMISC 2195 /* With some (all?) 100MHalf HUB, controller will hang 2196 * if we enabled promiscuous mode before linkup... */ 2197 struct tc35815_local *lp = netdev_priv(dev); 2198 2199 if (!lp->link) 2200 return; 2201#endif 2202 /* Enable promiscuous mode */ 2203 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); 2204 } else if ((dev->flags & IFF_ALLMULTI) || 2205 dev->mc_count > CAM_ENTRY_MAX - 3) { 2206 /* CAM 0, 1, 20 are reserved. */ 2207 /* Disable promiscuous mode, use normal mode. */ 2208 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); 2209 } else if (dev->mc_count) { 2210 struct dev_mc_list *cur_addr = dev->mc_list; 2211 int i; 2212 int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE); 2213 2214 tc_writel(0, &tr->CAM_Ctl); 2215 /* Walk the address list, and load the filter */ 2216 for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) { 2217 if (!cur_addr) 2218 break; 2219 /* entry 0,1 is reserved. */ 2220 tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr); 2221 ena_bits |= CAM_Ena_Bit(i + 2); 2222 } 2223 tc_writel(ena_bits, &tr->CAM_Ena); 2224 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); 2225 } else { 2226 tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); 2227 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); 2228 } 2229} 2230 2231static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2232{ 2233 struct tc35815_local *lp = netdev_priv(dev); 2234 strcpy(info->driver, MODNAME); 2235 strcpy(info->version, DRV_VERSION); 2236 strcpy(info->bus_info, pci_name(lp->pci_dev)); 2237} 2238 2239static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2240{ 2241 struct tc35815_local *lp = netdev_priv(dev); 2242 2243 if (!lp->phy_dev) 2244 return -ENODEV; 2245 return phy_ethtool_gset(lp->phy_dev, cmd); 2246} 2247 2248static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2249{ 2250 struct tc35815_local *lp = netdev_priv(dev); 2251 2252 if (!lp->phy_dev) 2253 return -ENODEV; 2254 return phy_ethtool_sset(lp->phy_dev, cmd); 2255} 2256 2257static u32 tc35815_get_msglevel(struct net_device *dev) 2258{ 2259 struct tc35815_local *lp = netdev_priv(dev); 2260 return lp->msg_enable; 2261} 2262 2263static void tc35815_set_msglevel(struct net_device *dev, u32 datum) 2264{ 2265 struct tc35815_local *lp = netdev_priv(dev); 2266 lp->msg_enable = datum; 2267} 2268 2269static int tc35815_get_sset_count(struct net_device *dev, int sset) 2270{ 2271 struct tc35815_local *lp = netdev_priv(dev); 2272 2273 switch (sset) { 2274 case ETH_SS_STATS: 2275 return sizeof(lp->lstats) / sizeof(int); 2276 default: 2277 return -EOPNOTSUPP; 2278 } 2279} 2280 2281static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) 2282{ 2283 struct tc35815_local *lp = netdev_priv(dev); 2284 data[0] = lp->lstats.max_tx_qlen; 2285 data[1] = lp->lstats.tx_ints; 2286 data[2] = lp->lstats.rx_ints; 2287 data[3] = lp->lstats.tx_underrun; 2288} 2289 2290static struct { 2291 const char str[ETH_GSTRING_LEN]; 2292} ethtool_stats_keys[] = { 2293 { "max_tx_qlen" }, 2294 { "tx_ints" }, 2295 { "rx_ints" }, 2296 { "tx_underrun" }, 2297}; 2298 2299static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data) 2300{ 2301 memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys)); 2302} 2303 2304static const struct ethtool_ops tc35815_ethtool_ops = { 2305 .get_drvinfo = tc35815_get_drvinfo, 2306 .get_settings = tc35815_get_settings, 2307 .set_settings = tc35815_set_settings, 2308 .get_link = ethtool_op_get_link, 2309 .get_msglevel = tc35815_get_msglevel, 2310 .set_msglevel = tc35815_set_msglevel, 2311 .get_strings = tc35815_get_strings, 2312 .get_sset_count = tc35815_get_sset_count, 2313 .get_ethtool_stats = tc35815_get_ethtool_stats, 2314}; 2315 2316static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2317{ 2318 struct tc35815_local *lp = netdev_priv(dev); 2319 2320 if (!netif_running(dev)) 2321 return -EINVAL; 2322 if (!lp->phy_dev) 2323 return -ENODEV; 2324 return phy_mii_ioctl(lp->phy_dev, if_mii(rq), cmd); 2325} 2326 2327static void tc35815_chip_reset(struct net_device *dev) 2328{ 2329 struct tc35815_regs __iomem *tr = 2330 (struct tc35815_regs __iomem *)dev->base_addr; 2331 int i; 2332 /* reset the controller */ 2333 tc_writel(MAC_Reset, &tr->MAC_Ctl); 2334 udelay(4); /* 3200ns */ 2335 i = 0; 2336 while (tc_readl(&tr->MAC_Ctl) & MAC_Reset) { 2337 if (i++ > 100) { 2338 printk(KERN_ERR "%s: MAC reset failed.\n", dev->name); 2339 break; 2340 } 2341 mdelay(1); 2342 } 2343 tc_writel(0, &tr->MAC_Ctl); 2344 2345 /* initialize registers to default value */ 2346 tc_writel(0, &tr->DMA_Ctl); 2347 tc_writel(0, &tr->TxThrsh); 2348 tc_writel(0, &tr->TxPollCtr); 2349 tc_writel(0, &tr->RxFragSize); 2350 tc_writel(0, &tr->Int_En); 2351 tc_writel(0, &tr->FDA_Bas); 2352 tc_writel(0, &tr->FDA_Lim); 2353 tc_writel(0xffffffff, &tr->Int_Src); /* Write 1 to clear */ 2354 tc_writel(0, &tr->CAM_Ctl); 2355 tc_writel(0, &tr->Tx_Ctl); 2356 tc_writel(0, &tr->Rx_Ctl); 2357 tc_writel(0, &tr->CAM_Ena); 2358 (void)tc_readl(&tr->Miss_Cnt); /* Read to clear */ 2359 2360 /* initialize internal SRAM */ 2361 tc_writel(DMA_TestMode, &tr->DMA_Ctl); 2362 for (i = 0; i < 0x1000; i += 4) { 2363 tc_writel(i, &tr->CAM_Adr); 2364 tc_writel(0, &tr->CAM_Data); 2365 } 2366 tc_writel(0, &tr->DMA_Ctl); 2367} 2368 2369static void tc35815_chip_init(struct net_device *dev) 2370{ 2371 struct tc35815_local *lp = netdev_priv(dev); 2372 struct tc35815_regs __iomem *tr = 2373 (struct tc35815_regs __iomem *)dev->base_addr; 2374 unsigned long txctl = TX_CTL_CMD; 2375 2376 /* load station address to CAM */ 2377 tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr); 2378 2379 /* Enable CAM (broadcast and unicast) */ 2380 tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); 2381 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); 2382 2383 /* Use DMA_RxAlign_2 to make IP header 4-byte aligned. */ 2384 if (HAVE_DMA_RXALIGN(lp)) 2385 tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl); 2386 else 2387 tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl); 2388#ifdef TC35815_USE_PACKEDBUFFER 2389 tc_writel(RxFrag_EnPack | ETH_ZLEN, &tr->RxFragSize); /* Packing */ 2390#else 2391 tc_writel(ETH_ZLEN, &tr->RxFragSize); 2392#endif 2393 tc_writel(0, &tr->TxPollCtr); /* Batch mode */ 2394 tc_writel(TX_THRESHOLD, &tr->TxThrsh); 2395 tc_writel(INT_EN_CMD, &tr->Int_En); 2396 2397 /* set queues */ 2398 tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas); 2399 tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base, 2400 &tr->FDA_Lim); 2401 /* 2402 * Activation method: 2403 * First, enable the MAC Transmitter and the DMA Receive circuits. 2404 * Then enable the DMA Transmitter and the MAC Receive circuits. 2405 */ 2406 tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr); /* start DMA receiver */ 2407 tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */ 2408 2409 /* start MAC transmitter */ 2410#ifndef NO_CHECK_CARRIER 2411 /* TX4939 does not have EnLCarr */ 2412 if (lp->chiptype == TC35815_TX4939) 2413 txctl &= ~Tx_EnLCarr; 2414#ifdef WORKAROUND_LOSTCAR 2415 /* WORKAROUND: ignore LostCrS in full duplex operation */ 2416 if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL) 2417 txctl &= ~Tx_EnLCarr; 2418#endif 2419#endif /* !NO_CHECK_CARRIER */ 2420#ifdef GATHER_TXINT 2421 txctl &= ~Tx_EnComp; /* disable global tx completion int. */ 2422#endif 2423 tc_writel(txctl, &tr->Tx_Ctl); 2424} 2425 2426#ifdef CONFIG_PM 2427static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state) 2428{ 2429 struct net_device *dev = pci_get_drvdata(pdev); 2430 struct tc35815_local *lp = netdev_priv(dev); 2431 unsigned long flags; 2432 2433 pci_save_state(pdev); 2434 if (!netif_running(dev)) 2435 return 0; 2436 netif_device_detach(dev); 2437 if (lp->phy_dev) 2438 phy_stop(lp->phy_dev); 2439 spin_lock_irqsave(&lp->lock, flags); 2440 tc35815_chip_reset(dev); 2441 spin_unlock_irqrestore(&lp->lock, flags); 2442 pci_set_power_state(pdev, PCI_D3hot); 2443 return 0; 2444} 2445 2446static int tc35815_resume(struct pci_dev *pdev) 2447{ 2448 struct net_device *dev = pci_get_drvdata(pdev); 2449 struct tc35815_local *lp = netdev_priv(dev); 2450 2451 pci_restore_state(pdev); 2452 if (!netif_running(dev)) 2453 return 0; 2454 pci_set_power_state(pdev, PCI_D0); 2455 tc35815_restart(dev); 2456 netif_carrier_off(dev); 2457 if (lp->phy_dev) 2458 phy_start(lp->phy_dev); 2459 netif_device_attach(dev); 2460 return 0; 2461} 2462#endif /* CONFIG_PM */ 2463 2464static struct pci_driver tc35815_pci_driver = { 2465 .name = MODNAME, 2466 .id_table = tc35815_pci_tbl, 2467 .probe = tc35815_init_one, 2468 .remove = __devexit_p(tc35815_remove_one), 2469#ifdef CONFIG_PM 2470 .suspend = tc35815_suspend, 2471 .resume = tc35815_resume, 2472#endif 2473}; 2474 2475module_param_named(speed, options.speed, int, 0); 2476MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps"); 2477module_param_named(duplex, options.duplex, int, 0); 2478MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full"); 2479 2480static int __init tc35815_init_module(void) 2481{ 2482 return pci_register_driver(&tc35815_pci_driver); 2483} 2484 2485static void __exit tc35815_cleanup_module(void) 2486{ 2487 pci_unregister_driver(&tc35815_pci_driver); 2488} 2489 2490module_init(tc35815_init_module); 2491module_exit(tc35815_cleanup_module); 2492 2493MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver"); 2494MODULE_LICENSE("GPL");