Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
fealnx.c at tag v2.6.21-rc3 (1994 lines, 56 kB)
/*
	Written 1998-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support information and updates available at
	http://www.scyld.com/network/pci-skeleton.html

	Linux kernel updates:

	Version 2.51, Nov 17, 2001 (jgarzik):
	- Add ethtool support
	- Replace some MII-related magic numbers with constants

*/

#define DRV_NAME	"fealnx"
#define DRV_VERSION	"2.52"
#define DRV_RELDATE	"Sep-11-2006"

static int debug;		/* 1-> print debug message */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
/* Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc. */
/* Both 'options[]' and 'full_duplex[]' should exist for driver */
/* interoperability. */
/* The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };

/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency. */
/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
/* Making the Tx ring too large decreases the effectiveness of channel */
/* bonding and packet priority. */
/* There are no ill effects from too-large receive rings. */
// 88-12-9 modify,
// #define TX_RING_SIZE 16
// #define RX_RING_SIZE 32
#define TX_RING_SIZE	6
#define RX_RING_SIZE	12
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct fealnx_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct fealnx_desc)

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */


/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";


/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. */
#ifndef __alpha__
#define USE_IO_OPS
#endif

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
/* This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))

MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
//MODULE_PARM(min_pci_latency, "i");
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");

enum {
	MIN_REGION_SIZE		= 136,
};

/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {
	HAS_MII_XCVR,
	HAS_CHIP_XCVR,
};

/* 89/6/13 add, */
/* for different PHY */
enum phy_type_flags {
	MysonPHY = 1,
	AhdocPHY = 2,
	SeeqPHY = 3,
	MarvellPHY = 4,
	Myson981 = 5,
	LevelOnePHY = 6,
	OtherPHY = 10,
};

struct chip_info {
	char *chip_name;
	int flags;
};

static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
	{ "100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
	{ "100/10M Ethernet PCI Adapter",	HAS_CHIP_XCVR },
	{ "1000/100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
};

/* Offsets to the Command and Status Registers. */
enum fealnx_offsets {
	PAR0 = 0x0,		/* physical address 0-3 */
	PAR1 = 0x04,		/* physical address 4-5 */
	MAR0 = 0x08,		/* multicast address 0-3 */
	MAR1 = 0x0C,		/* multicast address 4-7 */
	FAR0 = 0x10,		/* flow-control address 0-3 */
	FAR1 = 0x14,		/* flow-control address 4-5 */
	TCRRCR = 0x18,		/* receive & transmit configuration */
	BCR = 0x1C,		/* bus command */
	TXPDR = 0x20,		/* transmit polling demand */
	RXPDR = 0x24,		/* receive polling demand */
	RXCWP = 0x28,		/* receive current word pointer */
	TXLBA = 0x2C,		/* transmit list base address */
	RXLBA = 0x30,		/* receive list base address */
	ISR = 0x34,		/* interrupt status */
	IMR = 0x38,		/* interrupt mask */
	FTH = 0x3C,		/* flow control high/low threshold */
	MANAGEMENT = 0x40,	/* bootrom/eeprom and mii management */
	TALLY = 0x44,		/* tally counters for crc and mpa */
	TSR = 0x48,		/* tally counter for transmit status */
	BMCRSR = 0x4c,		/* basic mode control and status */
	PHYIDENTIFIER = 0x50,	/* phy identifier */
	ANARANLPAR = 0x54,	/* auto-negotiation advertisement and link
				   partner ability */
	ANEROCR = 0x58,		/* auto-negotiation expansion and pci conf. */
	BPREMRPSR = 0x5c,	/* bypass & receive error mask and phy status */
};

/* Bits in the interrupt status/enable registers. */
/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
enum intr_status_bits {
	RFCON = 0x00020000,	/* receive flow control xon packet */
	RFCOFF = 0x00010000,	/* receive flow control xoff packet */
	LSCStatus = 0x00008000,	/* link status change */
	ANCStatus = 0x00004000,	/* autonegotiation completed */
	FBE = 0x00002000,	/* fatal bus error */
	FBEMask = 0x00001800,	/* mask bit12-11 */
	ParityErr = 0x00000000,	/* parity error */
	TargetErr = 0x00001000,	/* target abort */
	MasterErr = 0x00000800,	/* master error */
	TUNF = 0x00000400,	/* transmit underflow */
	ROVF = 0x00000200,	/* receive overflow */
	ETI = 0x00000100,	/* transmit early int */
	ERI = 0x00000080,	/* receive early int */
	CNTOVF = 0x00000040,	/* counter overflow */
	RBU = 0x00000020,	/* receive buffer unavailable */
	TBU = 0x00000010,	/* transmit buffer unavailable */
	TI = 0x00000008,	/* transmit interrupt */
	RI = 0x00000004,	/* receive interrupt */
	RxErr = 0x00000002,	/* receive error */
};

/* Bits in the NetworkConfig register, W for writing, R for reading */
/* FIXME: some names are invented by me. Marked with (name?) */
/* If you have docs and know bit names, please fix 'em */
enum rx_mode_bits {
	CR_W_ENH = 0x02000000,		/* enhanced mode (name?) */
	CR_W_FD = 0x00100000,		/* full duplex */
	CR_W_PS10 = 0x00080000,		/* 10 mbit */
	CR_W_TXEN = 0x00040000,		/* tx enable (name?) */
	CR_W_PS1000 = 0x00010000,	/* 1000 mbit */
	/* CR_W_RXBURSTMASK = 0x00000e00, I'm unsure about this */
	CR_W_RXMODEMASK = 0x000000e0,
	CR_W_PROM = 0x00000080,		/* promiscuous mode */
	CR_W_AB = 0x00000040,		/* accept broadcast */
	CR_W_AM = 0x00000020,		/* accept multicast */
	CR_W_ARP = 0x00000008,		/* receive runt pkt */
	CR_W_ALP = 0x00000004,		/* receive long pkt */
	CR_W_SEP = 0x00000002,		/* receive error pkt */
	CR_W_RXEN = 0x00000001,		/* rx enable (unicast?) (name?) */

	CR_R_TXSTOP = 0x04000000,	/* tx stopped (name?) */
	CR_R_FD = 0x00100000,		/* full duplex detected */
	CR_R_PS10 = 0x00080000,		/* 10 mbit detected */
	CR_R_RXSTOP = 0x00008000,	/* rx stopped (name?) */
};

/* The Tulip Rx and Tx buffer descriptors. */
struct fealnx_desc {
	s32 status;
	s32 control;
	u32 buffer;
	u32 next_desc;
	struct fealnx_desc *next_desc_logical;
	struct sk_buff *skbuff;
	u32 reserved1;
	u32 reserved2;
};

/* Bits in network_desc.status */
enum rx_desc_status_bits {
	RXOWN = 0x80000000,	/* own bit */
	FLNGMASK = 0x0fff0000,	/* frame length */
	FLNGShift = 16,
	MARSTATUS = 0x00004000,	/* multicast address received */
	BARSTATUS = 0x00002000,	/* broadcast address received */
	PHYSTATUS = 0x00001000,	/* physical address received */
	RXFSD = 0x00000800,	/* first descriptor */
	RXLSD = 0x00000400,	/* last descriptor */
	ErrorSummary = 0x80,	/* error summary */
	RUNT = 0x40,		/* runt packet received */
	LONG = 0x20,		/* long packet received */
	FAE = 0x10,		/* frame align error */
	CRC = 0x08,		/* crc error */
	RXER = 0x04,		/* receive error */
};

enum rx_desc_control_bits {
	RXIC = 0x00800000,	/* interrupt control */
	RBSShift = 0,
};

enum tx_desc_status_bits {
	TXOWN = 0x80000000,	/* own bit */
	JABTO = 0x00004000,	/* jabber timeout */
	CSL = 0x00002000,	/* carrier sense lost */
	LC = 0x00001000,	/* late collision */
	EC = 0x00000800,	/* excessive collision */
	UDF = 0x00000400,	/* fifo underflow */
	DFR = 0x00000200,	/* deferred */
	HF = 0x00000100,	/* heartbeat fail */
	NCRMask = 0x000000ff,	/* collision retry count */
	NCRShift = 0,
};

enum tx_desc_control_bits {
	TXIC = 0x80000000,	/* interrupt control */
	ETIControl = 0x40000000,	/* early transmit interrupt */
	TXLD = 0x20000000,	/* last descriptor */
	TXFD = 0x10000000,	/* first descriptor */
	CRCEnable = 0x08000000,	/* crc control */
	PADEnable = 0x04000000,	/* padding control */
	RetryTxLC = 0x02000000,	/* retry late collision */
	PKTSMask = 0x3ff800,	/* packet size bit21-11 */
	PKTSShift = 11,
	TBSMask = 0x000007ff,	/* transmit buffer bit 10-0 */
	TBSShift = 0,
};

/* BootROM/EEPROM/MII Management Register */
#define MASK_MIIR_MII_READ	0x00000000
#define MASK_MIIR_MII_WRITE	0x00000008
#define MASK_MIIR_MII_MDO	0x00000004
#define MASK_MIIR_MII_MDI	0x00000002
#define MASK_MIIR_MII_MDC	0x00000001

/* ST+OP+PHYAD+REGAD+TA */
#define OP_READ		0x6000	/* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
#define OP_WRITE	0x5002	/* ST:01+OP:01+PHYAD+REGAD+TA:10 */

/* ------------------------------------------------------------------------- */
/* Constants for Myson PHY                                                   */
/* ------------------------------------------------------------------------- */
#define MysonPHYID	0xd0000302
/* 89-7-27 add, (begin) */
#define MysonPHYID0	0x0302
#define StatusRegister	18
#define SPEED100	0x0400	// bit10
#define FULLMODE	0x0800	// bit11
/* 89-7-27 add, (end) */

/* ------------------------------------------------------------------------- */
/* Constants for Seeq 80225 PHY                                              */
/* ------------------------------------------------------------------------- */
#define SeeqPHYID0	0x0016

#define MIIRegister18	18
#define SPD_DET_100	0x80
#define DPLX_DET_FULL	0x40

/* ------------------------------------------------------------------------- */
/* Constants for Ahdoc 101 PHY                                               */
/* ------------------------------------------------------------------------- */
#define AhdocPHYID0	0x0022

#define DiagnosticReg	18
#define DPLX_FULL	0x0800
#define Speed_100	0x0400

/* 89/6/13 add, */
/* -------------------------------------------------------------------------- */
/* Constants                                                                  */
/* -------------------------------------------------------------------------- */
#define MarvellPHYID0		0x0141
#define LevelOnePHYID0		0x0013

#define MII1000BaseTControlReg	9
#define MII1000BaseTStatusReg	10
#define SpecificReg		17

/* for 1000BaseT Control Register */
#define PHYAbletoPerform1000FullDuplex	0x0200
#define PHYAbletoPerform1000HalfDuplex	0x0100
#define PHY1000AbilityMask		0x300

// for phy specific status register, marvell phy.
#define SpeedMask	0x0c000
#define Speed_1000M	0x08000
#define Speed_100M	0x4000
#define Speed_10M	0
#define Full_Duplex	0x2000

// 89/12/29 add, for phy specific status register, levelone phy, (begin)
#define LXT1000_100M	0x08000
#define LXT1000_1000M	0x0c000
#define LXT1000_Full	0x200
// 89/12/29 add, for phy specific status register, levelone phy, (end)

/* for 3-in-1 case, BMCRSR register */
#define LinkIsUp2	0x00040000

/* for PHY */
#define LinkIsUp	0x0004


struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct fealnx_desc *rx_ring;
	struct fealnx_desc *tx_ring;

	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	spinlock_t lock;

	struct net_device_stats stats;

	/* Media monitoring timer. */
	struct timer_list timer;

	/* Reset timer */
	struct timer_list reset_timer;
	int reset_timer_armed;
	unsigned long crvalue_sv;
	unsigned long imrvalue_sv;

	/* Frequently used values: keep some adjacent for cache effect. */
	int flags;
	struct pci_dev *pci_dev;
	unsigned long crvalue;
	unsigned long bcrvalue;
	unsigned long imrvalue;
	struct fealnx_desc *cur_rx;
	struct fealnx_desc *lack_rxbuf;
	int really_rx_count;
	struct fealnx_desc *cur_tx;
	struct fealnx_desc *cur_tx_copy;
	int really_tx_count;
	int free_tx_count;
	unsigned int rx_buf_sz;	/* Based on MTU+slack. */

	/* These values keep track of the transceiver/media in use. */
	unsigned int linkok;
	unsigned int line_speed;
	unsigned int duplexmode;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int PHYType;

	/* MII transceiver section. */
	int mii_cnt;		/* Number of MII PHYs found. */
	unsigned char phys[2];	/* MII device addresses. */
	struct mii_if_info mii;
	void __iomem *mem;
};


static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void reset_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
static void reset_rx_descriptors(struct net_device *dev);
static void reset_tx_descriptors(struct net_device *dev);

/* Clear the receive-enable bit, then busy-wait until the chip reports its
   receive unit has stopped or the delay budget runs out. */
static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;
	iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
			break;
	}
}


/* As above, but stops both the transmit and receive units. */
static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;
	iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
			== (CR_R_RXSTOP+CR_R_TXSTOP) )
			break;
	}
}


static int __devinit fealnx_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct netdev_private *np;
	int i, option, err, irq;
	static int card_idx = -1;
	char boardname[12];
	void __iomem *ioaddr;
	unsigned long len;
	unsigned int chip_id = ent->driver_data;
	struct net_device *dev;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	card_idx++;
	sprintf(boardname, "fealnx%d", card_idx);

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;

	i = pci_enable_device(pdev);
	if (i) return i;
	pci_set_master(pdev);

	len = pci_resource_len(pdev, bar);
	if (len < MIN_REGION_SIZE) {
		dev_err(&pdev->dev,
			"region size %ld too small, aborting\n", len);
		return -ENODEV;
	}

	i = pci_request_regions(pdev, boardname);
	if (i)
		return i;

	irq = pdev->irq;

	ioaddr = pci_iomap(pdev, bar, len);
	if (!ioaddr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	dev = alloc_etherdev(sizeof(struct netdev_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_unmap;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* read ethernet id */
	for (i = 0; i < 6; ++i)
		dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	/* Make certain the descriptor lists are aligned. */
	np = netdev_priv(dev);
	np->mem = ioaddr;
	spin_lock_init(&np->lock);
	np->pci_dev = pdev;
	np->flags = skel_netdrv_tbl[chip_id].flags;
	pci_set_drvdata(pdev, dev);
	np->mii.dev = dev;
	np->mii.mdio_read = mdio_read;
	np->mii.mdio_write = mdio_write;
	np->mii.phy_id_mask = 0x1f;
	np->mii.reg_num_mask = 0x1f;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	np->rx_ring = (struct fealnx_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_rx;
	}
	np->tx_ring = (struct fealnx_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	/* find the connected MII xcvrs */
	if (np->flags == HAS_MII_XCVR) {
		int phy, phy_idx = 0;

		for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
			int mii_status = mdio_read(dev, phy, 1);

			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					 "MII PHY found at address %d, status "
					 "0x%4.4x.\n", phy, mii_status);
				/* get phy type */
				{
					unsigned int data;

					data = mdio_read(dev, np->phys[0], 2);
					if (data == SeeqPHYID0)
						np->PHYType = SeeqPHY;
					else if (data == AhdocPHYID0)
						np->PHYType = AhdocPHY;
					else if (data == MarvellPHYID0)
						np->PHYType = MarvellPHY;
					else if (data == MysonPHYID0)
						np->PHYType = Myson981;
					else if (data == LevelOnePHYID0)
						np->PHYType = LevelOnePHY;
					else
						np->PHYType = OtherPHY;
				}
			}
		}

		np->mii_cnt = phy_idx;
		if (phy_idx == 0)
			dev_warn(&pdev->dev,
				 "MII PHY not found -- this device may "
				 "not operate correctly.\n");
	} else {
		np->phys[0] = 32;
/* 89/6/23 add, (begin) */
		/* get phy type */
		if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
			np->PHYType = MysonPHY;
		else
			np->PHYType = OtherPHY;
	}
	np->mii.phy_id = np->phys[0];

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii.full_duplex = 1;
		np->default_port = option & 15;
	}

	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->mii.full_duplex = full_duplex[card_idx];

	if (np->mii.full_duplex) {
		dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
/* 89/6/13 add, (begin) */
//	if (np->PHYType==MarvellPHY)
		if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], 9);
			data = (data & 0xfcff) | 0x0200;
			mdio_write(dev, np->phys[0], 9, data);
		}
/* 89/6/13 add, (end) */
		if (np->flags == HAS_MII_XCVR)
			mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
		else
			iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
		np->mii.force_media = 1;
	}

	/* The chip-specific entries in the device structure.
*/ 654 dev->open = &netdev_open; 655 dev->hard_start_xmit = &start_tx; 656 dev->stop = &netdev_close; 657 dev->get_stats = &get_stats; 658 dev->set_multicast_list = &set_rx_mode; 659 dev->do_ioctl = &mii_ioctl; 660 dev->ethtool_ops = &netdev_ethtool_ops; 661 dev->tx_timeout = &tx_timeout; 662 dev->watchdog_timeo = TX_TIMEOUT; 663 664 err = register_netdev(dev); 665 if (err) 666 goto err_out_free_tx; 667 668 printk(KERN_INFO "%s: %s at %p, ", 669 dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr); 670 for (i = 0; i < 5; i++) 671 printk("%2.2x:", dev->dev_addr[i]); 672 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); 673 674 return 0; 675 676err_out_free_tx: 677 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); 678err_out_free_rx: 679 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); 680err_out_free_dev: 681 free_netdev(dev); 682err_out_unmap: 683 pci_iounmap(pdev, ioaddr); 684err_out_res: 685 pci_release_regions(pdev); 686 return err; 687} 688 689 690static void __devexit fealnx_remove_one(struct pci_dev *pdev) 691{ 692 struct net_device *dev = pci_get_drvdata(pdev); 693 694 if (dev) { 695 struct netdev_private *np = netdev_priv(dev); 696 697 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, 698 np->tx_ring_dma); 699 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, 700 np->rx_ring_dma); 701 unregister_netdev(dev); 702 pci_iounmap(pdev, np->mem); 703 free_netdev(dev); 704 pci_release_regions(pdev); 705 pci_set_drvdata(pdev, NULL); 706 } else 707 printk(KERN_ERR "fealnx: remove for unknown device\n"); 708} 709 710 711static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad) 712{ 713 ulong miir; 714 int i; 715 unsigned int mask, data; 716 717 /* enable MII output */ 718 miir = (ulong) ioread32(miiport); 719 miir &= 0xfffffff0; 720 721 miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO; 722 723 /* send 32 1's preamble */ 724 for (i = 0; i < 32; i++) { 725 /* low MDC; MDO is already high (miir) */ 726 miir &= ~MASK_MIIR_MII_MDC; 727 iowrite32(miir, miiport); 728 729 /* high MDC */ 730 miir |= MASK_MIIR_MII_MDC; 731 iowrite32(miir, miiport); 732 } 733 734 /* calculate ST+OP+PHYAD+REGAD+TA */ 735 data = opcode | (phyad << 7) | (regad << 2); 736 737 /* sent out */ 738 mask = 0x8000; 739 while (mask) { 740 /* low MDC, prepare MDO */ 741 miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO); 742 if (mask & data) 743 miir |= MASK_MIIR_MII_MDO; 744 745 iowrite32(miir, miiport); 746 /* high MDC */ 747 miir |= MASK_MIIR_MII_MDC; 748 iowrite32(miir, miiport); 749 udelay(30); 750 751 /* next */ 752 mask >>= 1; 753 if (mask == 0x2 && opcode == OP_READ) 754 miir &= ~MASK_MIIR_MII_WRITE; 755 } 756 return miir; 757} 758 759 760static int mdio_read(struct net_device *dev, int phyad, int regad) 761{ 762 struct netdev_private *np = netdev_priv(dev); 763 void __iomem *miiport = np->mem + MANAGEMENT; 764 ulong miir; 765 unsigned int mask, data; 766 767 miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad); 768 769 /* read data */ 770 mask = 0x8000; 771 data = 0; 772 while (mask) { 773 /* low MDC */ 774 miir &= ~MASK_MIIR_MII_MDC; 775 iowrite32(miir, miiport); 776 777 /* read MDI */ 778 miir = ioread32(miiport); 779 if (miir & MASK_MIIR_MII_MDI) 780 data |= mask; 781 782 /* high MDC, and wait */ 783 miir |= MASK_MIIR_MII_MDC; 784 iowrite32(miir, miiport); 785 udelay(30); 786 787 /* next */ 788 mask >>= 1; 789 } 790 791 /* low MDC */ 792 miir &= ~MASK_MIIR_MII_MDC; 793 iowrite32(miir, miiport); 794 795 return data & 0xffff; 796} 
797 798 799static void mdio_write(struct net_device *dev, int phyad, int regad, int data) 800{ 801 struct netdev_private *np = netdev_priv(dev); 802 void __iomem *miiport = np->mem + MANAGEMENT; 803 ulong miir; 804 unsigned int mask; 805 806 miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad); 807 808 /* write data */ 809 mask = 0x8000; 810 while (mask) { 811 /* low MDC, prepare MDO */ 812 miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO); 813 if (mask & data) 814 miir |= MASK_MIIR_MII_MDO; 815 iowrite32(miir, miiport); 816 817 /* high MDC */ 818 miir |= MASK_MIIR_MII_MDC; 819 iowrite32(miir, miiport); 820 821 /* next */ 822 mask >>= 1; 823 } 824 825 /* low MDC */ 826 miir &= ~MASK_MIIR_MII_MDC; 827 iowrite32(miir, miiport); 828} 829 830 831static int netdev_open(struct net_device *dev) 832{ 833 struct netdev_private *np = netdev_priv(dev); 834 void __iomem *ioaddr = np->mem; 835 int i; 836 837 iowrite32(0x00000001, ioaddr + BCR); /* Reset */ 838 839 if (request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev)) 840 return -EAGAIN; 841 842 for (i = 0; i < 3; i++) 843 iowrite16(((unsigned short*)dev->dev_addr)[i], 844 ioaddr + PAR0 + i*2); 845 846 init_ring(dev); 847 848 iowrite32(np->rx_ring_dma, ioaddr + RXLBA); 849 iowrite32(np->tx_ring_dma, ioaddr + TXLBA); 850 851 /* Initialize other registers. */ 852 /* Configure the PCI bus bursts and FIFO thresholds. 853 486: Set 8 longword burst. 854 586: no burst limit. 855 Burst length 5:3 856 0 0 0 1 857 0 0 1 4 858 0 1 0 8 859 0 1 1 16 860 1 0 0 32 861 1 0 1 64 862 1 1 0 128 863 1 1 1 256 864 Wait the specified 50 PCI cycles after a reset by initializing 865 Tx and Rx queues and the address filter list. 866 FIXME (Ueimor): optimistic for alpha + posted writes ? */ 867#if defined(__powerpc__) || defined(__sparc__) 868// 89/9/1 modify, 869// np->bcrvalue=0x04 | 0x0x38; /* big-endian, 256 burst length */ 870 np->bcrvalue = 0x04 | 0x10; /* big-endian, tx 8 burst length */ 871 np->crvalue = 0xe00; /* rx 128 burst length */ 872#elif defined(__alpha__) || defined(__x86_64__) 873// 89/9/1 modify, 874// np->bcrvalue=0x38; /* little-endian, 256 burst length */ 875 np->bcrvalue = 0x10; /* little-endian, 8 burst length */ 876 np->crvalue = 0xe00; /* rx 128 burst length */ 877#elif defined(__i386__) 878#if defined(MODULE) 879// 89/9/1 modify, 880// np->bcrvalue=0x38; /* little-endian, 256 burst length */ 881 np->bcrvalue = 0x10; /* little-endian, 8 burst length */ 882 np->crvalue = 0xe00; /* rx 128 burst length */ 883#else 884 /* When not a module we can work around broken '486 PCI boards. */ 885#define x86 boot_cpu_data.x86 886// 89/9/1 modify, 887// np->bcrvalue=(x86 <= 4 ? 0x10 : 0x38); 888 np->bcrvalue = 0x10; 889 np->crvalue = (x86 <= 4 ? 0xa00 : 0xe00); 890 if (x86 <= 4) 891 printk(KERN_INFO "%s: This is a 386/486 PCI system, setting burst " 892 "length to %x.\n", dev->name, (x86 <= 4 ? 0x10 : 0x38)); 893#endif 894#else 895// 89/9/1 modify, 896// np->bcrvalue=0x38; 897 np->bcrvalue = 0x10; 898 np->crvalue = 0xe00; /* rx 128 burst length */ 899#warning Processor architecture undefined! 
900#endif 901// 89/12/29 add, 902// 90/1/16 modify, 903// np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI; 904 np->imrvalue = TUNF | CNTOVF | RBU | TI | RI; 905 if (np->pci_dev->device == 0x891) { 906 np->bcrvalue |= 0x200; /* set PROG bit */ 907 np->crvalue |= CR_W_ENH; /* set enhanced bit */ 908 np->imrvalue |= ETI; 909 } 910 iowrite32(np->bcrvalue, ioaddr + BCR); 911 912 if (dev->if_port == 0) 913 dev->if_port = np->default_port; 914 915 iowrite32(0, ioaddr + RXPDR); 916// 89/9/1 modify, 917// np->crvalue = 0x00e40001; /* tx store and forward, tx/rx enable */ 918 np->crvalue |= 0x00e40001; /* tx store and forward, tx/rx enable */ 919 np->mii.full_duplex = np->mii.force_media; 920 getlinkstatus(dev); 921 if (np->linkok) 922 getlinktype(dev); 923 __set_rx_mode(dev); 924 925 netif_start_queue(dev); 926 927 /* Clear and Enable interrupts by setting the interrupt mask. */ 928 iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR); 929 iowrite32(np->imrvalue, ioaddr + IMR); 930 931 if (debug) 932 printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name); 933 934 /* Set the timer to check for link beat. */ 935 init_timer(&np->timer); 936 np->timer.expires = RUN_AT(3 * HZ); 937 np->timer.data = (unsigned long) dev; 938 np->timer.function = &netdev_timer; 939 940 /* timer handler */ 941 add_timer(&np->timer); 942 943 init_timer(&np->reset_timer); 944 np->reset_timer.data = (unsigned long) dev; 945 np->reset_timer.function = &reset_timer; 946 np->reset_timer_armed = 0; 947 948 return 0; 949} 950 951 952static void getlinkstatus(struct net_device *dev) 953/* function: Routine will read MII Status Register to get link status. */ 954/* input : dev... pointer to the adapter block. */ 955/* output : none. */ 956{ 957 struct netdev_private *np = netdev_priv(dev); 958 unsigned int i, DelayTime = 0x1000; 959 960 np->linkok = 0; 961 962 if (np->PHYType == MysonPHY) { 963 for (i = 0; i < DelayTime; ++i) { 964 if (ioread32(np->mem + BMCRSR) & LinkIsUp2) { 965 np->linkok = 1; 966 return; 967 } 968 udelay(100); 969 } 970 } else { 971 for (i = 0; i < DelayTime; ++i) { 972 if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) { 973 np->linkok = 1; 974 return; 975 } 976 udelay(100); 977 } 978 } 979} 980 981 982static void getlinktype(struct net_device *dev) 983{ 984 struct netdev_private *np = netdev_priv(dev); 985 986 if (np->PHYType == MysonPHY) { /* 3-in-1 case */ 987 if (ioread32(np->mem + TCRRCR) & CR_R_FD) 988 np->duplexmode = 2; /* full duplex */ 989 else 990 np->duplexmode = 1; /* half duplex */ 991 if (ioread32(np->mem + TCRRCR) & CR_R_PS10) 992 np->line_speed = 1; /* 10M */ 993 else 994 np->line_speed = 2; /* 100M */ 995 } else { 996 if (np->PHYType == SeeqPHY) { /* this PHY is SEEQ 80225 */ 997 unsigned int data; 998 999 data = mdio_read(dev, np->phys[0], MIIRegister18); 1000 if (data & SPD_DET_100) 1001 np->line_speed = 2; /* 100M */ 1002 else 1003 np->line_speed = 1; /* 10M */ 1004 if (data & DPLX_DET_FULL) 1005 np->duplexmode = 2; /* full duplex mode */ 1006 else 1007 np->duplexmode = 1; /* half duplex mode */ 1008 } else if (np->PHYType == AhdocPHY) { 1009 unsigned int data; 1010 1011 data = mdio_read(dev, np->phys[0], DiagnosticReg); 1012 if (data & Speed_100) 1013 np->line_speed = 2; /* 100M */ 1014 else 1015 np->line_speed = 1; /* 10M */ 1016 if (data & DPLX_FULL) 1017 np->duplexmode = 2; /* full duplex mode */ 1018 else 1019 np->duplexmode = 1; /* half duplex mode */ 1020 } 1021/* 89/6/13 add, (begin) */ 1022 else if (np->PHYType == MarvellPHY) { 1023 unsigned int data; 1024 1025 data = 
mdio_read(dev, np->phys[0], SpecificReg); 1026 if (data & Full_Duplex) 1027 np->duplexmode = 2; /* full duplex mode */ 1028 else 1029 np->duplexmode = 1; /* half duplex mode */ 1030 data &= SpeedMask; 1031 if (data == Speed_1000M) 1032 np->line_speed = 3; /* 1000M */ 1033 else if (data == Speed_100M) 1034 np->line_speed = 2; /* 100M */ 1035 else 1036 np->line_speed = 1; /* 10M */ 1037 } 1038/* 89/6/13 add, (end) */ 1039/* 89/7/27 add, (begin) */ 1040 else if (np->PHYType == Myson981) { 1041 unsigned int data; 1042 1043 data = mdio_read(dev, np->phys[0], StatusRegister); 1044 1045 if (data & SPEED100) 1046 np->line_speed = 2; 1047 else 1048 np->line_speed = 1; 1049 1050 if (data & FULLMODE) 1051 np->duplexmode = 2; 1052 else 1053 np->duplexmode = 1; 1054 } 1055/* 89/7/27 add, (end) */ 1056/* 89/12/29 add */ 1057 else if (np->PHYType == LevelOnePHY) { 1058 unsigned int data; 1059 1060 data = mdio_read(dev, np->phys[0], SpecificReg); 1061 if (data & LXT1000_Full) 1062 np->duplexmode = 2; /* full duplex mode */ 1063 else 1064 np->duplexmode = 1; /* half duplex mode */ 1065 data &= SpeedMask; 1066 if (data == LXT1000_1000M) 1067 np->line_speed = 3; /* 1000M */ 1068 else if (data == LXT1000_100M) 1069 np->line_speed = 2; /* 100M */ 1070 else 1071 np->line_speed = 1; /* 10M */ 1072 } 1073 np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000); 1074 if (np->line_speed == 1) 1075 np->crvalue |= CR_W_PS10; 1076 else if (np->line_speed == 3) 1077 np->crvalue |= CR_W_PS1000; 1078 if (np->duplexmode == 2) 1079 np->crvalue |= CR_W_FD; 1080 } 1081} 1082 1083 1084/* Take lock before calling this */ 1085static void allocate_rx_buffers(struct net_device *dev) 1086{ 1087 struct netdev_private *np = netdev_priv(dev); 1088 1089 /* allocate skb for rx buffers */ 1090 while (np->really_rx_count != RX_RING_SIZE) { 1091 struct sk_buff *skb; 1092 1093 skb = dev_alloc_skb(np->rx_buf_sz); 1094 if (skb == NULL) 1095 break; /* Better luck next round. */ 1096 1097 while (np->lack_rxbuf->skbuff) 1098 np->lack_rxbuf = np->lack_rxbuf->next_desc_logical; 1099 1100 skb->dev = dev; /* Mark as being used by this device. 
*/ 1101 np->lack_rxbuf->skbuff = skb; 1102 np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data, 1103 np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1104 np->lack_rxbuf->status = RXOWN; 1105 ++np->really_rx_count; 1106 } 1107} 1108 1109 1110static void netdev_timer(unsigned long data) 1111{ 1112 struct net_device *dev = (struct net_device *) data; 1113 struct netdev_private *np = netdev_priv(dev); 1114 void __iomem *ioaddr = np->mem; 1115 int old_crvalue = np->crvalue; 1116 unsigned int old_linkok = np->linkok; 1117 unsigned long flags; 1118 1119 if (debug) 1120 printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x " 1121 "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR), 1122 ioread32(ioaddr + TCRRCR)); 1123 1124 spin_lock_irqsave(&np->lock, flags); 1125 1126 if (np->flags == HAS_MII_XCVR) { 1127 getlinkstatus(dev); 1128 if ((old_linkok == 0) && (np->linkok == 1)) { /* we need to detect the media type again */ 1129 getlinktype(dev); 1130 if (np->crvalue != old_crvalue) { 1131 stop_nic_rxtx(ioaddr, np->crvalue); 1132 iowrite32(np->crvalue, ioaddr + TCRRCR); 1133 } 1134 } 1135 } 1136 1137 allocate_rx_buffers(dev); 1138 1139 spin_unlock_irqrestore(&np->lock, flags); 1140 1141 np->timer.expires = RUN_AT(10 * HZ); 1142 add_timer(&np->timer); 1143} 1144 1145 1146/* Take lock before calling */ 1147/* Reset chip and disable rx, tx and interrupts */ 1148static void reset_and_disable_rxtx(struct net_device *dev) 1149{ 1150 struct netdev_private *np = netdev_priv(dev); 1151 void __iomem *ioaddr = np->mem; 1152 int delay=51; 1153 1154 /* Reset the chip's Tx and Rx processes. */ 1155 stop_nic_rxtx(ioaddr, 0); 1156 1157 /* Disable interrupts by clearing the interrupt mask. */ 1158 iowrite32(0, ioaddr + IMR); 1159 1160 /* Reset the chip to erase previous misconfiguration. */ 1161 iowrite32(0x00000001, ioaddr + BCR); 1162 1163 /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw). 1164 We surely wait too long (address+data phase). Who cares? */ 1165 while (--delay) { 1166 ioread32(ioaddr + BCR); 1167 rmb(); 1168 } 1169} 1170 1171 1172/* Take lock before calling */ 1173/* Restore chip after reset */ 1174static void enable_rxtx(struct net_device *dev) 1175{ 1176 struct netdev_private *np = netdev_priv(dev); 1177 void __iomem *ioaddr = np->mem; 1178 1179 reset_rx_descriptors(dev); 1180 1181 iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring), 1182 ioaddr + TXLBA); 1183 iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring), 1184 ioaddr + RXLBA); 1185 1186 iowrite32(np->bcrvalue, ioaddr + BCR); 1187 1188 iowrite32(0, ioaddr + RXPDR); 1189 __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */ 1190 1191 /* Clear and Enable interrupts by setting the interrupt mask. */ 1192 iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR); 1193 iowrite32(np->imrvalue, ioaddr + IMR); 1194 1195 iowrite32(0, ioaddr + TXPDR); 1196} 1197 1198 1199static void reset_timer(unsigned long data) 1200{ 1201 struct net_device *dev = (struct net_device *) data; 1202 struct netdev_private *np = netdev_priv(dev); 1203 unsigned long flags; 1204 1205 printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name); 1206 1207 spin_lock_irqsave(&np->lock, flags); 1208 np->crvalue = np->crvalue_sv; 1209 np->imrvalue = np->imrvalue_sv; 1210 1211 reset_and_disable_rxtx(dev); 1212 /* works for me without this: 1213 reset_tx_descriptors(dev); */ 1214 enable_rxtx(dev); 1215 netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? 
*/ 1216 1217 np->reset_timer_armed = 0; 1218 1219 spin_unlock_irqrestore(&np->lock, flags); 1220} 1221 1222 1223static void tx_timeout(struct net_device *dev) 1224{ 1225 struct netdev_private *np = netdev_priv(dev); 1226 void __iomem *ioaddr = np->mem; 1227 unsigned long flags; 1228 int i; 1229 1230 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x," 1231 " resetting...\n", dev->name, ioread32(ioaddr + ISR)); 1232 1233 { 1234 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); 1235 for (i = 0; i < RX_RING_SIZE; i++) 1236 printk(" %8.8x", (unsigned int) np->rx_ring[i].status); 1237 printk("\n" KERN_DEBUG " Tx ring %p: ", np->tx_ring); 1238 for (i = 0; i < TX_RING_SIZE; i++) 1239 printk(" %4.4x", np->tx_ring[i].status); 1240 printk("\n"); 1241 } 1242 1243 spin_lock_irqsave(&np->lock, flags); 1244 1245 reset_and_disable_rxtx(dev); 1246 reset_tx_descriptors(dev); 1247 enable_rxtx(dev); 1248 1249 spin_unlock_irqrestore(&np->lock, flags); 1250 1251 dev->trans_start = jiffies; 1252 np->stats.tx_errors++; 1253 netif_wake_queue(dev); /* or .._start_.. ?? */ 1254} 1255 1256 1257/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ 1258static void init_ring(struct net_device *dev) 1259{ 1260 struct netdev_private *np = netdev_priv(dev); 1261 int i; 1262 1263 /* initialize rx variables */ 1264 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); 1265 np->cur_rx = &np->rx_ring[0]; 1266 np->lack_rxbuf = np->rx_ring; 1267 np->really_rx_count = 0; 1268 1269 /* initial rx descriptors. */ 1270 for (i = 0; i < RX_RING_SIZE; i++) { 1271 np->rx_ring[i].status = 0; 1272 np->rx_ring[i].control = np->rx_buf_sz << RBSShift; 1273 np->rx_ring[i].next_desc = np->rx_ring_dma + 1274 (i + 1)*sizeof(struct fealnx_desc); 1275 np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1]; 1276 np->rx_ring[i].skbuff = NULL; 1277 } 1278 1279 /* for the last rx descriptor */ 1280 np->rx_ring[i - 1].next_desc = np->rx_ring_dma; 1281 np->rx_ring[i - 1].next_desc_logical = np->rx_ring; 1282 1283 /* allocate skb for rx buffers */ 1284 for (i = 0; i < RX_RING_SIZE; i++) { 1285 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz); 1286 1287 if (skb == NULL) { 1288 np->lack_rxbuf = &np->rx_ring[i]; 1289 break; 1290 } 1291 1292 ++np->really_rx_count; 1293 np->rx_ring[i].skbuff = skb; 1294 skb->dev = dev; /* Mark as being used by this device. */ 1295 np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data, 1296 np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1297 np->rx_ring[i].status = RXOWN; 1298 np->rx_ring[i].control |= RXIC; 1299 } 1300 1301 /* initialize tx variables */ 1302 np->cur_tx = &np->tx_ring[0]; 1303 np->cur_tx_copy = &np->tx_ring[0]; 1304 np->really_tx_count = 0; 1305 np->free_tx_count = TX_RING_SIZE; 1306 1307 for (i = 0; i < TX_RING_SIZE; i++) { 1308 np->tx_ring[i].status = 0; 1309 /* do we need np->tx_ring[i].control = XXX; ?? 
*/ 1310 np->tx_ring[i].next_desc = np->tx_ring_dma + 1311 (i + 1)*sizeof(struct fealnx_desc); 1312 np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1]; 1313 np->tx_ring[i].skbuff = NULL; 1314 } 1315 1316 /* for the last tx descriptor */ 1317 np->tx_ring[i - 1].next_desc = np->tx_ring_dma; 1318 np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0]; 1319} 1320 1321 1322static int start_tx(struct sk_buff *skb, struct net_device *dev) 1323{ 1324 struct netdev_private *np = netdev_priv(dev); 1325 unsigned long flags; 1326 1327 spin_lock_irqsave(&np->lock, flags); 1328 1329 np->cur_tx_copy->skbuff = skb; 1330 1331#define one_buffer 1332#define BPT 1022 1333#if defined(one_buffer) 1334 np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data, 1335 skb->len, PCI_DMA_TODEVICE); 1336 np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable; 1337 np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ 1338 np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */ 1339// 89/12/29 add, 1340 if (np->pci_dev->device == 0x891) 1341 np->cur_tx_copy->control |= ETIControl | RetryTxLC; 1342 np->cur_tx_copy->status = TXOWN; 1343 np->cur_tx_copy = np->cur_tx_copy->next_desc_logical; 1344 --np->free_tx_count; 1345#elif defined(two_buffer) 1346 if (skb->len > BPT) { 1347 struct fealnx_desc *next; 1348 1349 /* for the first descriptor */ 1350 np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data, 1351 BPT, PCI_DMA_TODEVICE); 1352 np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable; 1353 np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ 1354 np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */ 1355 1356 /* for the last descriptor */ 1357 next = np->cur_tx_copy->next_desc_logical; 1358 next->skbuff = skb; 1359 next->control = TXIC | TXLD | CRCEnable | PADEnable; 1360 next->control |= (skb->len << PKTSShift); /* pkt size */ 1361 next->control |= ((skb->len - BPT) << TBSShift); /* buf size */ 1362// 89/12/29 add, 1363 if (np->pci_dev->device == 0x891) 1364 np->cur_tx_copy->control |= ETIControl | RetryTxLC; 1365 next->buffer = pci_map_single(ep->pci_dev, skb->data + BPT, 1366 skb->len - BPT, PCI_DMA_TODEVICE); 1367 1368 next->status = TXOWN; 1369 np->cur_tx_copy->status = TXOWN; 1370 1371 np->cur_tx_copy = next->next_desc_logical; 1372 np->free_tx_count -= 2; 1373 } else { 1374 np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data, 1375 skb->len, PCI_DMA_TODEVICE); 1376 np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable; 1377 np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ 1378 np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */ 1379// 89/12/29 add, 1380 if (np->pci_dev->device == 0x891) 1381 np->cur_tx_copy->control |= ETIControl | RetryTxLC; 1382 np->cur_tx_copy->status = TXOWN; 1383 np->cur_tx_copy = np->cur_tx_copy->next_desc_logical; 1384 --np->free_tx_count; 1385 } 1386#endif 1387 1388 if (np->free_tx_count < 2) 1389 netif_stop_queue(dev); 1390 ++np->really_tx_count; 1391 iowrite32(0, np->mem + TXPDR); 1392 dev->trans_start = jiffies; 1393 1394 spin_unlock_irqrestore(&np->lock, flags); 1395 return 0; 1396} 1397 1398 1399/* Take lock before calling */ 1400/* Chip probably hosed tx ring. Clean up. 
*/ 1401static void reset_tx_descriptors(struct net_device *dev) 1402{ 1403 struct netdev_private *np = netdev_priv(dev); 1404 struct fealnx_desc *cur; 1405 int i; 1406 1407 /* initialize tx variables */ 1408 np->cur_tx = &np->tx_ring[0]; 1409 np->cur_tx_copy = &np->tx_ring[0]; 1410 np->really_tx_count = 0; 1411 np->free_tx_count = TX_RING_SIZE; 1412 1413 for (i = 0; i < TX_RING_SIZE; i++) { 1414 cur = &np->tx_ring[i]; 1415 if (cur->skbuff) { 1416 pci_unmap_single(np->pci_dev, cur->buffer, 1417 cur->skbuff->len, PCI_DMA_TODEVICE); 1418 dev_kfree_skb_any(cur->skbuff); 1419 cur->skbuff = NULL; 1420 } 1421 cur->status = 0; 1422 cur->control = 0; /* needed? */ 1423 /* probably not needed. We do it for purely paranoid reasons */ 1424 cur->next_desc = np->tx_ring_dma + 1425 (i + 1)*sizeof(struct fealnx_desc); 1426 cur->next_desc_logical = &np->tx_ring[i + 1]; 1427 } 1428 /* for the last tx descriptor */ 1429 np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma; 1430 np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0]; 1431} 1432 1433 1434/* Take lock and stop rx before calling this */ 1435static void reset_rx_descriptors(struct net_device *dev) 1436{ 1437 struct netdev_private *np = netdev_priv(dev); 1438 struct fealnx_desc *cur = np->cur_rx; 1439 int i; 1440 1441 allocate_rx_buffers(dev); 1442 1443 for (i = 0; i < RX_RING_SIZE; i++) { 1444 if (cur->skbuff) 1445 cur->status = RXOWN; 1446 cur = cur->next_desc_logical; 1447 } 1448 1449 iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring), 1450 np->mem + RXLBA); 1451} 1452 1453 1454/* The interrupt handler does all of the Rx thread work and cleans up 1455 after the Tx thread. */ 1456static irqreturn_t intr_handler(int irq, void *dev_instance) 1457{ 1458 struct net_device *dev = (struct net_device *) dev_instance; 1459 struct netdev_private *np = netdev_priv(dev); 1460 void __iomem *ioaddr = np->mem; 1461 long boguscnt = max_interrupt_work; 1462 unsigned int num_tx = 0; 1463 int handled = 0; 1464 1465 spin_lock(&np->lock); 1466 1467 iowrite32(0, ioaddr + IMR); 1468 1469 do { 1470 u32 intr_status = ioread32(ioaddr + ISR); 1471 1472 /* Acknowledge all of the current interrupt sources ASAP. 
*/ 1473 iowrite32(intr_status, ioaddr + ISR); 1474 1475 if (debug) 1476 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name, 1477 intr_status); 1478 1479 if (!(intr_status & np->imrvalue)) 1480 break; 1481 1482 handled = 1; 1483 1484// 90/1/16 delete, 1485// 1486// if (intr_status & FBE) 1487// { /* fatal error */ 1488// stop_nic_tx(ioaddr, 0); 1489// stop_nic_rx(ioaddr, 0); 1490// break; 1491// }; 1492 1493 if (intr_status & TUNF) 1494 iowrite32(0, ioaddr + TXPDR); 1495 1496 if (intr_status & CNTOVF) { 1497 /* missed pkts */ 1498 np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; 1499 1500 /* crc error */ 1501 np->stats.rx_crc_errors += 1502 (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; 1503 } 1504 1505 if (intr_status & (RI | RBU)) { 1506 if (intr_status & RI) 1507 netdev_rx(dev); 1508 else { 1509 stop_nic_rx(ioaddr, np->crvalue); 1510 reset_rx_descriptors(dev); 1511 iowrite32(np->crvalue, ioaddr + TCRRCR); 1512 } 1513 } 1514 1515 while (np->really_tx_count) { 1516 long tx_status = np->cur_tx->status; 1517 long tx_control = np->cur_tx->control; 1518 1519 if (!(tx_control & TXLD)) { /* this pkt is combined by two tx descriptors */ 1520 struct fealnx_desc *next; 1521 1522 next = np->cur_tx->next_desc_logical; 1523 tx_status = next->status; 1524 tx_control = next->control; 1525 } 1526 1527 if (tx_status & TXOWN) 1528 break; 1529 1530 if (!(np->crvalue & CR_W_ENH)) { 1531 if (tx_status & (CSL | LC | EC | UDF | HF)) { 1532 np->stats.tx_errors++; 1533 if (tx_status & EC) 1534 np->stats.tx_aborted_errors++; 1535 if (tx_status & CSL) 1536 np->stats.tx_carrier_errors++; 1537 if (tx_status & LC) 1538 np->stats.tx_window_errors++; 1539 if (tx_status & UDF) 1540 np->stats.tx_fifo_errors++; 1541 if ((tx_status & HF) && np->mii.full_duplex == 0) 1542 np->stats.tx_heartbeat_errors++; 1543 1544 } else { 1545 np->stats.tx_bytes += 1546 ((tx_control & PKTSMask) >> PKTSShift); 1547 1548 np->stats.collisions += 1549 ((tx_status & NCRMask) >> NCRShift); 1550 np->stats.tx_packets++; 1551 } 1552 } else { 1553 np->stats.tx_bytes += 1554 ((tx_control & PKTSMask) >> PKTSShift); 1555 np->stats.tx_packets++; 1556 } 1557 1558 /* Free the original skb. */ 1559 pci_unmap_single(np->pci_dev, np->cur_tx->buffer, 1560 np->cur_tx->skbuff->len, PCI_DMA_TODEVICE); 1561 dev_kfree_skb_irq(np->cur_tx->skbuff); 1562 np->cur_tx->skbuff = NULL; 1563 --np->really_tx_count; 1564 if (np->cur_tx->control & TXLD) { 1565 np->cur_tx = np->cur_tx->next_desc_logical; 1566 ++np->free_tx_count; 1567 } else { 1568 np->cur_tx = np->cur_tx->next_desc_logical; 1569 np->cur_tx = np->cur_tx->next_desc_logical; 1570 np->free_tx_count += 2; 1571 } 1572 num_tx++; 1573 } /* end of for loop */ 1574 1575 if (num_tx && np->free_tx_count >= 2) 1576 netif_wake_queue(dev); 1577 1578 /* read transmit status for enhanced mode only */ 1579 if (np->crvalue & CR_W_ENH) { 1580 long data; 1581 1582 data = ioread32(ioaddr + TSR); 1583 np->stats.tx_errors += (data & 0xff000000) >> 24; 1584 np->stats.tx_aborted_errors += (data & 0xff000000) >> 24; 1585 np->stats.tx_window_errors += (data & 0x00ff0000) >> 16; 1586 np->stats.collisions += (data & 0x0000ffff); 1587 } 1588 1589 if (--boguscnt < 0) { 1590 printk(KERN_WARNING "%s: Too much work at interrupt, " 1591 "status=0x%4.4x.\n", dev->name, intr_status); 1592 if (!np->reset_timer_armed) { 1593 np->reset_timer_armed = 1; 1594 np->reset_timer.expires = RUN_AT(HZ/2); 1595 add_timer(&np->reset_timer); 1596 stop_nic_rxtx(ioaddr, 0); 1597 netif_stop_queue(dev); 1598 /* or netif_tx_disable(dev); ?? 
*/ 1599 /* Prevent other paths from enabling tx,rx,intrs */ 1600 np->crvalue_sv = np->crvalue; 1601 np->imrvalue_sv = np->imrvalue; 1602 np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */ 1603 np->imrvalue = 0; 1604 } 1605 1606 break; 1607 } 1608 } while (1); 1609 1610 /* read the tally counters */ 1611 /* missed pkts */ 1612 np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; 1613 1614 /* crc error */ 1615 np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; 1616 1617 if (debug) 1618 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", 1619 dev->name, ioread32(ioaddr + ISR)); 1620 1621 iowrite32(np->imrvalue, ioaddr + IMR); 1622 1623 spin_unlock(&np->lock); 1624 1625 return IRQ_RETVAL(handled); 1626} 1627 1628 1629/* This routine is logically part of the interrupt handler, but separated 1630 for clarity and better register allocation. */ 1631static int netdev_rx(struct net_device *dev) 1632{ 1633 struct netdev_private *np = netdev_priv(dev); 1634 void __iomem *ioaddr = np->mem; 1635 1636 /* If EOP is set on the next entry, it's a new packet. Send it up. */ 1637 while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) { 1638 s32 rx_status = np->cur_rx->status; 1639 1640 if (np->really_rx_count == 0) 1641 break; 1642 1643 if (debug) 1644 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", rx_status); 1645 1646 if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) 1647 || (rx_status & ErrorSummary)) { 1648 if (rx_status & ErrorSummary) { /* there was a fatal error */ 1649 if (debug) 1650 printk(KERN_DEBUG 1651 "%s: Receive error, Rx status %8.8x.\n", 1652 dev->name, rx_status); 1653 1654 np->stats.rx_errors++; /* end of a packet. */ 1655 if (rx_status & (LONG | RUNT)) 1656 np->stats.rx_length_errors++; 1657 if (rx_status & RXER) 1658 np->stats.rx_frame_errors++; 1659 if (rx_status & CRC) 1660 np->stats.rx_crc_errors++; 1661 } else { 1662 int need_to_reset = 0; 1663 int desno = 0; 1664 1665 if (rx_status & RXFSD) { /* this pkt is too long, over one rx buffer */ 1666 struct fealnx_desc *cur; 1667 1668 /* check this packet is received completely? */ 1669 cur = np->cur_rx; 1670 while (desno <= np->really_rx_count) { 1671 ++desno; 1672 if ((!(cur->status & RXOWN)) 1673 && (cur->status & RXLSD)) 1674 break; 1675 /* goto next rx descriptor */ 1676 cur = cur->next_desc_logical; 1677 } 1678 if (desno > np->really_rx_count) 1679 need_to_reset = 1; 1680 } else /* RXLSD did not find, something error */ 1681 need_to_reset = 1; 1682 1683 if (need_to_reset == 0) { 1684 int i; 1685 1686 np->stats.rx_length_errors++; 1687 1688 /* free all rx descriptors related this long pkt */ 1689 for (i = 0; i < desno; ++i) { 1690 if (!np->cur_rx->skbuff) { 1691 printk(KERN_DEBUG 1692 "%s: I'm scared\n", dev->name); 1693 break; 1694 } 1695 np->cur_rx->status = RXOWN; 1696 np->cur_rx = np->cur_rx->next_desc_logical; 1697 } 1698 continue; 1699 } else { /* rx error, need to reset this chip */ 1700 stop_nic_rx(ioaddr, np->crvalue); 1701 reset_rx_descriptors(dev); 1702 iowrite32(np->crvalue, ioaddr + TCRRCR); 1703 } 1704 break; /* exit the while loop */ 1705 } 1706 } else { /* this received pkt is ok */ 1707 1708 struct sk_buff *skb; 1709 /* Omit the four octet CRC from the length. 
*/ 1710 short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4; 1711 1712#ifndef final_version 1713 if (debug) 1714 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d" 1715 " status %x.\n", pkt_len, rx_status); 1716#endif 1717 1718 /* Check if the packet is long enough to accept without copying 1719 to a minimally-sized skbuff. */ 1720 if (pkt_len < rx_copybreak && 1721 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1722 skb->dev = dev; 1723 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1724 pci_dma_sync_single_for_cpu(np->pci_dev, 1725 np->cur_rx->buffer, 1726 np->rx_buf_sz, 1727 PCI_DMA_FROMDEVICE); 1728 /* Call copy + cksum if available. */ 1729 1730#if ! defined(__alpha__) 1731 eth_copy_and_sum(skb, 1732 np->cur_rx->skbuff->data, pkt_len, 0); 1733 skb_put(skb, pkt_len); 1734#else 1735 memcpy(skb_put(skb, pkt_len), 1736 np->cur_rx->skbuff->data, pkt_len); 1737#endif 1738 pci_dma_sync_single_for_device(np->pci_dev, 1739 np->cur_rx->buffer, 1740 np->rx_buf_sz, 1741 PCI_DMA_FROMDEVICE); 1742 } else { 1743 pci_unmap_single(np->pci_dev, 1744 np->cur_rx->buffer, 1745 np->rx_buf_sz, 1746 PCI_DMA_FROMDEVICE); 1747 skb_put(skb = np->cur_rx->skbuff, pkt_len); 1748 np->cur_rx->skbuff = NULL; 1749 --np->really_rx_count; 1750 } 1751 skb->protocol = eth_type_trans(skb, dev); 1752 netif_rx(skb); 1753 dev->last_rx = jiffies; 1754 np->stats.rx_packets++; 1755 np->stats.rx_bytes += pkt_len; 1756 } 1757 1758 np->cur_rx = np->cur_rx->next_desc_logical; 1759 } /* end of while loop */ 1760 1761 /* allocate skb for rx buffers */ 1762 allocate_rx_buffers(dev); 1763 1764 return 0; 1765} 1766 1767 1768static struct net_device_stats *get_stats(struct net_device *dev) 1769{ 1770 struct netdev_private *np = netdev_priv(dev); 1771 void __iomem *ioaddr = np->mem; 1772 1773 /* The chip only need report frame silently dropped. */ 1774 if (netif_running(dev)) { 1775 np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; 1776 np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; 1777 } 1778 1779 return &np->stats; 1780} 1781 1782 1783/* for dev->set_multicast_list */ 1784static void set_rx_mode(struct net_device *dev) 1785{ 1786 spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock; 1787 unsigned long flags; 1788 spin_lock_irqsave(lp, flags); 1789 __set_rx_mode(dev); 1790 spin_unlock_irqrestore(lp, flags); 1791} 1792 1793 1794/* Take lock before calling */ 1795static void __set_rx_mode(struct net_device *dev) 1796{ 1797 struct netdev_private *np = netdev_priv(dev); 1798 void __iomem *ioaddr = np->mem; 1799 u32 mc_filter[2]; /* Multicast hash filter */ 1800 u32 rx_mode; 1801 1802 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1803 memset(mc_filter, 0xff, sizeof(mc_filter)); 1804 rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM; 1805 } else if ((dev->mc_count > multicast_filter_limit) 1806 || (dev->flags & IFF_ALLMULTI)) { 1807 /* Too many to match, or accept all multicasts. 
*/ 1808 memset(mc_filter, 0xff, sizeof(mc_filter)); 1809 rx_mode = CR_W_AB | CR_W_AM; 1810 } else { 1811 struct dev_mc_list *mclist; 1812 int i; 1813 1814 memset(mc_filter, 0, sizeof(mc_filter)); 1815 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1816 i++, mclist = mclist->next) { 1817 unsigned int bit; 1818 bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F; 1819 mc_filter[bit >> 5] |= (1 << bit); 1820 } 1821 rx_mode = CR_W_AB | CR_W_AM; 1822 } 1823 1824 stop_nic_rxtx(ioaddr, np->crvalue); 1825 1826 iowrite32(mc_filter[0], ioaddr + MAR0); 1827 iowrite32(mc_filter[1], ioaddr + MAR1); 1828 np->crvalue &= ~CR_W_RXMODEMASK; 1829 np->crvalue |= rx_mode; 1830 iowrite32(np->crvalue, ioaddr + TCRRCR); 1831} 1832 1833static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1834{ 1835 struct netdev_private *np = netdev_priv(dev); 1836 1837 strcpy(info->driver, DRV_NAME); 1838 strcpy(info->version, DRV_VERSION); 1839 strcpy(info->bus_info, pci_name(np->pci_dev)); 1840} 1841 1842static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1843{ 1844 struct netdev_private *np = netdev_priv(dev); 1845 int rc; 1846 1847 spin_lock_irq(&np->lock); 1848 rc = mii_ethtool_gset(&np->mii, cmd); 1849 spin_unlock_irq(&np->lock); 1850 1851 return rc; 1852} 1853 1854static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1855{ 1856 struct netdev_private *np = netdev_priv(dev); 1857 int rc; 1858 1859 spin_lock_irq(&np->lock); 1860 rc = mii_ethtool_sset(&np->mii, cmd); 1861 spin_unlock_irq(&np->lock); 1862 1863 return rc; 1864} 1865 1866static int netdev_nway_reset(struct net_device *dev) 1867{ 1868 struct netdev_private *np = netdev_priv(dev); 1869 return mii_nway_restart(&np->mii); 1870} 1871 1872static u32 netdev_get_link(struct net_device *dev) 1873{ 1874 struct netdev_private *np = netdev_priv(dev); 1875 return mii_link_ok(&np->mii); 1876} 1877 1878static u32 netdev_get_msglevel(struct net_device *dev) 1879{ 1880 return debug; 1881} 1882 1883static void netdev_set_msglevel(struct net_device *dev, u32 value) 1884{ 1885 debug = value; 1886} 1887 1888static const struct ethtool_ops netdev_ethtool_ops = { 1889 .get_drvinfo = netdev_get_drvinfo, 1890 .get_settings = netdev_get_settings, 1891 .set_settings = netdev_set_settings, 1892 .nway_reset = netdev_nway_reset, 1893 .get_link = netdev_get_link, 1894 .get_msglevel = netdev_get_msglevel, 1895 .set_msglevel = netdev_set_msglevel, 1896 .get_sg = ethtool_op_get_sg, 1897 .get_tx_csum = ethtool_op_get_tx_csum, 1898}; 1899 1900static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1901{ 1902 struct netdev_private *np = netdev_priv(dev); 1903 int rc; 1904 1905 if (!netif_running(dev)) 1906 return -EINVAL; 1907 1908 spin_lock_irq(&np->lock); 1909 rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL); 1910 spin_unlock_irq(&np->lock); 1911 1912 return rc; 1913} 1914 1915 1916static int netdev_close(struct net_device *dev) 1917{ 1918 struct netdev_private *np = netdev_priv(dev); 1919 void __iomem *ioaddr = np->mem; 1920 int i; 1921 1922 netif_stop_queue(dev); 1923 1924 /* Disable interrupts by clearing the interrupt mask. */ 1925 iowrite32(0x0000, ioaddr + IMR); 1926 1927 /* Stop the chip's Tx and Rx processes. */ 1928 stop_nic_rxtx(ioaddr, 0); 1929 1930 del_timer_sync(&np->timer); 1931 del_timer_sync(&np->reset_timer); 1932 1933 free_irq(dev->irq, dev); 1934 1935 /* Free all the skbuffs in the Rx queue. 
 */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = np->rx_ring[i].skbuff;

		np->rx_ring[i].status = 0;
		if (skb) {
			pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_ring[i].skbuff = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_ring[i].skbuff;

		if (skb) {
			pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_ring[i].skbuff = NULL;
		}
	}

	return 0;
}

static struct pci_device_id fealnx_pci_tbl[] = {
	{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{} /* terminate list */
};
MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);


static struct pci_driver fealnx_driver = {
	.name		= "fealnx",
	.id_table	= fealnx_pci_tbl,
	.probe		= fealnx_init_one,
	.remove		= __devexit_p(fealnx_remove_one),
};

static int __init fealnx_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif

	return pci_register_driver(&fealnx_driver);
}

static void __exit fealnx_exit(void)
{
	pci_unregister_driver(&fealnx_driver);
}

module_init(fealnx_init);
module_exit(fealnx_exit);
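/*
 * Usage sketch (illustrative only, not part of the original driver text):
 * the knobs declared near the top of this file (debug, rx_copybreak,
 * multicast_filter_limit, options[], full_duplex[]) are ordinary module
 * parameters, so when the driver is built as a module they can be set at
 * load time. The values below are placeholders; see the MODULE_PARM_DESC
 * strings for what each field means (e.g. 'options' bits 0-3 select the
 * media type, bit 17 forces full duplex), and note that the array
 * parameters take one comma-separated entry per card, up to MAX_UNITS.
 *
 *	modprobe fealnx debug=1 full_duplex=1,1 options=0,0
 */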