Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/net/fealnx.c at v2.6.25 (1988 lines, 56 kB)
/*
	Written 1998-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support information and updates available at
	http://www.scyld.com/network/pci-skeleton.html

	Linux kernel updates:

	Version 2.51, Nov 17, 2001 (jgarzik):
	- Add ethtool support
	- Replace some MII-related magic numbers with constants
*/

#define DRV_NAME	"fealnx"
#define DRV_VERSION	"2.52"
#define DRV_RELDATE	"Sep-11-2006"

static int debug;		/* 1 -> print debug messages */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
/* Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc. */
/* Both 'options[]' and 'full_duplex[]' should exist for driver */
/* interoperability. */
/* The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };

/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency. */
/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
/* Making the Tx ring too large decreases the effectiveness of channel */
/* bonding and packet priority. */
/* There are no ill effects from too-large receive rings. */
// 88-12-9 modify,
// #define TX_RING_SIZE 16
// #define RX_RING_SIZE 32
#define TX_RING_SIZE 6
#define RX_RING_SIZE 12
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct fealnx_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct fealnx_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */


/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/uaccess.h>
/* These identify the driver base version and may not be removed. */
static char version[] =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";


/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. */
#ifndef __alpha__
#define USE_IO_OPS
#endif

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
/* This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))

MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");

enum {
	MIN_REGION_SIZE		= 136,
};

/* A chip capabilities table, matching the entries in fealnx_pci_tbl[] below. */
enum chip_capability_flags {
	HAS_MII_XCVR,
	HAS_CHIP_XCVR,
};

/* 89/6/13 add, */
/* for different PHY */
enum phy_type_flags {
	MysonPHY = 1,
	AhdocPHY = 2,
	SeeqPHY = 3,
	MarvellPHY = 4,
	Myson981 = 5,
	LevelOnePHY = 6,
	OtherPHY = 10,
};

struct chip_info {
	char *chip_name;
	int flags;
};

static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
	{ "100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
	{ "100/10M Ethernet PCI Adapter",	HAS_CHIP_XCVR },
	{ "1000/100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
};

/* Offsets to the Command and Status Registers. */
enum fealnx_offsets {
	PAR0 = 0x0,		/* physical address 0-3 */
	PAR1 = 0x04,		/* physical address 4-5 */
	MAR0 = 0x08,		/* multicast address 0-3 */
	MAR1 = 0x0C,		/* multicast address 4-7 */
	FAR0 = 0x10,		/* flow-control address 0-3 */
	FAR1 = 0x14,		/* flow-control address 4-5 */
	TCRRCR = 0x18,		/* receive & transmit configuration */
	BCR = 0x1C,		/* bus command */
	TXPDR = 0x20,		/* transmit polling demand */
	RXPDR = 0x24,		/* receive polling demand */
	RXCWP = 0x28,		/* receive current word pointer */
	TXLBA = 0x2C,		/* transmit list base address */
	RXLBA = 0x30,		/* receive list base address */
	ISR = 0x34,		/* interrupt status */
	IMR = 0x38,		/* interrupt mask */
	FTH = 0x3C,		/* flow control high/low threshold */
	MANAGEMENT = 0x40,	/* bootrom/eeprom and mii management */
	TALLY = 0x44,		/* tally counters for crc and mpa */
	TSR = 0x48,		/* tally counter for transmit status */
	BMCRSR = 0x4c,		/* basic mode control and status */
	PHYIDENTIFIER = 0x50,	/* phy identifier */
	ANARANLPAR = 0x54,	/* auto-negotiation advertisement and link
				   partner ability */
	ANEROCR = 0x58,		/* auto-negotiation expansion and pci conf. */
	BPREMRPSR = 0x5c,	/* bypass & receive error mask and phy status */
};
/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
enum intr_status_bits {
	RFCON = 0x00020000,	/* receive flow control xon packet */
	RFCOFF = 0x00010000,	/* receive flow control xoff packet */
	LSCStatus = 0x00008000,	/* link status change */
	ANCStatus = 0x00004000,	/* autonegotiation completed */
	FBE = 0x00002000,	/* fatal bus error */
	FBEMask = 0x00001800,	/* mask bit12-11 */
	ParityErr = 0x00000000,	/* parity error */
	TargetErr = 0x00001000,	/* target abort */
	MasterErr = 0x00000800,	/* master error */
	TUNF = 0x00000400,	/* transmit underflow */
	ROVF = 0x00000200,	/* receive overflow */
	ETI = 0x00000100,	/* transmit early int */
	ERI = 0x00000080,	/* receive early int */
	CNTOVF = 0x00000040,	/* counter overflow */
	RBU = 0x00000020,	/* receive buffer unavailable */
	TBU = 0x00000010,	/* transmit buffer unavailable */
	TI = 0x00000008,	/* transmit interrupt */
	RI = 0x00000004,	/* receive interrupt */
	RxErr = 0x00000002,	/* receive error */
};

/* Bits in the NetworkConfig register, W for writing, R for reading */
/* FIXME: some names are invented by me. Marked with (name?) */
/* If you have docs and know bit names, please fix 'em */
enum rx_mode_bits {
	CR_W_ENH = 0x02000000,		/* enhanced mode (name?) */
	CR_W_FD = 0x00100000,		/* full duplex */
	CR_W_PS10 = 0x00080000,		/* 10 mbit */
	CR_W_TXEN = 0x00040000,		/* tx enable (name?) */
	CR_W_PS1000 = 0x00010000,	/* 1000 mbit */
/*	CR_W_RXBURSTMASK = 0x00000e00, I'm unsure about this */
	CR_W_RXMODEMASK = 0x000000e0,
	CR_W_PROM = 0x00000080,		/* promiscuous mode */
	CR_W_AB = 0x00000040,		/* accept broadcast */
	CR_W_AM = 0x00000020,		/* accept multicast */
	CR_W_ARP = 0x00000008,		/* receive runt pkt */
	CR_W_ALP = 0x00000004,		/* receive long pkt */
	CR_W_SEP = 0x00000002,		/* receive error pkt */
	CR_W_RXEN = 0x00000001,		/* rx enable (unicast?) (name?) */

	CR_R_TXSTOP = 0x04000000,	/* tx stopped (name?) */
	CR_R_FD = 0x00100000,		/* full duplex detected */
	CR_R_PS10 = 0x00080000,		/* 10 mbit detected */
	CR_R_RXSTOP = 0x00008000,	/* rx stopped (name?) */
};
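/*
 * Example of the W/R split above: stop_nic_rx() below writes the config
 * value with CR_W_RXEN cleared and then polls the same register until the
 * read-back value has CR_R_RXSTOP set, i.e. until the receiver has
 * actually stopped.
 */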
/* The Tulip Rx and Tx buffer descriptors. */
struct fealnx_desc {
	s32 status;
	s32 control;
	u32 buffer;
	u32 next_desc;
	struct fealnx_desc *next_desc_logical;
	struct sk_buff *skbuff;
	u32 reserved1;
	u32 reserved2;
};

/* Bits in network_desc.status */
enum rx_desc_status_bits {
	RXOWN = 0x80000000,	/* own bit */
	FLNGMASK = 0x0fff0000,	/* frame length */
	FLNGShift = 16,
	MARSTATUS = 0x00004000,	/* multicast address received */
	BARSTATUS = 0x00002000,	/* broadcast address received */
	PHYSTATUS = 0x00001000,	/* physical address received */
	RXFSD = 0x00000800,	/* first descriptor */
	RXLSD = 0x00000400,	/* last descriptor */
	ErrorSummary = 0x80,	/* error summary */
	RUNT = 0x40,		/* runt packet received */
	LONG = 0x20,		/* long packet received */
	FAE = 0x10,		/* frame align error */
	CRC = 0x08,		/* crc error */
	RXER = 0x04,		/* receive error */
};

enum rx_desc_control_bits {
	RXIC = 0x00800000,	/* interrupt control */
	RBSShift = 0,
};

enum tx_desc_status_bits {
	TXOWN = 0x80000000,	/* own bit */
	JABTO = 0x00004000,	/* jabber timeout */
	CSL = 0x00002000,	/* carrier sense lost */
	LC = 0x00001000,	/* late collision */
	EC = 0x00000800,	/* excessive collision */
	UDF = 0x00000400,	/* fifo underflow */
	DFR = 0x00000200,	/* deferred */
	HF = 0x00000100,	/* heartbeat fail */
	NCRMask = 0x000000ff,	/* collision retry count */
	NCRShift = 0,
};

enum tx_desc_control_bits {
	TXIC = 0x80000000,	/* interrupt control */
	ETIControl = 0x40000000,	/* early transmit interrupt */
	TXLD = 0x20000000,	/* last descriptor */
	TXFD = 0x10000000,	/* first descriptor */
	CRCEnable = 0x08000000,	/* crc control */
	PADEnable = 0x04000000,	/* padding control */
	RetryTxLC = 0x02000000,	/* retry late collision */
	PKTSMask = 0x3ff800,	/* packet size bit21-11 */
	PKTSShift = 11,
	TBSMask = 0x000007ff,	/* transmit buffer bit 10-0 */
	TBSShift = 0,
};
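/*
 * Descriptor ownership works the same way in both rings: the driver fills
 * in a descriptor and then sets the own bit (RXOWN/TXOWN) in ->status to
 * hand it to the NIC, and the NIC clears the own bit when it is done with
 * it.  ->next_desc holds the bus (DMA) address of the next descriptor for
 * the chip, while ->next_desc_logical is the matching kernel virtual
 * pointer the driver uses to walk the same circular list.
 */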
/* BootROM/EEPROM/MII Management Register */
#define MASK_MIIR_MII_READ	0x00000000
#define MASK_MIIR_MII_WRITE	0x00000008
#define MASK_MIIR_MII_MDO	0x00000004
#define MASK_MIIR_MII_MDI	0x00000002
#define MASK_MIIR_MII_MDC	0x00000001

/* ST+OP+PHYAD+REGAD+TA */
#define OP_READ		0x6000	/* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
#define OP_WRITE	0x5002	/* ST:01+OP:01+PHYAD+REGAD+TA:10 */

/* ------------------------------------------------------------------------- */
/*      Constants for Myson PHY                                               */
/* ------------------------------------------------------------------------- */
#define MysonPHYID	0xd0000302
/* 89-7-27 add, (begin) */
#define MysonPHYID0	0x0302
#define StatusRegister	18
#define SPEED100	0x0400	// bit10
#define FULLMODE	0x0800	// bit11
/* 89-7-27 add, (end) */

/* ------------------------------------------------------------------------- */
/*      Constants for Seeq 80225 PHY                                          */
/* ------------------------------------------------------------------------- */
#define SeeqPHYID0	0x0016

#define MIIRegister18	18
#define SPD_DET_100	0x80
#define DPLX_DET_FULL	0x40

/* ------------------------------------------------------------------------- */
/*      Constants for Ahdoc 101 PHY                                           */
/* ------------------------------------------------------------------------- */
#define AhdocPHYID0	0x0022

#define DiagnosticReg	18
#define DPLX_FULL	0x0800
#define Speed_100	0x0400

/* 89/6/13 add, */
/* -------------------------------------------------------------------------- */
/*      Constants for the Marvell and LevelOne PHYs                            */
/* -------------------------------------------------------------------------- */
#define MarvellPHYID0	0x0141
#define LevelOnePHYID0	0x0013

#define MII1000BaseTControlReg	9
#define MII1000BaseTStatusReg	10
#define SpecificReg		17

/* for 1000BaseT Control Register */
#define PHYAbletoPerform1000FullDuplex	0x0200
#define PHYAbletoPerform1000HalfDuplex	0x0100
#define PHY1000AbilityMask		0x300

// for phy specific status register, marvell phy.
#define SpeedMask	0x0c000
#define Speed_1000M	0x08000
#define Speed_100M	0x4000
#define Speed_10M	0
#define Full_Duplex	0x2000

// 89/12/29 add, for phy specific status register, levelone phy, (begin)
#define LXT1000_100M	0x08000
#define LXT1000_1000M	0x0c000
#define LXT1000_Full	0x200
// 89/12/29 add, for phy specific status register, levelone phy, (end)

/* for 3-in-1 case, BMCRSR register */
#define LinkIsUp2	0x00040000

/* for PHY */
#define LinkIsUp	0x0004


struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct fealnx_desc *rx_ring;
	struct fealnx_desc *tx_ring;

	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	spinlock_t lock;

	struct net_device_stats stats;

	/* Media monitoring timer. */
	struct timer_list timer;

	/* Reset timer */
	struct timer_list reset_timer;
	int reset_timer_armed;
	unsigned long crvalue_sv;
	unsigned long imrvalue_sv;

	/* Frequently used values: keep some adjacent for cache effect. */
	int flags;
	struct pci_dev *pci_dev;
	unsigned long crvalue;
	unsigned long bcrvalue;
	unsigned long imrvalue;
	struct fealnx_desc *cur_rx;
	struct fealnx_desc *lack_rxbuf;
	int really_rx_count;
	struct fealnx_desc *cur_tx;
	struct fealnx_desc *cur_tx_copy;
	int really_tx_count;
	int free_tx_count;
	unsigned int rx_buf_sz;	/* Based on MTU+slack. */

	/* These values keep track of the transceiver/media in use. */
	unsigned int linkok;
	unsigned int line_speed;
	unsigned int duplexmode;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int PHYType;

	/* MII transceiver section. */
	int mii_cnt;		/* Number of MII PHYs found. */
	unsigned char phys[2];	/* MII device addresses. */
	struct mii_if_info mii;
	void __iomem *mem;
};
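/*
 * Encoding used by getlinkstatus()/getlinktype() below for the media
 * fields above: linkok is 1 when link is up; line_speed is 1 for 10M,
 * 2 for 100M and 3 for 1000M; duplexmode is 1 for half and 2 for full.
 */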
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void reset_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
static void reset_rx_descriptors(struct net_device *dev);
static void reset_tx_descriptors(struct net_device *dev);

static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;

	iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ((ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
			break;
	}
}


static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;

	iowrite32(crvalue & ~(CR_W_RXEN + CR_W_TXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ((ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP + CR_R_TXSTOP))
		    == (CR_R_RXSTOP + CR_R_TXSTOP))
			break;
	}
}
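/*
 * Both stop helpers busy-wait, bounded to 0x1000 register reads, rather
 * than sleeping: most callers run under np->lock or in the interrupt
 * handler.  If the chip never reports the stopped state, the loop simply
 * gives up after the last iteration.
 */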
static int __devinit fealnx_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct netdev_private *np;
	int i, option, err, irq;
	static int card_idx = -1;
	char boardname[12];
	void __iomem *ioaddr;
	unsigned long len;
	unsigned int chip_id = ent->driver_data;
	struct net_device *dev;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	DECLARE_MAC_BUF(mac);

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	card_idx++;
	sprintf(boardname, "fealnx%d", card_idx);

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;

	i = pci_enable_device(pdev);
	if (i) return i;
	pci_set_master(pdev);

	len = pci_resource_len(pdev, bar);
	if (len < MIN_REGION_SIZE) {
		dev_err(&pdev->dev,
			"region size %ld too small, aborting\n", len);
		return -ENODEV;
	}

	i = pci_request_regions(pdev, boardname);
	if (i)
		return i;

	irq = pdev->irq;

	ioaddr = pci_iomap(pdev, bar, len);
	if (!ioaddr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	dev = alloc_etherdev(sizeof(struct netdev_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_unmap;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* read ethernet id */
	for (i = 0; i < 6; ++i)
		dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	/* Make certain the descriptor lists are aligned. */
	np = netdev_priv(dev);
	np->mem = ioaddr;
	spin_lock_init(&np->lock);
	np->pci_dev = pdev;
	np->flags = skel_netdrv_tbl[chip_id].flags;
	pci_set_drvdata(pdev, dev);
	np->mii.dev = dev;
	np->mii.mdio_read = mdio_read;
	np->mii.mdio_write = mdio_write;
	np->mii.phy_id_mask = 0x1f;
	np->mii.reg_num_mask = 0x1f;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	np->rx_ring = (struct fealnx_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_rx;
	}
	np->tx_ring = (struct fealnx_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	/* find the connected MII xcvrs */
	if (np->flags == HAS_MII_XCVR) {
		int phy, phy_idx = 0;

		for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
			int mii_status = mdio_read(dev, phy, 1);

			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					 "MII PHY found at address %d, status "
					 "0x%4.4x.\n", phy, mii_status);
				/* get phy type */
				{
					unsigned int data;

					data = mdio_read(dev, np->phys[0], 2);
					if (data == SeeqPHYID0)
						np->PHYType = SeeqPHY;
					else if (data == AhdocPHYID0)
						np->PHYType = AhdocPHY;
					else if (data == MarvellPHYID0)
						np->PHYType = MarvellPHY;
					else if (data == MysonPHYID0)
						np->PHYType = Myson981;
					else if (data == LevelOnePHYID0)
						np->PHYType = LevelOnePHY;
					else
						np->PHYType = OtherPHY;
				}
			}
		}

		np->mii_cnt = phy_idx;
		if (phy_idx == 0)
			dev_warn(&pdev->dev,
				 "MII PHY not found -- this device may "
				 "not operate correctly.\n");
	} else {
		np->phys[0] = 32;
/* 89/6/23 add, (begin) */
		/* get phy type */
		if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
			np->PHYType = MysonPHY;
		else
			np->PHYType = OtherPHY;
	}
	np->mii.phy_id = np->phys[0];

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii.full_duplex = 1;
		np->default_port = option & 15;
	}

	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->mii.full_duplex = full_duplex[card_idx];

	if (np->mii.full_duplex) {
		dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
/* 89/6/13 add, (begin) */
//		if (np->PHYType == MarvellPHY)
		if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], 9);
			data = (data & 0xfcff) | 0x0200;
			mdio_write(dev, np->phys[0], 9, data);
		}
/* 89/6/13 add, (end) */
		if (np->flags == HAS_MII_XCVR)
			mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
		else
			iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
		np->mii.force_media = 1;
	}

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &mii_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto err_out_free_tx;

	printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n",
	       dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
	       print_mac(mac, dev->dev_addr), irq);

	return 0;

err_out_free_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_free_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
	return err;
}
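/*
 * Note that the error labels above unwind in exactly the reverse order of
 * the allocations (tx ring, rx ring, netdev, iomap, PCI regions): a
 * failure at any step jumps to the label that frees everything acquired
 * so far and nothing more.
 */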
static void __devexit fealnx_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
				    np->tx_ring_dma);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
				    np->rx_ring_dma);
		unregister_netdev(dev);
		pci_iounmap(pdev, np->mem);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		printk(KERN_ERR "fealnx: remove for unknown device\n");
}


static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
{
	ulong miir;
	int i;
	unsigned int mask, data;

	/* enable MII output */
	miir = (ulong) ioread32(miiport);
	miir &= 0xfffffff0;

	miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;

	/* send 32 1's preamble */
	for (i = 0; i < 32; i++) {
		/* low MDC; MDO is already high (miir) */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
	}

	/* calculate ST+OP+PHYAD+REGAD+TA */
	data = opcode | (phyad << 7) | (regad << 2);

	/* sent out */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;

		iowrite32(miir, miiport);
		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
		udelay(30);

		/* next */
		mask >>= 1;
		if (mask == 0x2 && opcode == OP_READ)
			miir &= ~MASK_MIIR_MII_WRITE;
	}
	return miir;
}


static int mdio_read(struct net_device *dev, int phyad, int regad)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;
	ulong miir;
	unsigned int mask, data;

	miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);

	/* read data */
	mask = 0x8000;
	data = 0;
	while (mask) {
		/* low MDC */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* read MDI */
		miir = ioread32(miiport);
		if (miir & MASK_MIIR_MII_MDI)
			data |= mask;

		/* high MDC, and wait */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
		udelay(30);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);

	return data & 0xffff;
}
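/*
 * Worked example of the bit-banged MDIO frame built above, for a read of
 * register 1 on PHY address 5:
 *
 *	data = OP_READ | (5 << 7) | (1 << 2)
 *	     = 0x6000  |  0x0280  |  0x0004  =  0x6284
 *
 * i.e. ST=01, OP=10, PHYAD=00101, REGAD=00001, then the turnaround bits.
 * The 16 bits are clocked out MSB-first starting from mask 0x8000; at
 * mask == 0x2 (the turnaround) a read releases the MDO driver so the PHY
 * can answer, and mdio_read() then clocks the 16 data bits back in the
 * same way.
 */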
static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;
	ulong miir;
	unsigned int mask;

	miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);

	/* write data */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);
}


static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int i;

	iowrite32(0x00000001, ioaddr + BCR);	/* Reset */

	if (request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev))
		return -EAGAIN;

	for (i = 0; i < 3; i++)
		iowrite16(((unsigned short *)dev->dev_addr)[i],
			  ioaddr + PAR0 + i*2);

	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
	iowrite32(np->tx_ring_dma, ioaddr + TXLBA);

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword burst.
	   586: no burst limit.
	   Burst length 5:3
	   0 0 0   1
	   0 0 1   4
	   0 1 0   8
	   0 1 1   16
	   1 0 0   32
	   1 0 1   64
	   1 1 0   128
	   1 1 1   256
	   Wait the specified 50 PCI cycles after a reset by initializing
	   Tx and Rx queues and the address filter list.
	   FIXME (Ueimor): optimistic for alpha + posted writes ? */
#if defined(__powerpc__) || defined(__sparc__)
// 89/9/1 modify,
//	np->bcrvalue = 0x04 | 0x38;	/* big-endian, 256 burst length */
	np->bcrvalue = 0x04 | 0x10;	/* big-endian, tx 8 burst length */
	np->crvalue = 0xe00;		/* rx 128 burst length */
#elif defined(__alpha__) || defined(__x86_64__)
// 89/9/1 modify,
//	np->bcrvalue = 0x38;		/* little-endian, 256 burst length */
	np->bcrvalue = 0x10;		/* little-endian, 8 burst length */
	np->crvalue = 0xe00;		/* rx 128 burst length */
#elif defined(__i386__)
#if defined(MODULE)
// 89/9/1 modify,
//	np->bcrvalue = 0x38;		/* little-endian, 256 burst length */
	np->bcrvalue = 0x10;		/* little-endian, 8 burst length */
	np->crvalue = 0xe00;		/* rx 128 burst length */
#else
	/* When not a module we can work around broken '486 PCI boards. */
#define x86 boot_cpu_data.x86
// 89/9/1 modify,
//	np->bcrvalue = (x86 <= 4 ? 0x10 : 0x38);
	np->bcrvalue = 0x10;
	np->crvalue = (x86 <= 4 ? 0xa00 : 0xe00);
	if (x86 <= 4)
		printk(KERN_INFO "%s: This is a 386/486 PCI system, setting burst "
		       "length to %x.\n", dev->name, (x86 <= 4 ? 0x10 : 0x38));
#endif
#else
// 89/9/1 modify,
//	np->bcrvalue = 0x38;
	np->bcrvalue = 0x10;
	np->crvalue = 0xe00;		/* rx 128 burst length */
#warning Processor architecture undefined!
#endif
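/*
 * Reading bcrvalue against the burst-length table above: bits 5:3 of BCR
 * select the burst size, so 0x10 (binary 010 in bits 5:3) is an
 * 8-longword burst, and the commented-out 0x38 (binary 111) would be 256.
 * The extra 0x04 bit used on powerpc/sparc is the big-endian setting
 * noted in the comments.
 */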
// 89/12/29 add,
// 90/1/16 modify,
//	np->imrvalue = FBE | TUNF | CNTOVF | RBU | TI | RI;
	np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
	if (np->pci_dev->device == 0x891) {
		np->bcrvalue |= 0x200;	/* set PROG bit */
		np->crvalue |= CR_W_ENH;	/* set enhanced bit */
		np->imrvalue |= ETI;
	}
	iowrite32(np->bcrvalue, ioaddr + BCR);

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	iowrite32(0, ioaddr + RXPDR);
// 89/9/1 modify,
//	np->crvalue = 0x00e40001;	/* tx store and forward, tx/rx enable */
	np->crvalue |= 0x00e40001;	/* tx store and forward, tx/rx enable */
	np->mii.full_duplex = np->mii.force_media;
	getlinkstatus(dev);
	if (np->linkok)
		getlinktype(dev);
	__set_rx_mode(dev);

	netif_start_queue(dev);

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	if (debug)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = RUN_AT(3 * HZ);
	np->timer.data = (unsigned long) dev;
	np->timer.function = &netdev_timer;

	/* timer handler */
	add_timer(&np->timer);

	init_timer(&np->reset_timer);
	np->reset_timer.data = (unsigned long) dev;
	np->reset_timer.function = &reset_timer;
	np->reset_timer_armed = 0;

	return 0;
}


static void getlinkstatus(struct net_device *dev)
/* function: Routine will read MII Status Register to get link status.     */
/* input   : dev... pointer to the adapter block.                          */
/* output  : none.                                                         */
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int i, DelayTime = 0x1000;

	np->linkok = 0;

	if (np->PHYType == MysonPHY) {
		for (i = 0; i < DelayTime; ++i) {
			if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	} else {
		for (i = 0; i < DelayTime; ++i) {
			if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	}
}
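/*
 * Worst case getlinkstatus() busy-waits for DelayTime iterations of
 * udelay(100), i.e. 0x1000 * 100us, roughly 0.4 seconds, before giving up
 * and leaving np->linkok at 0; netdev_timer() retries every 10 seconds
 * after that.
 */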
static void getlinktype(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	if (np->PHYType == MysonPHY) {	/* 3-in-1 case */
		if (ioread32(np->mem + TCRRCR) & CR_R_FD)
			np->duplexmode = 2;	/* full duplex */
		else
			np->duplexmode = 1;	/* half duplex */
		if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
			np->line_speed = 1;	/* 10M */
		else
			np->line_speed = 2;	/* 100M */
	} else {
		if (np->PHYType == SeeqPHY) {	/* this PHY is SEEQ 80225 */
			unsigned int data;

			data = mdio_read(dev, np->phys[0], MIIRegister18);
			if (data & SPD_DET_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_DET_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		} else if (np->PHYType == AhdocPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], DiagnosticReg);
			if (data & Speed_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		}
/* 89/6/13 add, (begin) */
		else if (np->PHYType == MarvellPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & Full_Duplex)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == Speed_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == Speed_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
/* 89/6/13 add, (end) */
/* 89/7/27 add, (begin) */
		else if (np->PHYType == Myson981) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], StatusRegister);

			if (data & SPEED100)
				np->line_speed = 2;
			else
				np->line_speed = 1;

			if (data & FULLMODE)
				np->duplexmode = 2;
			else
				np->duplexmode = 1;
		}
/* 89/7/27 add, (end) */
/* 89/12/29 add */
		else if (np->PHYType == LevelOnePHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & LXT1000_Full)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == LXT1000_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == LXT1000_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
		np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
		if (np->line_speed == 1)
			np->crvalue |= CR_W_PS10;
		else if (np->line_speed == 3)
			np->crvalue |= CR_W_PS1000;
		if (np->duplexmode == 2)
			np->crvalue |= CR_W_FD;
	}
}
/* Take lock before calling this */
static void allocate_rx_buffers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* allocate skb for rx buffers */
	while (np->really_rx_count != RX_RING_SIZE) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(np->rx_buf_sz);
		if (skb == NULL)
			break;	/* Better luck next round. */

		while (np->lack_rxbuf->skbuff)
			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;

		skb->dev = dev;	/* Mark as being used by this device. */
		np->lack_rxbuf->skbuff = skb;
		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		np->lack_rxbuf->status = RXOWN;
		++np->really_rx_count;
	}
}


static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int old_crvalue = np->crvalue;
	unsigned int old_linkok = np->linkok;
	unsigned long flags;

	if (debug)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
		       "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
		       ioread32(ioaddr + TCRRCR));

	spin_lock_irqsave(&np->lock, flags);

	if (np->flags == HAS_MII_XCVR) {
		getlinkstatus(dev);
		if ((old_linkok == 0) && (np->linkok == 1)) {	/* we need to detect the media type again */
			getlinktype(dev);
			if (np->crvalue != old_crvalue) {
				stop_nic_rxtx(ioaddr, np->crvalue);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}
	}

	allocate_rx_buffers(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	np->timer.expires = RUN_AT(10 * HZ);
	add_timer(&np->timer);
}


/* Take lock before calling */
/* Reset chip and disable rx, tx and interrupts */
static void reset_and_disable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int delay = 51;

	/* Reset the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0, ioaddr + IMR);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	/* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
	   We surely wait too long (address+data phase). Who cares? */
	while (--delay) {
		ioread32(ioaddr + BCR);
		rmb();
	}
}


/* Take lock before calling */
/* Restore chip after reset */
static void enable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	reset_rx_descriptors(dev);

	iowrite32(np->tx_ring_dma + ((char *)np->cur_tx - (char *)np->tx_ring),
		  ioaddr + TXLBA);
	iowrite32(np->rx_ring_dma + ((char *)np->cur_rx - (char *)np->rx_ring),
		  ioaddr + RXLBA);

	iowrite32(np->bcrvalue, ioaddr + BCR);

	iowrite32(0, ioaddr + RXPDR);
	__set_rx_mode(dev);	/* changes np->crvalue, writes it into TCRRCR */

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	iowrite32(0, ioaddr + TXPDR);
}


static void reset_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);

	spin_lock_irqsave(&np->lock, flags);
	np->crvalue = np->crvalue_sv;
	np->imrvalue = np->imrvalue_sv;

	reset_and_disable_rxtx(dev);
	/* works for me without this:
	   reset_tx_descriptors(dev); */
	enable_rxtx(dev);
	netif_start_queue(dev);	/* FIXME: or netif_wake_queue(dev); ? */

	np->reset_timer_armed = 0;

	spin_unlock_irqrestore(&np->lock, flags);
}
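/*
 * reset_timer() is the second half of the interrupt-storm recovery in
 * intr_handler() below: when too many events arrive in one interrupt, the
 * handler stops the NIC, saves crvalue/imrvalue into the *_sv copies and
 * arms this timer for half a second later; here the saved values are
 * restored and the chip is brought back up.
 */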
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
	       " resetting...\n", dev->name, ioread32(ioaddr + ISR));

	{
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int) np->rx_ring[i].status);
		printk("\n" KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %4.4x", np->tx_ring[i].status);
		printk("\n");
	}

	spin_lock_irqsave(&np->lock, flags);

	reset_and_disable_rxtx(dev);
	reset_tx_descriptors(dev);
	enable_rxtx(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	netif_wake_queue(dev);	/* or .._start_.. ?? */
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* initialize rx variables */
	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	np->cur_rx = &np->rx_ring[0];
	np->lack_rxbuf = np->rx_ring;
	np->really_rx_count = 0;

	/* initial rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
		np->rx_ring[i].next_desc = np->rx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
		np->rx_ring[i].skbuff = NULL;
	}

	/* for the last rx descriptor */
	np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
	np->rx_ring[i - 1].next_desc_logical = np->rx_ring;

	/* allocate skb for rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);

		if (skb == NULL) {
			np->lack_rxbuf = &np->rx_ring[i];
			break;
		}

		++np->really_rx_count;
		np->rx_ring[i].skbuff = skb;
		skb->dev = dev;	/* Mark as being used by this device. */
		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		np->rx_ring[i].status = RXOWN;
		np->rx_ring[i].control |= RXIC;
	}

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].status = 0;
		/* do we need np->tx_ring[i].control = XXX; ?? */
		np->tx_ring[i].next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
		np->tx_ring[i].skbuff = NULL;
	}

	/* for the last tx descriptor */
	np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
}
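/*
 * Both rings are built as circular lists: descriptor i points at the bus
 * address ring_dma + (i + 1) * sizeof(struct fealnx_desc), and the last
 * entry is then patched to point back at entry 0, in both the DMA
 * (next_desc) and the kernel-virtual (next_desc_logical) chains.
 */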
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);

	np->cur_tx_copy->skbuff = skb;

#define one_buffer
#define BPT 1022
#if defined(one_buffer)
	np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
		skb->len, PCI_DMA_TODEVICE);
	np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
	np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
	np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
// 89/12/29 add,
	if (np->pci_dev->device == 0x891)
		np->cur_tx_copy->control |= ETIControl | RetryTxLC;
	np->cur_tx_copy->status = TXOWN;
	np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
	--np->free_tx_count;
#elif defined(two_buffer)
	if (skb->len > BPT) {
		struct fealnx_desc *next;

		/* for the first descriptor */
		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
			BPT, PCI_DMA_TODEVICE);
		np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (BPT << TBSShift);	/* buffer size */

		/* for the last descriptor */
		next = np->cur_tx_copy->next_desc_logical;
		next->skbuff = skb;
		next->control = TXIC | TXLD | CRCEnable | PADEnable;
		next->control |= (skb->len << PKTSShift);	/* pkt size */
		next->control |= ((skb->len - BPT) << TBSShift);	/* buf size */
// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
			skb->len - BPT, PCI_DMA_TODEVICE);

		next->status = TXOWN;
		np->cur_tx_copy->status = TXOWN;

		np->cur_tx_copy = next->next_desc_logical;
		np->free_tx_count -= 2;
	} else {
		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
			skb->len, PCI_DMA_TODEVICE);
		np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		np->cur_tx_copy->status = TXOWN;
		np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
		--np->free_tx_count;
	}
#endif

	if (np->free_tx_count < 2)
		netif_stop_queue(dev);
	++np->really_tx_count;
	iowrite32(0, np->mem + TXPDR);
	dev->trans_start = jiffies;

	spin_unlock_irqrestore(&np->lock, flags);
	return 0;
}
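/*
 * Only the one_buffer path above is compiled in.  In either path the
 * descriptor's control word carries two lengths: the full packet size in
 * bits 21:11 (PKTSMask) and this descriptor's buffer size in bits 10:0
 * (TBSMask); with a single buffer the two are simply equal.  The
 * two_buffer variant would split frames longer than BPT (1022 bytes)
 * across two descriptors, TXFD on the first and TXLD on the second.
 */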
/* Take lock before calling */
/* Chip probably hosed tx ring. Clean up. */
static void reset_tx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur;
	int i;

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		cur = &np->tx_ring[i];
		if (cur->skbuff) {
			pci_unmap_single(np->pci_dev, cur->buffer,
				cur->skbuff->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cur->skbuff);
			cur->skbuff = NULL;
		}
		cur->status = 0;
		cur->control = 0;	/* needed? */
		/* probably not needed. We do it for purely paranoid reasons */
		cur->next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		cur->next_desc_logical = &np->tx_ring[i + 1];
	}
	/* for the last tx descriptor */
	np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
}


/* Take lock and stop rx before calling this */
static void reset_rx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur = np->cur_rx;
	int i;

	allocate_rx_buffers(dev);

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (cur->skbuff)
			cur->status = RXOWN;
		cur = cur->next_desc_logical;
	}

	iowrite32(np->rx_ring_dma + ((char *)np->cur_rx - (char *)np->rx_ring),
		  np->mem + RXLBA);
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	long boguscnt = max_interrupt_work;
	unsigned int num_tx = 0;
	int handled = 0;

	spin_lock(&np->lock);

	iowrite32(0, ioaddr + IMR);

	do {
		u32 intr_status = ioread32(ioaddr + ISR);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status, ioaddr + ISR);

		if (debug)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
			       intr_status);

		if (!(intr_status & np->imrvalue))
			break;

		handled = 1;

// 90/1/16 delete,
//
//		if (intr_status & FBE)
//		{	/* fatal error */
//			stop_nic_tx(ioaddr, 0);
//			stop_nic_rx(ioaddr, 0);
//			break;
//		};

		if (intr_status & TUNF)
			iowrite32(0, ioaddr + TXPDR);

		if (intr_status & CNTOVF) {
			/* missed pkts */
			np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

			/* crc error */
			np->stats.rx_crc_errors +=
			    (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
		}

		if (intr_status & (RI | RBU)) {
			if (intr_status & RI)
				netdev_rx(dev);
			else {
				stop_nic_rx(ioaddr, np->crvalue);
				reset_rx_descriptors(dev);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}

		while (np->really_tx_count) {
			long tx_status = np->cur_tx->status;
			long tx_control = np->cur_tx->control;

			if (!(tx_control & TXLD)) {	/* this pkt is combined by two tx descriptors */
				struct fealnx_desc *next;

				next = np->cur_tx->next_desc_logical;
				tx_status = next->status;
				tx_control = next->control;
			}

			if (tx_status & TXOWN)
				break;

			if (!(np->crvalue & CR_W_ENH)) {
				if (tx_status & (CSL | LC | EC | UDF | HF)) {
					np->stats.tx_errors++;
					if (tx_status & EC)
						np->stats.tx_aborted_errors++;
					if (tx_status & CSL)
						np->stats.tx_carrier_errors++;
					if (tx_status & LC)
						np->stats.tx_window_errors++;
					if (tx_status & UDF)
						np->stats.tx_fifo_errors++;
					if ((tx_status & HF) && np->mii.full_duplex == 0)
						np->stats.tx_heartbeat_errors++;
				} else {
					np->stats.tx_bytes +=
					    ((tx_control & PKTSMask) >> PKTSShift);

					np->stats.collisions +=
					    ((tx_status & NCRMask) >> NCRShift);
					np->stats.tx_packets++;
				}
			} else {
				np->stats.tx_bytes +=
				    ((tx_control & PKTSMask) >> PKTSShift);
				np->stats.tx_packets++;
			}

			/* Free the original skb. */
			pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
				np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(np->cur_tx->skbuff);
			np->cur_tx->skbuff = NULL;
			--np->really_tx_count;
			if (np->cur_tx->control & TXLD) {
				np->cur_tx = np->cur_tx->next_desc_logical;
				++np->free_tx_count;
			} else {
				np->cur_tx = np->cur_tx->next_desc_logical;
				np->cur_tx = np->cur_tx->next_desc_logical;
				np->free_tx_count += 2;
			}
			num_tx++;
		}		/* end of tx completion loop */

		if (num_tx && np->free_tx_count >= 2)
			netif_wake_queue(dev);

		/* read transmit status for enhanced mode only */
		if (np->crvalue & CR_W_ENH) {
			long data;

			data = ioread32(ioaddr + TSR);
			np->stats.tx_errors += (data & 0xff000000) >> 24;
			np->stats.tx_aborted_errors += (data & 0xff000000) >> 24;
			np->stats.tx_window_errors += (data & 0x00ff0000) >> 16;
			np->stats.collisions += (data & 0x0000ffff);
		}

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=0x%4.4x.\n", dev->name, intr_status);
			if (!np->reset_timer_armed) {
				np->reset_timer_armed = 1;
				np->reset_timer.expires = RUN_AT(HZ/2);
				add_timer(&np->reset_timer);
				stop_nic_rxtx(ioaddr, 0);
				netif_stop_queue(dev);
				/* or netif_tx_disable(dev); ?? */
				/* Prevent other paths from enabling tx,rx,intrs */
				np->crvalue_sv = np->crvalue;
				np->imrvalue_sv = np->imrvalue;
				np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN);	/* or simply = 0? */
				np->imrvalue = 0;
			}

			break;
		}
	} while (1);

	/* read the tally counters */
	/* missed pkts */
	np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

	/* crc error */
	np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;

	if (debug)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + ISR));

	iowrite32(np->imrvalue, ioaddr + IMR);

	spin_unlock(&np->lock);

	return IRQ_RETVAL(handled);
}
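/*
 * Interrupt masking pattern used above: the handler writes 0 to IMR on
 * entry so the chip raises no further interrupts while it runs, acks the
 * sources it saw by writing the status value back to ISR, and re-enables
 * with np->imrvalue on exit.  In the storm case np->imrvalue has been
 * zeroed first, so the exit write leaves interrupts off until
 * reset_timer() restores the saved mask.
 */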
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
		s32 rx_status = np->cur_rx->status;

		if (np->really_rx_count == 0)
			break;

		if (debug)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", rx_status);

		if ((!((rx_status & RXFSD) && (rx_status & RXLSD)))
		    || (rx_status & ErrorSummary)) {
			if (rx_status & ErrorSummary) {	/* there was a fatal error */
				if (debug)
					printk(KERN_DEBUG
					       "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, rx_status);

				np->stats.rx_errors++;	/* end of a packet. */
				if (rx_status & (LONG | RUNT))
					np->stats.rx_length_errors++;
				if (rx_status & RXER)
					np->stats.rx_frame_errors++;
				if (rx_status & CRC)
					np->stats.rx_crc_errors++;
			} else {
				int need_to_reset = 0;
				int desno = 0;

				if (rx_status & RXFSD) {	/* this pkt is too long, over one rx buffer */
					struct fealnx_desc *cur;

					/* check whether this packet was received completely */
					cur = np->cur_rx;
					while (desno <= np->really_rx_count) {
						++desno;
						if ((!(cur->status & RXOWN))
						    && (cur->status & RXLSD))
							break;
						/* goto next rx descriptor */
						cur = cur->next_desc_logical;
					}
					if (desno > np->really_rx_count)
						need_to_reset = 1;
				} else	/* RXLSD never showed up: something is wrong */
					need_to_reset = 1;

				if (need_to_reset == 0) {
					int i;

					np->stats.rx_length_errors++;

					/* free all rx descriptors related to this long pkt */
					for (i = 0; i < desno; ++i) {
						if (!np->cur_rx->skbuff) {
							printk(KERN_DEBUG
								"%s: I'm scared\n", dev->name);
							break;
						}
						np->cur_rx->status = RXOWN;
						np->cur_rx = np->cur_rx->next_desc_logical;
					}
					continue;
				} else {	/* rx error, need to reset this chip */
					stop_nic_rx(ioaddr, np->crvalue);
					reset_rx_descriptors(dev);
					iowrite32(np->crvalue, ioaddr + TCRRCR);
				}
				break;	/* exit the while loop */
			}
		} else {	/* this received pkt is ok */

			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;

#ifndef final_version
			if (debug)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
				       " status %x.\n", pkt_len, rx_status);
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,
							    np->cur_rx->buffer,
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				/* Call copy + cksum if available. */

#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb,
					np->cur_rx->skbuff->data, pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
					np->cur_rx->skbuff->data, pkt_len);
#endif
				pci_dma_sync_single_for_device(np->pci_dev,
							       np->cur_rx->buffer,
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev,
						 np->cur_rx->buffer,
						 np->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->cur_rx->skbuff, pkt_len);
				np->cur_rx->skbuff = NULL;
				--np->really_rx_count;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}

		np->cur_rx = np->cur_rx->next_desc_logical;
	}			/* end of while loop */

	/* allocate skb for rx buffers */
	allocate_rx_buffers(dev);

	return 0;
}
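/*
 * The rx_copybreak scheme above trades a copy for buffer reuse: frames
 * shorter than rx_copybreak are copied into a freshly allocated skb
 * (reserving 2 bytes so the IP header lands 16-byte aligned) and the
 * original DMA buffer is left in place for reuse, while longer frames
 * are unmapped and handed up directly, their descriptors being refilled
 * later by allocate_rx_buffers().
 */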
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* Only the silently dropped frames need to be read back from the chip. */
	if (netif_running(dev)) {
		np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
		np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
	}

	return &np->stats;
}


/* for dev->set_multicast_list */
static void set_rx_mode(struct net_device *dev)
{
	spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
	unsigned long flags;

	spin_lock_irqsave(lp, flags);
	__set_rx_mode(dev);
	spin_unlock_irqrestore(lp, flags);
}


/* Take lock before calling */
static void __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	u32 mc_filter[2];	/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_AB | CR_W_AM;
	} else {
		struct dev_mc_list *mclist;
		int i;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			unsigned int bit;

			bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
			mc_filter[bit >> 5] |= (1 << (bit & 31));
		}
		rx_mode = CR_W_AB | CR_W_AM;
	}

	stop_nic_rxtx(ioaddr, np->crvalue);

	iowrite32(mc_filter[0], ioaddr + MAR0);
	iowrite32(mc_filter[1], ioaddr + MAR1);
	np->crvalue &= ~CR_W_RXMODEMASK;
	np->crvalue |= rx_mode;
	iowrite32(np->crvalue, ioaddr + TCRRCR);
}
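/*
 * The 64-bit multicast hash above works as follows: the top 6 bits of the
 * CRC-32 of each multicast address (ether_crc() >> 26, flipped with
 * ^ 0x3F, apparently to match the chip's bit ordering) pick one of the 64
 * filter bits; bit >> 5 then selects MAR0 or MAR1 and bit & 31 the bit
 * within that register.
 */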
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};

static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}


static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int i;

	netif_stop_queue(dev);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0x0000, ioaddr + IMR);

	/* Stop the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	del_timer_sync(&np->timer);
	del_timer_sync(&np->reset_timer);

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = np->rx_ring[i].skbuff;

		np->rx_ring[i].status = 0;
		if (skb) {
			pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_ring[i].skbuff = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_ring[i].skbuff;

		if (skb) {
			pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_ring[i].skbuff = NULL;
		}
	}

	return 0;
}

static struct pci_device_id fealnx_pci_tbl[] = {
	{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{}			/* terminate list */
};
MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);


static struct pci_driver fealnx_driver = {
	.name		= "fealnx",
	.id_table	= fealnx_pci_tbl,
	.probe		= fealnx_init_one,
	.remove		= __devexit_p(fealnx_remove_one),
};

static int __init fealnx_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif

	return pci_register_driver(&fealnx_driver);
}

static void __exit fealnx_exit(void)
{
	pci_unregister_driver(&fealnx_driver);
}

module_init(fealnx_init);
module_exit(fealnx_exit);