/* fealnx driver source, from the Linux kernel mirror at
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git,
 * commit b1404069f64457c94de241738fdca142c2e5698f.
 */
/*
	Written 1998-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support information and updates available at
	http://www.scyld.com/network/pci-skeleton.html

	Linux kernel updates:

	Version 2.51, Nov 17, 2001 (jgarzik):
	- Add ethtool support
	- Replace some MII-related magic numbers with constants
*/

#define DRV_NAME	"fealnx"
#define DRV_VERSION	"2.52"
#define DRV_RELDATE	"Sep-11-2006"

static int debug;		/* 1-> print debug message */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
/* Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc. */
/* Both 'options[]' and 'full_duplex[]' should exist for driver */
/* interoperability. */
/* The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };

/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency. */
/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
/* Making the Tx ring too large decreases the effectiveness of channel */
/* bonding and packet priority. */
/* There are no ill effects from too-large receive rings. */
// 88-12-9 modify,
// #define TX_RING_SIZE 16
// #define RX_RING_SIZE 32
#define TX_RING_SIZE 6
#define RX_RING_SIZE 12
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct fealnx_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct fealnx_desc)

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */


/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>

/* These identify the driver base version and may not be removed.
*/ 96static char version[] = 97KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n"; 98 99 100/* This driver was written to use PCI memory space, however some x86 systems 101 work only with I/O space accesses. */ 102#ifndef __alpha__ 103#define USE_IO_OPS 104#endif 105 106/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */ 107/* This is only in the support-all-kernels source code. */ 108 109#define RUN_AT(x) (jiffies + (x)) 110 111MODULE_AUTHOR("Myson or whoever"); 112MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver"); 113MODULE_LICENSE("GPL"); 114module_param(max_interrupt_work, int, 0); 115module_param(debug, int, 0); 116module_param(rx_copybreak, int, 0); 117module_param(multicast_filter_limit, int, 0); 118module_param_array(options, int, NULL, 0); 119module_param_array(full_duplex, int, NULL, 0); 120MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt"); 121MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)"); 122MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames"); 123MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses"); 124MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex"); 125MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)"); 126 127enum { 128 MIN_REGION_SIZE = 136, 129}; 130 131/* A chip capabilities table, matching the entries in pci_tbl[] above. */ 132enum chip_capability_flags { 133 HAS_MII_XCVR, 134 HAS_CHIP_XCVR, 135}; 136 137/* 89/6/13 add, */ 138/* for different PHY */ 139enum phy_type_flags { 140 MysonPHY = 1, 141 AhdocPHY = 2, 142 SeeqPHY = 3, 143 MarvellPHY = 4, 144 Myson981 = 5, 145 LevelOnePHY = 6, 146 OtherPHY = 10, 147}; 148 149struct chip_info { 150 char *chip_name; 151 int flags; 152}; 153 154static const struct chip_info skel_netdrv_tbl[] __devinitdata = { 155 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, 156 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR }, 157 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, 158}; 159 160/* Offsets to the Command and Status Registers. */ 161enum fealnx_offsets { 162 PAR0 = 0x0, /* physical address 0-3 */ 163 PAR1 = 0x04, /* physical address 4-5 */ 164 MAR0 = 0x08, /* multicast address 0-3 */ 165 MAR1 = 0x0C, /* multicast address 4-7 */ 166 FAR0 = 0x10, /* flow-control address 0-3 */ 167 FAR1 = 0x14, /* flow-control address 4-5 */ 168 TCRRCR = 0x18, /* receive & transmit configuration */ 169 BCR = 0x1C, /* bus command */ 170 TXPDR = 0x20, /* transmit polling demand */ 171 RXPDR = 0x24, /* receive polling demand */ 172 RXCWP = 0x28, /* receive current word pointer */ 173 TXLBA = 0x2C, /* transmit list base address */ 174 RXLBA = 0x30, /* receive list base address */ 175 ISR = 0x34, /* interrupt status */ 176 IMR = 0x38, /* interrupt mask */ 177 FTH = 0x3C, /* flow control high/low threshold */ 178 MANAGEMENT = 0x40, /* bootrom/eeprom and mii management */ 179 TALLY = 0x44, /* tally counters for crc and mpa */ 180 TSR = 0x48, /* tally counter for transmit status */ 181 BMCRSR = 0x4c, /* basic mode control and status */ 182 PHYIDENTIFIER = 0x50, /* phy identifier */ 183 ANARANLPAR = 0x54, /* auto-negotiation advertisement and link 184 partner ability */ 185 ANEROCR = 0x58, /* auto-negotiation expansion and pci conf. */ 186 BPREMRPSR = 0x5c, /* bypass & receive error mask and phy status */ 187}; 188 189/* Bits in the interrupt status/enable registers. 
 */
/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
enum intr_status_bits {
	RFCON = 0x00020000,	/* receive flow control xon packet */
	RFCOFF = 0x00010000,	/* receive flow control xoff packet */
	LSCStatus = 0x00008000,	/* link status change */
	ANCStatus = 0x00004000,	/* autonegotiation completed */
	FBE = 0x00002000,	/* fatal bus error */
	FBEMask = 0x00001800,	/* mask bit12-11 */
	ParityErr = 0x00000000,	/* parity error */
	TargetErr = 0x00001000,	/* target abort */
	MasterErr = 0x00000800,	/* master error */
	TUNF = 0x00000400,	/* transmit underflow */
	ROVF = 0x00000200,	/* receive overflow */
	ETI = 0x00000100,	/* transmit early int */
	ERI = 0x00000080,	/* receive early int */
	CNTOVF = 0x00000040,	/* counter overflow */
	RBU = 0x00000020,	/* receive buffer unavailable */
	TBU = 0x00000010,	/* transmit buffer unavailable */
	TI = 0x00000008,	/* transmit interrupt */
	RI = 0x00000004,	/* receive interrupt */
	RxErr = 0x00000002,	/* receive error */
};

/* Bits in the NetworkConfig register, W for writing, R for reading */
/* FIXME: some names are invented by me. Marked with (name?) */
/* If you have docs and know bit names, please fix 'em */
enum rx_mode_bits {
	CR_W_ENH = 0x02000000,		/* enhanced mode (name?) */
	CR_W_FD = 0x00100000,		/* full duplex */
	CR_W_PS10 = 0x00080000,		/* 10 mbit */
	CR_W_TXEN = 0x00040000,		/* tx enable (name?) */
	CR_W_PS1000 = 0x00010000,	/* 1000 mbit */
	/* CR_W_RXBURSTMASK = 0x00000e00, I'm unsure about this */
	CR_W_RXMODEMASK = 0x000000e0,
	CR_W_PROM = 0x00000080,		/* promiscuous mode */
	CR_W_AB = 0x00000040,		/* accept broadcast */
	CR_W_AM = 0x00000020,		/* accept multicast */
	CR_W_ARP = 0x00000008,		/* receive runt pkt */
	CR_W_ALP = 0x00000004,		/* receive long pkt */
	CR_W_SEP = 0x00000002,		/* receive error pkt */
	CR_W_RXEN = 0x00000001,		/* rx enable (unicast?) (name?) */

	CR_R_TXSTOP = 0x04000000,	/* tx stopped (name?) */
	CR_R_FD = 0x00100000,		/* full duplex detected */
	CR_R_PS10 = 0x00080000,		/* 10 mbit detected */
	CR_R_RXSTOP = 0x00008000,	/* rx stopped (name?) */
};

/* The Tulip Rx and Tx buffer descriptors.
*/ 239struct fealnx_desc { 240 s32 status; 241 s32 control; 242 u32 buffer; 243 u32 next_desc; 244 struct fealnx_desc *next_desc_logical; 245 struct sk_buff *skbuff; 246 u32 reserved1; 247 u32 reserved2; 248}; 249 250/* Bits in network_desc.status */ 251enum rx_desc_status_bits { 252 RXOWN = 0x80000000, /* own bit */ 253 FLNGMASK = 0x0fff0000, /* frame length */ 254 FLNGShift = 16, 255 MARSTATUS = 0x00004000, /* multicast address received */ 256 BARSTATUS = 0x00002000, /* broadcast address received */ 257 PHYSTATUS = 0x00001000, /* physical address received */ 258 RXFSD = 0x00000800, /* first descriptor */ 259 RXLSD = 0x00000400, /* last descriptor */ 260 ErrorSummary = 0x80, /* error summary */ 261 RUNT = 0x40, /* runt packet received */ 262 LONG = 0x20, /* long packet received */ 263 FAE = 0x10, /* frame align error */ 264 CRC = 0x08, /* crc error */ 265 RXER = 0x04, /* receive error */ 266}; 267 268enum rx_desc_control_bits { 269 RXIC = 0x00800000, /* interrupt control */ 270 RBSShift = 0, 271}; 272 273enum tx_desc_status_bits { 274 TXOWN = 0x80000000, /* own bit */ 275 JABTO = 0x00004000, /* jabber timeout */ 276 CSL = 0x00002000, /* carrier sense lost */ 277 LC = 0x00001000, /* late collision */ 278 EC = 0x00000800, /* excessive collision */ 279 UDF = 0x00000400, /* fifo underflow */ 280 DFR = 0x00000200, /* deferred */ 281 HF = 0x00000100, /* heartbeat fail */ 282 NCRMask = 0x000000ff, /* collision retry count */ 283 NCRShift = 0, 284}; 285 286enum tx_desc_control_bits { 287 TXIC = 0x80000000, /* interrupt control */ 288 ETIControl = 0x40000000, /* early transmit interrupt */ 289 TXLD = 0x20000000, /* last descriptor */ 290 TXFD = 0x10000000, /* first descriptor */ 291 CRCEnable = 0x08000000, /* crc control */ 292 PADEnable = 0x04000000, /* padding control */ 293 RetryTxLC = 0x02000000, /* retry late collision */ 294 PKTSMask = 0x3ff800, /* packet size bit21-11 */ 295 PKTSShift = 11, 296 TBSMask = 0x000007ff, /* transmit buffer bit 10-0 */ 297 TBSShift = 0, 298}; 299 300/* BootROM/EEPROM/MII Management Register */ 301#define MASK_MIIR_MII_READ 0x00000000 302#define MASK_MIIR_MII_WRITE 0x00000008 303#define MASK_MIIR_MII_MDO 0x00000004 304#define MASK_MIIR_MII_MDI 0x00000002 305#define MASK_MIIR_MII_MDC 0x00000001 306 307/* ST+OP+PHYAD+REGAD+TA */ 308#define OP_READ 0x6000 /* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */ 309#define OP_WRITE 0x5002 /* ST:01+OP:01+PHYAD+REGAD+TA:10 */ 310 311/* ------------------------------------------------------------------------- */ 312/* Constants for Myson PHY */ 313/* ------------------------------------------------------------------------- */ 314#define MysonPHYID 0xd0000302 315/* 89-7-27 add, (begin) */ 316#define MysonPHYID0 0x0302 317#define StatusRegister 18 318#define SPEED100 0x0400 // bit10 319#define FULLMODE 0x0800 // bit11 320/* 89-7-27 add, (end) */ 321 322/* ------------------------------------------------------------------------- */ 323/* Constants for Seeq 80225 PHY */ 324/* ------------------------------------------------------------------------- */ 325#define SeeqPHYID0 0x0016 326 327#define MIIRegister18 18 328#define SPD_DET_100 0x80 329#define DPLX_DET_FULL 0x40 330 331/* ------------------------------------------------------------------------- */ 332/* Constants for Ahdoc 101 PHY */ 333/* ------------------------------------------------------------------------- */ 334#define AhdocPHYID0 0x0022 335 336#define DiagnosticReg 18 337#define DPLX_FULL 0x0800 338#define Speed_100 0x0400 339 340/* 89/6/13 add, */ 341/* 
-------------------------------------------------------------------------- */ 342/* Constants */ 343/* -------------------------------------------------------------------------- */ 344#define MarvellPHYID0 0x0141 345#define LevelOnePHYID0 0x0013 346 347#define MII1000BaseTControlReg 9 348#define MII1000BaseTStatusReg 10 349#define SpecificReg 17 350 351/* for 1000BaseT Control Register */ 352#define PHYAbletoPerform1000FullDuplex 0x0200 353#define PHYAbletoPerform1000HalfDuplex 0x0100 354#define PHY1000AbilityMask 0x300 355 356// for phy specific status register, marvell phy. 357#define SpeedMask 0x0c000 358#define Speed_1000M 0x08000 359#define Speed_100M 0x4000 360#define Speed_10M 0 361#define Full_Duplex 0x2000 362 363// 89/12/29 add, for phy specific status register, levelone phy, (begin) 364#define LXT1000_100M 0x08000 365#define LXT1000_1000M 0x0c000 366#define LXT1000_Full 0x200 367// 89/12/29 add, for phy specific status register, levelone phy, (end) 368 369/* for 3-in-1 case, BMCRSR register */ 370#define LinkIsUp2 0x00040000 371 372/* for PHY */ 373#define LinkIsUp 0x0004 374 375 376struct netdev_private { 377 /* Descriptor rings first for alignment. */ 378 struct fealnx_desc *rx_ring; 379 struct fealnx_desc *tx_ring; 380 381 dma_addr_t rx_ring_dma; 382 dma_addr_t tx_ring_dma; 383 384 spinlock_t lock; 385 386 struct net_device_stats stats; 387 388 /* Media monitoring timer. */ 389 struct timer_list timer; 390 391 /* Reset timer */ 392 struct timer_list reset_timer; 393 int reset_timer_armed; 394 unsigned long crvalue_sv; 395 unsigned long imrvalue_sv; 396 397 /* Frequently used values: keep some adjacent for cache effect. */ 398 int flags; 399 struct pci_dev *pci_dev; 400 unsigned long crvalue; 401 unsigned long bcrvalue; 402 unsigned long imrvalue; 403 struct fealnx_desc *cur_rx; 404 struct fealnx_desc *lack_rxbuf; 405 int really_rx_count; 406 struct fealnx_desc *cur_tx; 407 struct fealnx_desc *cur_tx_copy; 408 int really_tx_count; 409 int free_tx_count; 410 unsigned int rx_buf_sz; /* Based on MTU+slack. */ 411 412 /* These values are keep track of the transceiver/media in use. */ 413 unsigned int linkok; 414 unsigned int line_speed; 415 unsigned int duplexmode; 416 unsigned int default_port:4; /* Last dev->if_port value. */ 417 unsigned int PHYType; 418 419 /* MII transceiver section. */ 420 int mii_cnt; /* MII device addresses. */ 421 unsigned char phys[2]; /* MII device addresses. 
*/ 422 struct mii_if_info mii; 423 void __iomem *mem; 424}; 425 426 427static int mdio_read(struct net_device *dev, int phy_id, int location); 428static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 429static int netdev_open(struct net_device *dev); 430static void getlinktype(struct net_device *dev); 431static void getlinkstatus(struct net_device *dev); 432static void netdev_timer(unsigned long data); 433static void reset_timer(unsigned long data); 434static void tx_timeout(struct net_device *dev); 435static void init_ring(struct net_device *dev); 436static int start_tx(struct sk_buff *skb, struct net_device *dev); 437static irqreturn_t intr_handler(int irq, void *dev_instance); 438static int netdev_rx(struct net_device *dev); 439static void set_rx_mode(struct net_device *dev); 440static void __set_rx_mode(struct net_device *dev); 441static struct net_device_stats *get_stats(struct net_device *dev); 442static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 443static const struct ethtool_ops netdev_ethtool_ops; 444static int netdev_close(struct net_device *dev); 445static void reset_rx_descriptors(struct net_device *dev); 446static void reset_tx_descriptors(struct net_device *dev); 447 448static void stop_nic_rx(void __iomem *ioaddr, long crvalue) 449{ 450 int delay = 0x1000; 451 iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR); 452 while (--delay) { 453 if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP) 454 break; 455 } 456} 457 458 459static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue) 460{ 461 int delay = 0x1000; 462 iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR); 463 while (--delay) { 464 if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP)) 465 == (CR_R_RXSTOP+CR_R_TXSTOP) ) 466 break; 467 } 468} 469 470 471static int __devinit fealnx_init_one(struct pci_dev *pdev, 472 const struct pci_device_id *ent) 473{ 474 struct netdev_private *np; 475 int i, option, err, irq; 476 static int card_idx = -1; 477 char boardname[12]; 478 void __iomem *ioaddr; 479 unsigned long len; 480 unsigned int chip_id = ent->driver_data; 481 struct net_device *dev; 482 void *ring_space; 483 dma_addr_t ring_dma; 484#ifdef USE_IO_OPS 485 int bar = 0; 486#else 487 int bar = 1; 488#endif 489 DECLARE_MAC_BUF(mac); 490 491/* when built into the kernel, we only print version if device is found */ 492#ifndef MODULE 493 static int printed_version; 494 if (!printed_version++) 495 printk(version); 496#endif 497 498 card_idx++; 499 sprintf(boardname, "fealnx%d", card_idx); 500 501 option = card_idx < MAX_UNITS ? options[card_idx] : 0; 502 503 i = pci_enable_device(pdev); 504 if (i) return i; 505 pci_set_master(pdev); 506 507 len = pci_resource_len(pdev, bar); 508 if (len < MIN_REGION_SIZE) { 509 dev_err(&pdev->dev, 510 "region size %ld too small, aborting\n", len); 511 return -ENODEV; 512 } 513 514 i = pci_request_regions(pdev, boardname); 515 if (i) 516 return i; 517 518 irq = pdev->irq; 519 520 ioaddr = pci_iomap(pdev, bar, len); 521 if (!ioaddr) { 522 err = -ENOMEM; 523 goto err_out_res; 524 } 525 526 dev = alloc_etherdev(sizeof(struct netdev_private)); 527 if (!dev) { 528 err = -ENOMEM; 529 goto err_out_unmap; 530 } 531 SET_NETDEV_DEV(dev, &pdev->dev); 532 533 /* read ethernet id */ 534 for (i = 0; i < 6; ++i) 535 dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i); 536 537 /* Reset the chip to erase previous misconfiguration. 
*/ 538 iowrite32(0x00000001, ioaddr + BCR); 539 540 dev->base_addr = (unsigned long)ioaddr; 541 dev->irq = irq; 542 543 /* Make certain the descriptor lists are aligned. */ 544 np = netdev_priv(dev); 545 np->mem = ioaddr; 546 spin_lock_init(&np->lock); 547 np->pci_dev = pdev; 548 np->flags = skel_netdrv_tbl[chip_id].flags; 549 pci_set_drvdata(pdev, dev); 550 np->mii.dev = dev; 551 np->mii.mdio_read = mdio_read; 552 np->mii.mdio_write = mdio_write; 553 np->mii.phy_id_mask = 0x1f; 554 np->mii.reg_num_mask = 0x1f; 555 556 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); 557 if (!ring_space) { 558 err = -ENOMEM; 559 goto err_out_free_dev; 560 } 561 np->rx_ring = (struct fealnx_desc *)ring_space; 562 np->rx_ring_dma = ring_dma; 563 564 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); 565 if (!ring_space) { 566 err = -ENOMEM; 567 goto err_out_free_rx; 568 } 569 np->tx_ring = (struct fealnx_desc *)ring_space; 570 np->tx_ring_dma = ring_dma; 571 572 /* find the connected MII xcvrs */ 573 if (np->flags == HAS_MII_XCVR) { 574 int phy, phy_idx = 0; 575 576 for (phy = 1; phy < 32 && phy_idx < 4; phy++) { 577 int mii_status = mdio_read(dev, phy, 1); 578 579 if (mii_status != 0xffff && mii_status != 0x0000) { 580 np->phys[phy_idx++] = phy; 581 dev_info(&pdev->dev, 582 "MII PHY found at address %d, status " 583 "0x%4.4x.\n", phy, mii_status); 584 /* get phy type */ 585 { 586 unsigned int data; 587 588 data = mdio_read(dev, np->phys[0], 2); 589 if (data == SeeqPHYID0) 590 np->PHYType = SeeqPHY; 591 else if (data == AhdocPHYID0) 592 np->PHYType = AhdocPHY; 593 else if (data == MarvellPHYID0) 594 np->PHYType = MarvellPHY; 595 else if (data == MysonPHYID0) 596 np->PHYType = Myson981; 597 else if (data == LevelOnePHYID0) 598 np->PHYType = LevelOnePHY; 599 else 600 np->PHYType = OtherPHY; 601 } 602 } 603 } 604 605 np->mii_cnt = phy_idx; 606 if (phy_idx == 0) 607 dev_warn(&pdev->dev, 608 "MII PHY not found -- this device may " 609 "not operate correctly.\n"); 610 } else { 611 np->phys[0] = 32; 612/* 89/6/23 add, (begin) */ 613 /* get phy type */ 614 if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID) 615 np->PHYType = MysonPHY; 616 else 617 np->PHYType = OtherPHY; 618 } 619 np->mii.phy_id = np->phys[0]; 620 621 if (dev->mem_start) 622 option = dev->mem_start; 623 624 /* The lower four bits are the media type. */ 625 if (option > 0) { 626 if (option & 0x200) 627 np->mii.full_duplex = 1; 628 np->default_port = option & 15; 629 } 630 631 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0) 632 np->mii.full_duplex = full_duplex[card_idx]; 633 634 if (np->mii.full_duplex) { 635 dev_info(&pdev->dev, "Media type forced to Full Duplex.\n"); 636/* 89/6/13 add, (begin) */ 637// if (np->PHYType==MarvellPHY) 638 if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) { 639 unsigned int data; 640 641 data = mdio_read(dev, np->phys[0], 9); 642 data = (data & 0xfcff) | 0x0200; 643 mdio_write(dev, np->phys[0], 9, data); 644 } 645/* 89/6/13 add, (end) */ 646 if (np->flags == HAS_MII_XCVR) 647 mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL); 648 else 649 iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR); 650 np->mii.force_media = 1; 651 } 652 653 /* The chip-specific entries in the device structure. 
*/ 654 dev->open = &netdev_open; 655 dev->hard_start_xmit = &start_tx; 656 dev->stop = &netdev_close; 657 dev->get_stats = &get_stats; 658 dev->set_multicast_list = &set_rx_mode; 659 dev->do_ioctl = &mii_ioctl; 660 dev->ethtool_ops = &netdev_ethtool_ops; 661 dev->tx_timeout = &tx_timeout; 662 dev->watchdog_timeo = TX_TIMEOUT; 663 664 err = register_netdev(dev); 665 if (err) 666 goto err_out_free_tx; 667 668 printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n", 669 dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr, 670 print_mac(mac, dev->dev_addr), irq); 671 672 return 0; 673 674err_out_free_tx: 675 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); 676err_out_free_rx: 677 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); 678err_out_free_dev: 679 free_netdev(dev); 680err_out_unmap: 681 pci_iounmap(pdev, ioaddr); 682err_out_res: 683 pci_release_regions(pdev); 684 return err; 685} 686 687 688static void __devexit fealnx_remove_one(struct pci_dev *pdev) 689{ 690 struct net_device *dev = pci_get_drvdata(pdev); 691 692 if (dev) { 693 struct netdev_private *np = netdev_priv(dev); 694 695 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, 696 np->tx_ring_dma); 697 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, 698 np->rx_ring_dma); 699 unregister_netdev(dev); 700 pci_iounmap(pdev, np->mem); 701 free_netdev(dev); 702 pci_release_regions(pdev); 703 pci_set_drvdata(pdev, NULL); 704 } else 705 printk(KERN_ERR "fealnx: remove for unknown device\n"); 706} 707 708 709static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad) 710{ 711 ulong miir; 712 int i; 713 unsigned int mask, data; 714 715 /* enable MII output */ 716 miir = (ulong) ioread32(miiport); 717 miir &= 0xfffffff0; 718 719 miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO; 720 721 /* send 32 1's preamble */ 722 for (i = 0; i < 32; i++) { 723 /* low MDC; MDO is already high (miir) */ 724 miir &= ~MASK_MIIR_MII_MDC; 725 iowrite32(miir, miiport); 726 727 /* high MDC */ 728 miir |= MASK_MIIR_MII_MDC; 729 iowrite32(miir, miiport); 730 } 731 732 /* calculate ST+OP+PHYAD+REGAD+TA */ 733 data = opcode | (phyad << 7) | (regad << 2); 734 735 /* sent out */ 736 mask = 0x8000; 737 while (mask) { 738 /* low MDC, prepare MDO */ 739 miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO); 740 if (mask & data) 741 miir |= MASK_MIIR_MII_MDO; 742 743 iowrite32(miir, miiport); 744 /* high MDC */ 745 miir |= MASK_MIIR_MII_MDC; 746 iowrite32(miir, miiport); 747 udelay(30); 748 749 /* next */ 750 mask >>= 1; 751 if (mask == 0x2 && opcode == OP_READ) 752 miir &= ~MASK_MIIR_MII_WRITE; 753 } 754 return miir; 755} 756 757 758static int mdio_read(struct net_device *dev, int phyad, int regad) 759{ 760 struct netdev_private *np = netdev_priv(dev); 761 void __iomem *miiport = np->mem + MANAGEMENT; 762 ulong miir; 763 unsigned int mask, data; 764 765 miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad); 766 767 /* read data */ 768 mask = 0x8000; 769 data = 0; 770 while (mask) { 771 /* low MDC */ 772 miir &= ~MASK_MIIR_MII_MDC; 773 iowrite32(miir, miiport); 774 775 /* read MDI */ 776 miir = ioread32(miiport); 777 if (miir & MASK_MIIR_MII_MDI) 778 data |= mask; 779 780 /* high MDC, and wait */ 781 miir |= MASK_MIIR_MII_MDC; 782 iowrite32(miir, miiport); 783 udelay(30); 784 785 /* next */ 786 mask >>= 1; 787 } 788 789 /* low MDC */ 790 miir &= ~MASK_MIIR_MII_MDC; 791 iowrite32(miir, miiport); 792 793 return data & 0xffff; 794} 795 796 797static void mdio_write(struct net_device *dev, int phyad, 
int regad, int data) 798{ 799 struct netdev_private *np = netdev_priv(dev); 800 void __iomem *miiport = np->mem + MANAGEMENT; 801 ulong miir; 802 unsigned int mask; 803 804 miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad); 805 806 /* write data */ 807 mask = 0x8000; 808 while (mask) { 809 /* low MDC, prepare MDO */ 810 miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO); 811 if (mask & data) 812 miir |= MASK_MIIR_MII_MDO; 813 iowrite32(miir, miiport); 814 815 /* high MDC */ 816 miir |= MASK_MIIR_MII_MDC; 817 iowrite32(miir, miiport); 818 819 /* next */ 820 mask >>= 1; 821 } 822 823 /* low MDC */ 824 miir &= ~MASK_MIIR_MII_MDC; 825 iowrite32(miir, miiport); 826} 827 828 829static int netdev_open(struct net_device *dev) 830{ 831 struct netdev_private *np = netdev_priv(dev); 832 void __iomem *ioaddr = np->mem; 833 int i; 834 835 iowrite32(0x00000001, ioaddr + BCR); /* Reset */ 836 837 if (request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev)) 838 return -EAGAIN; 839 840 for (i = 0; i < 3; i++) 841 iowrite16(((unsigned short*)dev->dev_addr)[i], 842 ioaddr + PAR0 + i*2); 843 844 init_ring(dev); 845 846 iowrite32(np->rx_ring_dma, ioaddr + RXLBA); 847 iowrite32(np->tx_ring_dma, ioaddr + TXLBA); 848 849 /* Initialize other registers. */ 850 /* Configure the PCI bus bursts and FIFO thresholds. 851 486: Set 8 longword burst. 852 586: no burst limit. 853 Burst length 5:3 854 0 0 0 1 855 0 0 1 4 856 0 1 0 8 857 0 1 1 16 858 1 0 0 32 859 1 0 1 64 860 1 1 0 128 861 1 1 1 256 862 Wait the specified 50 PCI cycles after a reset by initializing 863 Tx and Rx queues and the address filter list. 864 FIXME (Ueimor): optimistic for alpha + posted writes ? */ 865 866 np->bcrvalue = 0x10; /* little-endian, 8 burst length */ 867#ifdef __BIG_ENDIAN 868 np->bcrvalue |= 0x04; /* big-endian */ 869#endif 870 871#if defined(__i386__) && !defined(MODULE) 872 if (boot_cpu_data.x86 <= 4) 873 np->crvalue = 0xa00; 874 else 875#endif 876 np->crvalue = 0xe00; /* rx 128 burst length */ 877 878 879// 89/12/29 add, 880// 90/1/16 modify, 881// np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI; 882 np->imrvalue = TUNF | CNTOVF | RBU | TI | RI; 883 if (np->pci_dev->device == 0x891) { 884 np->bcrvalue |= 0x200; /* set PROG bit */ 885 np->crvalue |= CR_W_ENH; /* set enhanced bit */ 886 np->imrvalue |= ETI; 887 } 888 iowrite32(np->bcrvalue, ioaddr + BCR); 889 890 if (dev->if_port == 0) 891 dev->if_port = np->default_port; 892 893 iowrite32(0, ioaddr + RXPDR); 894// 89/9/1 modify, 895// np->crvalue = 0x00e40001; /* tx store and forward, tx/rx enable */ 896 np->crvalue |= 0x00e40001; /* tx store and forward, tx/rx enable */ 897 np->mii.full_duplex = np->mii.force_media; 898 getlinkstatus(dev); 899 if (np->linkok) 900 getlinktype(dev); 901 __set_rx_mode(dev); 902 903 netif_start_queue(dev); 904 905 /* Clear and Enable interrupts by setting the interrupt mask. */ 906 iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR); 907 iowrite32(np->imrvalue, ioaddr + IMR); 908 909 if (debug) 910 printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name); 911 912 /* Set the timer to check for link beat. 
*/ 913 init_timer(&np->timer); 914 np->timer.expires = RUN_AT(3 * HZ); 915 np->timer.data = (unsigned long) dev; 916 np->timer.function = &netdev_timer; 917 918 /* timer handler */ 919 add_timer(&np->timer); 920 921 init_timer(&np->reset_timer); 922 np->reset_timer.data = (unsigned long) dev; 923 np->reset_timer.function = &reset_timer; 924 np->reset_timer_armed = 0; 925 926 return 0; 927} 928 929 930static void getlinkstatus(struct net_device *dev) 931/* function: Routine will read MII Status Register to get link status. */ 932/* input : dev... pointer to the adapter block. */ 933/* output : none. */ 934{ 935 struct netdev_private *np = netdev_priv(dev); 936 unsigned int i, DelayTime = 0x1000; 937 938 np->linkok = 0; 939 940 if (np->PHYType == MysonPHY) { 941 for (i = 0; i < DelayTime; ++i) { 942 if (ioread32(np->mem + BMCRSR) & LinkIsUp2) { 943 np->linkok = 1; 944 return; 945 } 946 udelay(100); 947 } 948 } else { 949 for (i = 0; i < DelayTime; ++i) { 950 if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) { 951 np->linkok = 1; 952 return; 953 } 954 udelay(100); 955 } 956 } 957} 958 959 960static void getlinktype(struct net_device *dev) 961{ 962 struct netdev_private *np = netdev_priv(dev); 963 964 if (np->PHYType == MysonPHY) { /* 3-in-1 case */ 965 if (ioread32(np->mem + TCRRCR) & CR_R_FD) 966 np->duplexmode = 2; /* full duplex */ 967 else 968 np->duplexmode = 1; /* half duplex */ 969 if (ioread32(np->mem + TCRRCR) & CR_R_PS10) 970 np->line_speed = 1; /* 10M */ 971 else 972 np->line_speed = 2; /* 100M */ 973 } else { 974 if (np->PHYType == SeeqPHY) { /* this PHY is SEEQ 80225 */ 975 unsigned int data; 976 977 data = mdio_read(dev, np->phys[0], MIIRegister18); 978 if (data & SPD_DET_100) 979 np->line_speed = 2; /* 100M */ 980 else 981 np->line_speed = 1; /* 10M */ 982 if (data & DPLX_DET_FULL) 983 np->duplexmode = 2; /* full duplex mode */ 984 else 985 np->duplexmode = 1; /* half duplex mode */ 986 } else if (np->PHYType == AhdocPHY) { 987 unsigned int data; 988 989 data = mdio_read(dev, np->phys[0], DiagnosticReg); 990 if (data & Speed_100) 991 np->line_speed = 2; /* 100M */ 992 else 993 np->line_speed = 1; /* 10M */ 994 if (data & DPLX_FULL) 995 np->duplexmode = 2; /* full duplex mode */ 996 else 997 np->duplexmode = 1; /* half duplex mode */ 998 } 999/* 89/6/13 add, (begin) */ 1000 else if (np->PHYType == MarvellPHY) { 1001 unsigned int data; 1002 1003 data = mdio_read(dev, np->phys[0], SpecificReg); 1004 if (data & Full_Duplex) 1005 np->duplexmode = 2; /* full duplex mode */ 1006 else 1007 np->duplexmode = 1; /* half duplex mode */ 1008 data &= SpeedMask; 1009 if (data == Speed_1000M) 1010 np->line_speed = 3; /* 1000M */ 1011 else if (data == Speed_100M) 1012 np->line_speed = 2; /* 100M */ 1013 else 1014 np->line_speed = 1; /* 10M */ 1015 } 1016/* 89/6/13 add, (end) */ 1017/* 89/7/27 add, (begin) */ 1018 else if (np->PHYType == Myson981) { 1019 unsigned int data; 1020 1021 data = mdio_read(dev, np->phys[0], StatusRegister); 1022 1023 if (data & SPEED100) 1024 np->line_speed = 2; 1025 else 1026 np->line_speed = 1; 1027 1028 if (data & FULLMODE) 1029 np->duplexmode = 2; 1030 else 1031 np->duplexmode = 1; 1032 } 1033/* 89/7/27 add, (end) */ 1034/* 89/12/29 add */ 1035 else if (np->PHYType == LevelOnePHY) { 1036 unsigned int data; 1037 1038 data = mdio_read(dev, np->phys[0], SpecificReg); 1039 if (data & LXT1000_Full) 1040 np->duplexmode = 2; /* full duplex mode */ 1041 else 1042 np->duplexmode = 1; /* half duplex mode */ 1043 data &= SpeedMask; 1044 if (data == LXT1000_1000M) 1045 
np->line_speed = 3; /* 1000M */ 1046 else if (data == LXT1000_100M) 1047 np->line_speed = 2; /* 100M */ 1048 else 1049 np->line_speed = 1; /* 10M */ 1050 } 1051 np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000); 1052 if (np->line_speed == 1) 1053 np->crvalue |= CR_W_PS10; 1054 else if (np->line_speed == 3) 1055 np->crvalue |= CR_W_PS1000; 1056 if (np->duplexmode == 2) 1057 np->crvalue |= CR_W_FD; 1058 } 1059} 1060 1061 1062/* Take lock before calling this */ 1063static void allocate_rx_buffers(struct net_device *dev) 1064{ 1065 struct netdev_private *np = netdev_priv(dev); 1066 1067 /* allocate skb for rx buffers */ 1068 while (np->really_rx_count != RX_RING_SIZE) { 1069 struct sk_buff *skb; 1070 1071 skb = dev_alloc_skb(np->rx_buf_sz); 1072 if (skb == NULL) 1073 break; /* Better luck next round. */ 1074 1075 while (np->lack_rxbuf->skbuff) 1076 np->lack_rxbuf = np->lack_rxbuf->next_desc_logical; 1077 1078 skb->dev = dev; /* Mark as being used by this device. */ 1079 np->lack_rxbuf->skbuff = skb; 1080 np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data, 1081 np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1082 np->lack_rxbuf->status = RXOWN; 1083 ++np->really_rx_count; 1084 } 1085} 1086 1087 1088static void netdev_timer(unsigned long data) 1089{ 1090 struct net_device *dev = (struct net_device *) data; 1091 struct netdev_private *np = netdev_priv(dev); 1092 void __iomem *ioaddr = np->mem; 1093 int old_crvalue = np->crvalue; 1094 unsigned int old_linkok = np->linkok; 1095 unsigned long flags; 1096 1097 if (debug) 1098 printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x " 1099 "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR), 1100 ioread32(ioaddr + TCRRCR)); 1101 1102 spin_lock_irqsave(&np->lock, flags); 1103 1104 if (np->flags == HAS_MII_XCVR) { 1105 getlinkstatus(dev); 1106 if ((old_linkok == 0) && (np->linkok == 1)) { /* we need to detect the media type again */ 1107 getlinktype(dev); 1108 if (np->crvalue != old_crvalue) { 1109 stop_nic_rxtx(ioaddr, np->crvalue); 1110 iowrite32(np->crvalue, ioaddr + TCRRCR); 1111 } 1112 } 1113 } 1114 1115 allocate_rx_buffers(dev); 1116 1117 spin_unlock_irqrestore(&np->lock, flags); 1118 1119 np->timer.expires = RUN_AT(10 * HZ); 1120 add_timer(&np->timer); 1121} 1122 1123 1124/* Take lock before calling */ 1125/* Reset chip and disable rx, tx and interrupts */ 1126static void reset_and_disable_rxtx(struct net_device *dev) 1127{ 1128 struct netdev_private *np = netdev_priv(dev); 1129 void __iomem *ioaddr = np->mem; 1130 int delay=51; 1131 1132 /* Reset the chip's Tx and Rx processes. */ 1133 stop_nic_rxtx(ioaddr, 0); 1134 1135 /* Disable interrupts by clearing the interrupt mask. */ 1136 iowrite32(0, ioaddr + IMR); 1137 1138 /* Reset the chip to erase previous misconfiguration. */ 1139 iowrite32(0x00000001, ioaddr + BCR); 1140 1141 /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw). 1142 We surely wait too long (address+data phase). Who cares? 
*/ 1143 while (--delay) { 1144 ioread32(ioaddr + BCR); 1145 rmb(); 1146 } 1147} 1148 1149 1150/* Take lock before calling */ 1151/* Restore chip after reset */ 1152static void enable_rxtx(struct net_device *dev) 1153{ 1154 struct netdev_private *np = netdev_priv(dev); 1155 void __iomem *ioaddr = np->mem; 1156 1157 reset_rx_descriptors(dev); 1158 1159 iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring), 1160 ioaddr + TXLBA); 1161 iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring), 1162 ioaddr + RXLBA); 1163 1164 iowrite32(np->bcrvalue, ioaddr + BCR); 1165 1166 iowrite32(0, ioaddr + RXPDR); 1167 __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */ 1168 1169 /* Clear and Enable interrupts by setting the interrupt mask. */ 1170 iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR); 1171 iowrite32(np->imrvalue, ioaddr + IMR); 1172 1173 iowrite32(0, ioaddr + TXPDR); 1174} 1175 1176 1177static void reset_timer(unsigned long data) 1178{ 1179 struct net_device *dev = (struct net_device *) data; 1180 struct netdev_private *np = netdev_priv(dev); 1181 unsigned long flags; 1182 1183 printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name); 1184 1185 spin_lock_irqsave(&np->lock, flags); 1186 np->crvalue = np->crvalue_sv; 1187 np->imrvalue = np->imrvalue_sv; 1188 1189 reset_and_disable_rxtx(dev); 1190 /* works for me without this: 1191 reset_tx_descriptors(dev); */ 1192 enable_rxtx(dev); 1193 netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */ 1194 1195 np->reset_timer_armed = 0; 1196 1197 spin_unlock_irqrestore(&np->lock, flags); 1198} 1199 1200 1201static void tx_timeout(struct net_device *dev) 1202{ 1203 struct netdev_private *np = netdev_priv(dev); 1204 void __iomem *ioaddr = np->mem; 1205 unsigned long flags; 1206 int i; 1207 1208 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x," 1209 " resetting...\n", dev->name, ioread32(ioaddr + ISR)); 1210 1211 { 1212 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); 1213 for (i = 0; i < RX_RING_SIZE; i++) 1214 printk(" %8.8x", (unsigned int) np->rx_ring[i].status); 1215 printk("\n" KERN_DEBUG " Tx ring %p: ", np->tx_ring); 1216 for (i = 0; i < TX_RING_SIZE; i++) 1217 printk(" %4.4x", np->tx_ring[i].status); 1218 printk("\n"); 1219 } 1220 1221 spin_lock_irqsave(&np->lock, flags); 1222 1223 reset_and_disable_rxtx(dev); 1224 reset_tx_descriptors(dev); 1225 enable_rxtx(dev); 1226 1227 spin_unlock_irqrestore(&np->lock, flags); 1228 1229 dev->trans_start = jiffies; 1230 np->stats.tx_errors++; 1231 netif_wake_queue(dev); /* or .._start_.. ?? */ 1232} 1233 1234 1235/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ 1236static void init_ring(struct net_device *dev) 1237{ 1238 struct netdev_private *np = netdev_priv(dev); 1239 int i; 1240 1241 /* initialize rx variables */ 1242 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); 1243 np->cur_rx = &np->rx_ring[0]; 1244 np->lack_rxbuf = np->rx_ring; 1245 np->really_rx_count = 0; 1246 1247 /* initial rx descriptors. 
 */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
		np->rx_ring[i].next_desc = np->rx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
		np->rx_ring[i].skbuff = NULL;
	}

	/* for the last rx descriptor */
	np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
	np->rx_ring[i - 1].next_desc_logical = np->rx_ring;

	/* allocate skb for rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);

		if (skb == NULL) {
			np->lack_rxbuf = &np->rx_ring[i];
			break;
		}

		++np->really_rx_count;
		np->rx_ring[i].skbuff = skb;
		skb->dev = dev;	/* Mark as being used by this device. */
		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		np->rx_ring[i].status = RXOWN;
		np->rx_ring[i].control |= RXIC;
	}

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].status = 0;
		/* do we need np->tx_ring[i].control = XXX; ?? */
		np->tx_ring[i].next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
		np->tx_ring[i].skbuff = NULL;
	}

	/* for the last tx descriptor */
	np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
}


static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);

	np->cur_tx_copy->skbuff = skb;

#define one_buffer
#define BPT 1022
#if defined(one_buffer)
	np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
		skb->len, PCI_DMA_TODEVICE);
	np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
	np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
	np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
// 89/12/29 add,
	if (np->pci_dev->device == 0x891)
		np->cur_tx_copy->control |= ETIControl | RetryTxLC;
	np->cur_tx_copy->status = TXOWN;
	np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
	--np->free_tx_count;
#elif defined(two_buffer)
	if (skb->len > BPT) {
		struct fealnx_desc *next;

		/* for the first descriptor */
		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
			BPT, PCI_DMA_TODEVICE);
		np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (BPT << TBSShift);	/* buffer size */

		/* for the last descriptor */
		next = np->cur_tx_copy->next_desc_logical;
		next->skbuff = skb;
		next->control = TXIC | TXLD | CRCEnable | PADEnable;
		next->control |= (skb->len << PKTSShift);	/* pkt size */
		next->control |= ((skb->len - BPT) << TBSShift);	/* buf size */
// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
			skb->len - BPT,
PCI_DMA_TODEVICE); 1345 1346 next->status = TXOWN; 1347 np->cur_tx_copy->status = TXOWN; 1348 1349 np->cur_tx_copy = next->next_desc_logical; 1350 np->free_tx_count -= 2; 1351 } else { 1352 np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data, 1353 skb->len, PCI_DMA_TODEVICE); 1354 np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable; 1355 np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ 1356 np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */ 1357// 89/12/29 add, 1358 if (np->pci_dev->device == 0x891) 1359 np->cur_tx_copy->control |= ETIControl | RetryTxLC; 1360 np->cur_tx_copy->status = TXOWN; 1361 np->cur_tx_copy = np->cur_tx_copy->next_desc_logical; 1362 --np->free_tx_count; 1363 } 1364#endif 1365 1366 if (np->free_tx_count < 2) 1367 netif_stop_queue(dev); 1368 ++np->really_tx_count; 1369 iowrite32(0, np->mem + TXPDR); 1370 dev->trans_start = jiffies; 1371 1372 spin_unlock_irqrestore(&np->lock, flags); 1373 return 0; 1374} 1375 1376 1377/* Take lock before calling */ 1378/* Chip probably hosed tx ring. Clean up. */ 1379static void reset_tx_descriptors(struct net_device *dev) 1380{ 1381 struct netdev_private *np = netdev_priv(dev); 1382 struct fealnx_desc *cur; 1383 int i; 1384 1385 /* initialize tx variables */ 1386 np->cur_tx = &np->tx_ring[0]; 1387 np->cur_tx_copy = &np->tx_ring[0]; 1388 np->really_tx_count = 0; 1389 np->free_tx_count = TX_RING_SIZE; 1390 1391 for (i = 0; i < TX_RING_SIZE; i++) { 1392 cur = &np->tx_ring[i]; 1393 if (cur->skbuff) { 1394 pci_unmap_single(np->pci_dev, cur->buffer, 1395 cur->skbuff->len, PCI_DMA_TODEVICE); 1396 dev_kfree_skb_any(cur->skbuff); 1397 cur->skbuff = NULL; 1398 } 1399 cur->status = 0; 1400 cur->control = 0; /* needed? */ 1401 /* probably not needed. We do it for purely paranoid reasons */ 1402 cur->next_desc = np->tx_ring_dma + 1403 (i + 1)*sizeof(struct fealnx_desc); 1404 cur->next_desc_logical = &np->tx_ring[i + 1]; 1405 } 1406 /* for the last tx descriptor */ 1407 np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma; 1408 np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0]; 1409} 1410 1411 1412/* Take lock and stop rx before calling this */ 1413static void reset_rx_descriptors(struct net_device *dev) 1414{ 1415 struct netdev_private *np = netdev_priv(dev); 1416 struct fealnx_desc *cur = np->cur_rx; 1417 int i; 1418 1419 allocate_rx_buffers(dev); 1420 1421 for (i = 0; i < RX_RING_SIZE; i++) { 1422 if (cur->skbuff) 1423 cur->status = RXOWN; 1424 cur = cur->next_desc_logical; 1425 } 1426 1427 iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring), 1428 np->mem + RXLBA); 1429} 1430 1431 1432/* The interrupt handler does all of the Rx thread work and cleans up 1433 after the Tx thread. */ 1434static irqreturn_t intr_handler(int irq, void *dev_instance) 1435{ 1436 struct net_device *dev = (struct net_device *) dev_instance; 1437 struct netdev_private *np = netdev_priv(dev); 1438 void __iomem *ioaddr = np->mem; 1439 long boguscnt = max_interrupt_work; 1440 unsigned int num_tx = 0; 1441 int handled = 0; 1442 1443 spin_lock(&np->lock); 1444 1445 iowrite32(0, ioaddr + IMR); 1446 1447 do { 1448 u32 intr_status = ioread32(ioaddr + ISR); 1449 1450 /* Acknowledge all of the current interrupt sources ASAP. 
*/ 1451 iowrite32(intr_status, ioaddr + ISR); 1452 1453 if (debug) 1454 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name, 1455 intr_status); 1456 1457 if (!(intr_status & np->imrvalue)) 1458 break; 1459 1460 handled = 1; 1461 1462// 90/1/16 delete, 1463// 1464// if (intr_status & FBE) 1465// { /* fatal error */ 1466// stop_nic_tx(ioaddr, 0); 1467// stop_nic_rx(ioaddr, 0); 1468// break; 1469// }; 1470 1471 if (intr_status & TUNF) 1472 iowrite32(0, ioaddr + TXPDR); 1473 1474 if (intr_status & CNTOVF) { 1475 /* missed pkts */ 1476 np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; 1477 1478 /* crc error */ 1479 np->stats.rx_crc_errors += 1480 (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; 1481 } 1482 1483 if (intr_status & (RI | RBU)) { 1484 if (intr_status & RI) 1485 netdev_rx(dev); 1486 else { 1487 stop_nic_rx(ioaddr, np->crvalue); 1488 reset_rx_descriptors(dev); 1489 iowrite32(np->crvalue, ioaddr + TCRRCR); 1490 } 1491 } 1492 1493 while (np->really_tx_count) { 1494 long tx_status = np->cur_tx->status; 1495 long tx_control = np->cur_tx->control; 1496 1497 if (!(tx_control & TXLD)) { /* this pkt is combined by two tx descriptors */ 1498 struct fealnx_desc *next; 1499 1500 next = np->cur_tx->next_desc_logical; 1501 tx_status = next->status; 1502 tx_control = next->control; 1503 } 1504 1505 if (tx_status & TXOWN) 1506 break; 1507 1508 if (!(np->crvalue & CR_W_ENH)) { 1509 if (tx_status & (CSL | LC | EC | UDF | HF)) { 1510 np->stats.tx_errors++; 1511 if (tx_status & EC) 1512 np->stats.tx_aborted_errors++; 1513 if (tx_status & CSL) 1514 np->stats.tx_carrier_errors++; 1515 if (tx_status & LC) 1516 np->stats.tx_window_errors++; 1517 if (tx_status & UDF) 1518 np->stats.tx_fifo_errors++; 1519 if ((tx_status & HF) && np->mii.full_duplex == 0) 1520 np->stats.tx_heartbeat_errors++; 1521 1522 } else { 1523 np->stats.tx_bytes += 1524 ((tx_control & PKTSMask) >> PKTSShift); 1525 1526 np->stats.collisions += 1527 ((tx_status & NCRMask) >> NCRShift); 1528 np->stats.tx_packets++; 1529 } 1530 } else { 1531 np->stats.tx_bytes += 1532 ((tx_control & PKTSMask) >> PKTSShift); 1533 np->stats.tx_packets++; 1534 } 1535 1536 /* Free the original skb. */ 1537 pci_unmap_single(np->pci_dev, np->cur_tx->buffer, 1538 np->cur_tx->skbuff->len, PCI_DMA_TODEVICE); 1539 dev_kfree_skb_irq(np->cur_tx->skbuff); 1540 np->cur_tx->skbuff = NULL; 1541 --np->really_tx_count; 1542 if (np->cur_tx->control & TXLD) { 1543 np->cur_tx = np->cur_tx->next_desc_logical; 1544 ++np->free_tx_count; 1545 } else { 1546 np->cur_tx = np->cur_tx->next_desc_logical; 1547 np->cur_tx = np->cur_tx->next_desc_logical; 1548 np->free_tx_count += 2; 1549 } 1550 num_tx++; 1551 } /* end of for loop */ 1552 1553 if (num_tx && np->free_tx_count >= 2) 1554 netif_wake_queue(dev); 1555 1556 /* read transmit status for enhanced mode only */ 1557 if (np->crvalue & CR_W_ENH) { 1558 long data; 1559 1560 data = ioread32(ioaddr + TSR); 1561 np->stats.tx_errors += (data & 0xff000000) >> 24; 1562 np->stats.tx_aborted_errors += (data & 0xff000000) >> 24; 1563 np->stats.tx_window_errors += (data & 0x00ff0000) >> 16; 1564 np->stats.collisions += (data & 0x0000ffff); 1565 } 1566 1567 if (--boguscnt < 0) { 1568 printk(KERN_WARNING "%s: Too much work at interrupt, " 1569 "status=0x%4.4x.\n", dev->name, intr_status); 1570 if (!np->reset_timer_armed) { 1571 np->reset_timer_armed = 1; 1572 np->reset_timer.expires = RUN_AT(HZ/2); 1573 add_timer(&np->reset_timer); 1574 stop_nic_rxtx(ioaddr, 0); 1575 netif_stop_queue(dev); 1576 /* or netif_tx_disable(dev); ?? 
*/ 1577 /* Prevent other paths from enabling tx,rx,intrs */ 1578 np->crvalue_sv = np->crvalue; 1579 np->imrvalue_sv = np->imrvalue; 1580 np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */ 1581 np->imrvalue = 0; 1582 } 1583 1584 break; 1585 } 1586 } while (1); 1587 1588 /* read the tally counters */ 1589 /* missed pkts */ 1590 np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; 1591 1592 /* crc error */ 1593 np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; 1594 1595 if (debug) 1596 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", 1597 dev->name, ioread32(ioaddr + ISR)); 1598 1599 iowrite32(np->imrvalue, ioaddr + IMR); 1600 1601 spin_unlock(&np->lock); 1602 1603 return IRQ_RETVAL(handled); 1604} 1605 1606 1607/* This routine is logically part of the interrupt handler, but separated 1608 for clarity and better register allocation. */ 1609static int netdev_rx(struct net_device *dev) 1610{ 1611 struct netdev_private *np = netdev_priv(dev); 1612 void __iomem *ioaddr = np->mem; 1613 1614 /* If EOP is set on the next entry, it's a new packet. Send it up. */ 1615 while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) { 1616 s32 rx_status = np->cur_rx->status; 1617 1618 if (np->really_rx_count == 0) 1619 break; 1620 1621 if (debug) 1622 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", rx_status); 1623 1624 if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) 1625 || (rx_status & ErrorSummary)) { 1626 if (rx_status & ErrorSummary) { /* there was a fatal error */ 1627 if (debug) 1628 printk(KERN_DEBUG 1629 "%s: Receive error, Rx status %8.8x.\n", 1630 dev->name, rx_status); 1631 1632 np->stats.rx_errors++; /* end of a packet. */ 1633 if (rx_status & (LONG | RUNT)) 1634 np->stats.rx_length_errors++; 1635 if (rx_status & RXER) 1636 np->stats.rx_frame_errors++; 1637 if (rx_status & CRC) 1638 np->stats.rx_crc_errors++; 1639 } else { 1640 int need_to_reset = 0; 1641 int desno = 0; 1642 1643 if (rx_status & RXFSD) { /* this pkt is too long, over one rx buffer */ 1644 struct fealnx_desc *cur; 1645 1646 /* check this packet is received completely? */ 1647 cur = np->cur_rx; 1648 while (desno <= np->really_rx_count) { 1649 ++desno; 1650 if ((!(cur->status & RXOWN)) 1651 && (cur->status & RXLSD)) 1652 break; 1653 /* goto next rx descriptor */ 1654 cur = cur->next_desc_logical; 1655 } 1656 if (desno > np->really_rx_count) 1657 need_to_reset = 1; 1658 } else /* RXLSD did not find, something error */ 1659 need_to_reset = 1; 1660 1661 if (need_to_reset == 0) { 1662 int i; 1663 1664 np->stats.rx_length_errors++; 1665 1666 /* free all rx descriptors related this long pkt */ 1667 for (i = 0; i < desno; ++i) { 1668 if (!np->cur_rx->skbuff) { 1669 printk(KERN_DEBUG 1670 "%s: I'm scared\n", dev->name); 1671 break; 1672 } 1673 np->cur_rx->status = RXOWN; 1674 np->cur_rx = np->cur_rx->next_desc_logical; 1675 } 1676 continue; 1677 } else { /* rx error, need to reset this chip */ 1678 stop_nic_rx(ioaddr, np->crvalue); 1679 reset_rx_descriptors(dev); 1680 iowrite32(np->crvalue, ioaddr + TCRRCR); 1681 } 1682 break; /* exit the while loop */ 1683 } 1684 } else { /* this received pkt is ok */ 1685 1686 struct sk_buff *skb; 1687 /* Omit the four octet CRC from the length. 
*/ 1688 short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4; 1689 1690#ifndef final_version 1691 if (debug) 1692 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d" 1693 " status %x.\n", pkt_len, rx_status); 1694#endif 1695 1696 /* Check if the packet is long enough to accept without copying 1697 to a minimally-sized skbuff. */ 1698 if (pkt_len < rx_copybreak && 1699 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1700 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1701 pci_dma_sync_single_for_cpu(np->pci_dev, 1702 np->cur_rx->buffer, 1703 np->rx_buf_sz, 1704 PCI_DMA_FROMDEVICE); 1705 /* Call copy + cksum if available. */ 1706 1707#if ! defined(__alpha__) 1708 skb_copy_to_linear_data(skb, 1709 np->cur_rx->skbuff->data, pkt_len); 1710 skb_put(skb, pkt_len); 1711#else 1712 memcpy(skb_put(skb, pkt_len), 1713 np->cur_rx->skbuff->data, pkt_len); 1714#endif 1715 pci_dma_sync_single_for_device(np->pci_dev, 1716 np->cur_rx->buffer, 1717 np->rx_buf_sz, 1718 PCI_DMA_FROMDEVICE); 1719 } else { 1720 pci_unmap_single(np->pci_dev, 1721 np->cur_rx->buffer, 1722 np->rx_buf_sz, 1723 PCI_DMA_FROMDEVICE); 1724 skb_put(skb = np->cur_rx->skbuff, pkt_len); 1725 np->cur_rx->skbuff = NULL; 1726 --np->really_rx_count; 1727 } 1728 skb->protocol = eth_type_trans(skb, dev); 1729 netif_rx(skb); 1730 dev->last_rx = jiffies; 1731 np->stats.rx_packets++; 1732 np->stats.rx_bytes += pkt_len; 1733 } 1734 1735 np->cur_rx = np->cur_rx->next_desc_logical; 1736 } /* end of while loop */ 1737 1738 /* allocate skb for rx buffers */ 1739 allocate_rx_buffers(dev); 1740 1741 return 0; 1742} 1743 1744 1745static struct net_device_stats *get_stats(struct net_device *dev) 1746{ 1747 struct netdev_private *np = netdev_priv(dev); 1748 void __iomem *ioaddr = np->mem; 1749 1750 /* The chip only need report frame silently dropped. */ 1751 if (netif_running(dev)) { 1752 np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; 1753 np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; 1754 } 1755 1756 return &np->stats; 1757} 1758 1759 1760/* for dev->set_multicast_list */ 1761static void set_rx_mode(struct net_device *dev) 1762{ 1763 spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock; 1764 unsigned long flags; 1765 spin_lock_irqsave(lp, flags); 1766 __set_rx_mode(dev); 1767 spin_unlock_irqrestore(lp, flags); 1768} 1769 1770 1771/* Take lock before calling */ 1772static void __set_rx_mode(struct net_device *dev) 1773{ 1774 struct netdev_private *np = netdev_priv(dev); 1775 void __iomem *ioaddr = np->mem; 1776 u32 mc_filter[2]; /* Multicast hash filter */ 1777 u32 rx_mode; 1778 1779 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1780 memset(mc_filter, 0xff, sizeof(mc_filter)); 1781 rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM; 1782 } else if ((dev->mc_count > multicast_filter_limit) 1783 || (dev->flags & IFF_ALLMULTI)) { 1784 /* Too many to match, or accept all multicasts. 
*/ 1785 memset(mc_filter, 0xff, sizeof(mc_filter)); 1786 rx_mode = CR_W_AB | CR_W_AM; 1787 } else { 1788 struct dev_mc_list *mclist; 1789 int i; 1790 1791 memset(mc_filter, 0, sizeof(mc_filter)); 1792 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1793 i++, mclist = mclist->next) { 1794 unsigned int bit; 1795 bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F; 1796 mc_filter[bit >> 5] |= (1 << bit); 1797 } 1798 rx_mode = CR_W_AB | CR_W_AM; 1799 } 1800 1801 stop_nic_rxtx(ioaddr, np->crvalue); 1802 1803 iowrite32(mc_filter[0], ioaddr + MAR0); 1804 iowrite32(mc_filter[1], ioaddr + MAR1); 1805 np->crvalue &= ~CR_W_RXMODEMASK; 1806 np->crvalue |= rx_mode; 1807 iowrite32(np->crvalue, ioaddr + TCRRCR); 1808} 1809 1810static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1811{ 1812 struct netdev_private *np = netdev_priv(dev); 1813 1814 strcpy(info->driver, DRV_NAME); 1815 strcpy(info->version, DRV_VERSION); 1816 strcpy(info->bus_info, pci_name(np->pci_dev)); 1817} 1818 1819static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1820{ 1821 struct netdev_private *np = netdev_priv(dev); 1822 int rc; 1823 1824 spin_lock_irq(&np->lock); 1825 rc = mii_ethtool_gset(&np->mii, cmd); 1826 spin_unlock_irq(&np->lock); 1827 1828 return rc; 1829} 1830 1831static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1832{ 1833 struct netdev_private *np = netdev_priv(dev); 1834 int rc; 1835 1836 spin_lock_irq(&np->lock); 1837 rc = mii_ethtool_sset(&np->mii, cmd); 1838 spin_unlock_irq(&np->lock); 1839 1840 return rc; 1841} 1842 1843static int netdev_nway_reset(struct net_device *dev) 1844{ 1845 struct netdev_private *np = netdev_priv(dev); 1846 return mii_nway_restart(&np->mii); 1847} 1848 1849static u32 netdev_get_link(struct net_device *dev) 1850{ 1851 struct netdev_private *np = netdev_priv(dev); 1852 return mii_link_ok(&np->mii); 1853} 1854 1855static u32 netdev_get_msglevel(struct net_device *dev) 1856{ 1857 return debug; 1858} 1859 1860static void netdev_set_msglevel(struct net_device *dev, u32 value) 1861{ 1862 debug = value; 1863} 1864 1865static const struct ethtool_ops netdev_ethtool_ops = { 1866 .get_drvinfo = netdev_get_drvinfo, 1867 .get_settings = netdev_get_settings, 1868 .set_settings = netdev_set_settings, 1869 .nway_reset = netdev_nway_reset, 1870 .get_link = netdev_get_link, 1871 .get_msglevel = netdev_get_msglevel, 1872 .set_msglevel = netdev_set_msglevel, 1873}; 1874 1875static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1876{ 1877 struct netdev_private *np = netdev_priv(dev); 1878 int rc; 1879 1880 if (!netif_running(dev)) 1881 return -EINVAL; 1882 1883 spin_lock_irq(&np->lock); 1884 rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL); 1885 spin_unlock_irq(&np->lock); 1886 1887 return rc; 1888} 1889 1890 1891static int netdev_close(struct net_device *dev) 1892{ 1893 struct netdev_private *np = netdev_priv(dev); 1894 void __iomem *ioaddr = np->mem; 1895 int i; 1896 1897 netif_stop_queue(dev); 1898 1899 /* Disable interrupts by clearing the interrupt mask. */ 1900 iowrite32(0x0000, ioaddr + IMR); 1901 1902 /* Stop the chip's Tx and Rx processes. */ 1903 stop_nic_rxtx(ioaddr, 0); 1904 1905 del_timer_sync(&np->timer); 1906 del_timer_sync(&np->reset_timer); 1907 1908 free_irq(dev->irq, dev); 1909 1910 /* Free all the skbuffs in the Rx queue. 
 */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = np->rx_ring[i].skbuff;

		np->rx_ring[i].status = 0;
		if (skb) {
			pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_ring[i].skbuff = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_ring[i].skbuff;

		if (skb) {
			pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_ring[i].skbuff = NULL;
		}
	}

	return 0;
}

static struct pci_device_id fealnx_pci_tbl[] = {
	{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{}			/* terminate list */
};
MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);


static struct pci_driver fealnx_driver = {
	.name		= "fealnx",
	.id_table	= fealnx_pci_tbl,
	.probe		= fealnx_init_one,
	.remove		= __devexit_p(fealnx_remove_one),
};

static int __init fealnx_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif

	return pci_register_driver(&fealnx_driver);
}

static void __exit fealnx_exit(void)
{
	pci_unregister_driver(&fealnx_driver);
}

module_init(fealnx_init);
module_exit(fealnx_exit);
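/*
 * Editorial usage note (not part of the original source): the module
 * parameters declared near the top of this file can be supplied at load
 * time.  A minimal sketch, assuming the driver is built as a module
 * named fealnx.ko and that two cards are present (card indices are
 * assigned in probe order):
 *
 *   modprobe fealnx debug=1 rx_copybreak=200 options=0,0x200 full_duplex=-1,1
 *
 * 'options' and 'full_duplex' are per-card arrays of up to MAX_UNITS
 * entries; fealnx_init_one() reads options[card_idx] (the low four bits
 * select the media type, 0x200 forces full duplex) and full_duplex[card_idx].
 * The specific values above are hypothetical examples, not recommendations.
 */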