Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
fealnx.c at v2.6.30-rc8 (1972 lines, 55 kB)
/*
	Written 1998-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support information and updates available at
	http://www.scyld.com/network/pci-skeleton.html

	Linux kernel updates:

	Version 2.51, Nov 17, 2001 (jgarzik):
	- Add ethtool support
	- Replace some MII-related magic numbers with constants
*/

#define DRV_NAME	"fealnx"
#define DRV_VERSION	"2.52"
#define DRV_RELDATE	"Sep-11-2006"

static int debug;		/* 1-> print debug message */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
/* Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc. */
/* Both 'options[]' and 'full_duplex[]' should exist for driver */
/* interoperability. */
/* The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };

/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency. */
/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
/* Making the Tx ring too large decreases the effectiveness of channel */
/* bonding and packet priority. */
/* There are no ill effects from too-large receive rings. */
// 88-12-9 modify,
// #define TX_RING_SIZE 16
// #define RX_RING_SIZE 32
#define TX_RING_SIZE 6
#define RX_RING_SIZE 12
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct fealnx_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct fealnx_desc)

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */


/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";


/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. */
#ifndef __alpha__
#define USE_IO_OPS
#endif

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
/* This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))

MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");

enum {
	MIN_REGION_SIZE		= 136,
};

/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {
	HAS_MII_XCVR,
	HAS_CHIP_XCVR,
};

/* 89/6/13 add, */
/* for different PHY */
enum phy_type_flags {
	MysonPHY = 1,
	AhdocPHY = 2,
	SeeqPHY = 3,
	MarvellPHY = 4,
	Myson981 = 5,
	LevelOnePHY = 6,
	OtherPHY = 10,
};

struct chip_info {
	char *chip_name;
	int flags;
};

static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
	{ "100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
	{ "100/10M Ethernet PCI Adapter",	HAS_CHIP_XCVR },
	{ "1000/100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
};

/* Offsets to the Command and Status Registers. */
enum fealnx_offsets {
	PAR0 = 0x0,		/* physical address 0-3 */
	PAR1 = 0x04,		/* physical address 4-5 */
	MAR0 = 0x08,		/* multicast address 0-3 */
	MAR1 = 0x0C,		/* multicast address 4-7 */
	FAR0 = 0x10,		/* flow-control address 0-3 */
	FAR1 = 0x14,		/* flow-control address 4-5 */
	TCRRCR = 0x18,		/* receive & transmit configuration */
	BCR = 0x1C,		/* bus command */
	TXPDR = 0x20,		/* transmit polling demand */
	RXPDR = 0x24,		/* receive polling demand */
	RXCWP = 0x28,		/* receive current word pointer */
	TXLBA = 0x2C,		/* transmit list base address */
	RXLBA = 0x30,		/* receive list base address */
	ISR = 0x34,		/* interrupt status */
	IMR = 0x38,		/* interrupt mask */
	FTH = 0x3C,		/* flow control high/low threshold */
	MANAGEMENT = 0x40,	/* bootrom/eeprom and mii management */
	TALLY = 0x44,		/* tally counters for crc and mpa */
	TSR = 0x48,		/* tally counter for transmit status */
	BMCRSR = 0x4c,		/* basic mode control and status */
	PHYIDENTIFIER = 0x50,	/* phy identifier */
	ANARANLPAR = 0x54,	/* auto-negotiation advertisement and link
				   partner ability */
	ANEROCR = 0x58,		/* auto-negotiation expansion and pci conf. */
	BPREMRPSR = 0x5c,	/* bypass & receive error mask and phy status */
};

/* Bits in the interrupt status/enable registers. */
/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
enum intr_status_bits {
	RFCON = 0x00020000,	/* receive flow control xon packet */
	RFCOFF = 0x00010000,	/* receive flow control xoff packet */
	LSCStatus = 0x00008000,	/* link status change */
	ANCStatus = 0x00004000,	/* autonegotiation completed */
	FBE = 0x00002000,	/* fatal bus error */
	FBEMask = 0x00001800,	/* mask bit12-11 */
	ParityErr = 0x00000000,	/* parity error */
	TargetErr = 0x00001000,	/* target abort */
	MasterErr = 0x00000800,	/* master error */
	TUNF = 0x00000400,	/* transmit underflow */
	ROVF = 0x00000200,	/* receive overflow */
	ETI = 0x00000100,	/* transmit early int */
	ERI = 0x00000080,	/* receive early int */
	CNTOVF = 0x00000040,	/* counter overflow */
	RBU = 0x00000020,	/* receive buffer unavailable */
	TBU = 0x00000010,	/* transmit buffer unavailable */
	TI = 0x00000008,	/* transmit interrupt */
	RI = 0x00000004,	/* receive interrupt */
	RxErr = 0x00000002,	/* receive error */
};

/* Bits in the NetworkConfig register, W for writing, R for reading */
/* FIXME: some names are invented by me. Marked with (name?) */
/* If you have docs and know bit names, please fix 'em */
enum rx_mode_bits {
	CR_W_ENH = 0x02000000,		/* enhanced mode (name?) */
	CR_W_FD = 0x00100000,		/* full duplex */
	CR_W_PS10 = 0x00080000,		/* 10 mbit */
	CR_W_TXEN = 0x00040000,		/* tx enable (name?) */
	CR_W_PS1000 = 0x00010000,	/* 1000 mbit */
/*	CR_W_RXBURSTMASK = 0x00000e00,	I'm unsure about this */
	CR_W_RXMODEMASK = 0x000000e0,
	CR_W_PROM = 0x00000080,		/* promiscuous mode */
	CR_W_AB = 0x00000040,		/* accept broadcast */
	CR_W_AM = 0x00000020,		/* accept multicast */
	CR_W_ARP = 0x00000008,		/* receive runt pkt */
	CR_W_ALP = 0x00000004,		/* receive long pkt */
	CR_W_SEP = 0x00000002,		/* receive error pkt */
	CR_W_RXEN = 0x00000001,		/* rx enable (unicast?) (name?) */

	CR_R_TXSTOP = 0x04000000,	/* tx stopped (name?) */
	CR_R_FD = 0x00100000,		/* full duplex detected */
	CR_R_PS10 = 0x00080000,		/* 10 mbit detected */
	CR_R_RXSTOP = 0x00008000,	/* rx stopped (name?) */
};
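
/*
 * A note on the descriptor layout that follows: each fealnx_desc is shared
 * with the NIC.  'next_desc' carries the DMA address of the following
 * descriptor so the hardware can walk the ring, while 'next_desc_logical'
 * mirrors it with a kernel virtual pointer for the driver.  init_ring()
 * links the last entry back to the first to form a circular list, and
 * ownership is handed to the NIC by setting RXOWN/TXOWN in 'status'.
 */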
/* The Tulip Rx and Tx buffer descriptors. */
struct fealnx_desc {
	s32 status;
	s32 control;
	u32 buffer;
	u32 next_desc;
	struct fealnx_desc *next_desc_logical;
	struct sk_buff *skbuff;
	u32 reserved1;
	u32 reserved2;
};

/* Bits in network_desc.status */
enum rx_desc_status_bits {
	RXOWN = 0x80000000,	/* own bit */
	FLNGMASK = 0x0fff0000,	/* frame length */
	FLNGShift = 16,
	MARSTATUS = 0x00004000,	/* multicast address received */
	BARSTATUS = 0x00002000,	/* broadcast address received */
	PHYSTATUS = 0x00001000,	/* physical address received */
	RXFSD = 0x00000800,	/* first descriptor */
	RXLSD = 0x00000400,	/* last descriptor */
	ErrorSummary = 0x80,	/* error summary */
	RUNT = 0x40,		/* runt packet received */
	LONG = 0x20,		/* long packet received */
	FAE = 0x10,		/* frame align error */
	CRC = 0x08,		/* crc error */
	RXER = 0x04,		/* receive error */
};

enum rx_desc_control_bits {
	RXIC = 0x00800000,	/* interrupt control */
	RBSShift = 0,
};

enum tx_desc_status_bits {
	TXOWN = 0x80000000,	/* own bit */
	JABTO = 0x00004000,	/* jabber timeout */
	CSL = 0x00002000,	/* carrier sense lost */
	LC = 0x00001000,	/* late collision */
	EC = 0x00000800,	/* excessive collision */
	UDF = 0x00000400,	/* fifo underflow */
	DFR = 0x00000200,	/* deferred */
	HF = 0x00000100,	/* heartbeat fail */
	NCRMask = 0x000000ff,	/* collision retry count */
	NCRShift = 0,
};

enum tx_desc_control_bits {
	TXIC = 0x80000000,	/* interrupt control */
	ETIControl = 0x40000000,	/* early transmit interrupt */
	TXLD = 0x20000000,	/* last descriptor */
	TXFD = 0x10000000,	/* first descriptor */
	CRCEnable = 0x08000000,	/* crc control */
	PADEnable = 0x04000000,	/* padding control */
	RetryTxLC = 0x02000000,	/* retry late collision */
	PKTSMask = 0x3ff800,	/* packet size bit21-11 */
	PKTSShift = 11,
	TBSMask = 0x000007ff,	/* transmit buffer bit 10-0 */
	TBSShift = 0,
};

/* BootROM/EEPROM/MII Management Register */
#define MASK_MIIR_MII_READ	0x00000000
#define MASK_MIIR_MII_WRITE	0x00000008
#define MASK_MIIR_MII_MDO	0x00000004
#define MASK_MIIR_MII_MDI	0x00000002
#define MASK_MIIR_MII_MDC	0x00000001

/* ST+OP+PHYAD+REGAD+TA */
#define OP_READ		0x6000	/* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
#define OP_WRITE	0x5002	/* ST:01+OP:01+PHYAD+REGAD+TA:10 */

/* ------------------------------------------------------------------------- */
/* Constants for Myson PHY */
/* ------------------------------------------------------------------------- */
#define MysonPHYID	0xd0000302
/* 89-7-27 add, (begin) */
#define MysonPHYID0	0x0302
#define StatusRegister	18
#define SPEED100	0x0400	// bit10
#define FULLMODE	0x0800	// bit11
/* 89-7-27 add, (end) */

/* ------------------------------------------------------------------------- */
/* Constants for Seeq 80225 PHY */
/* ------------------------------------------------------------------------- */
#define SeeqPHYID0	0x0016

#define MIIRegister18	18
#define SPD_DET_100	0x80
#define DPLX_DET_FULL	0x40

/* ------------------------------------------------------------------------- */
/* Constants for Ahdoc 101 PHY */
/* ------------------------------------------------------------------------- */
#define AhdocPHYID0	0x0022

#define DiagnosticReg	18
#define DPLX_FULL	0x0800
#define Speed_100	0x0400

/* 89/6/13 add, */
/* -------------------------------------------------------------------------- */
/* Constants */
/* -------------------------------------------------------------------------- */
#define MarvellPHYID0		0x0141
#define LevelOnePHYID0		0x0013

#define MII1000BaseTControlReg	9
#define MII1000BaseTStatusReg	10
#define SpecificReg		17

/* for 1000BaseT Control Register */
#define PHYAbletoPerform1000FullDuplex	0x0200
#define PHYAbletoPerform1000HalfDuplex	0x0100
#define PHY1000AbilityMask		0x300

// for phy specific status register, marvell phy.
#define SpeedMask	0x0c000
#define Speed_1000M	0x08000
#define Speed_100M	0x4000
#define Speed_10M	0
#define Full_Duplex	0x2000

// 89/12/29 add, for phy specific status register, levelone phy, (begin)
#define LXT1000_100M	0x08000
#define LXT1000_1000M	0x0c000
#define LXT1000_Full	0x200
// 89/12/29 add, for phy specific status register, levelone phy, (end)

/* for 3-in-1 case, BMCRSR register */
#define LinkIsUp2	0x00040000

/* for PHY */
#define LinkIsUp	0x0004


struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct fealnx_desc *rx_ring;
	struct fealnx_desc *tx_ring;

	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	spinlock_t lock;

	struct net_device_stats stats;

	/* Media monitoring timer. */
	struct timer_list timer;

	/* Reset timer */
	struct timer_list reset_timer;
	int reset_timer_armed;
	unsigned long crvalue_sv;
	unsigned long imrvalue_sv;

	/* Frequently used values: keep some adjacent for cache effect. */
	int flags;
	struct pci_dev *pci_dev;
	unsigned long crvalue;
	unsigned long bcrvalue;
	unsigned long imrvalue;
	struct fealnx_desc *cur_rx;
	struct fealnx_desc *lack_rxbuf;
	int really_rx_count;
	struct fealnx_desc *cur_tx;
	struct fealnx_desc *cur_tx_copy;
	int really_tx_count;
	int free_tx_count;
	unsigned int rx_buf_sz;	/* Based on MTU+slack. */

	/* These values keep track of the transceiver/media in use. */
	unsigned int linkok;
	unsigned int line_speed;
	unsigned int duplexmode;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int PHYType;

	/* MII transceiver section. */
	int mii_cnt;		/* MII device addresses. */
	unsigned char phys[2];	/* MII device addresses. */
	struct mii_if_info mii;
	void __iomem *mem;
};


static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void reset_timer(unsigned long data);
static void fealnx_tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
static void reset_rx_descriptors(struct net_device *dev);
static void reset_tx_descriptors(struct net_device *dev);

static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;
	iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
			break;
	}
}


static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;
	iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
						== (CR_R_RXSTOP+CR_R_TXSTOP) )
			break;
	}
}

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_multicast_list	= set_rx_mode,
	.ndo_do_ioctl		= mii_ioctl,
	.ndo_tx_timeout		= fealnx_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit fealnx_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct netdev_private *np;
	int i, option, err, irq;
	static int card_idx = -1;
	char boardname[12];
	void __iomem *ioaddr;
	unsigned long len;
	unsigned int chip_id = ent->driver_data;
	struct net_device *dev;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif

	/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	card_idx++;
	sprintf(boardname, "fealnx%d", card_idx);

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;

	i = pci_enable_device(pdev);
	if (i) return i;
	pci_set_master(pdev);

	len = pci_resource_len(pdev, bar);
	if (len < MIN_REGION_SIZE) {
		dev_err(&pdev->dev,
			"region size %ld too small, aborting\n", len);
		return -ENODEV;
	}

	i = pci_request_regions(pdev, boardname);
	if (i)
		return i;

	irq = pdev->irq;

	ioaddr = pci_iomap(pdev, bar, len);
	if (!ioaddr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	dev = alloc_etherdev(sizeof(struct netdev_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_unmap;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* read ethernet id */
	for (i = 0; i < 6; ++i)
		dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	/* Make certain the descriptor lists are aligned. */
	np = netdev_priv(dev);
	np->mem = ioaddr;
	spin_lock_init(&np->lock);
	np->pci_dev = pdev;
	np->flags = skel_netdrv_tbl[chip_id].flags;
	pci_set_drvdata(pdev, dev);
	np->mii.dev = dev;
	np->mii.mdio_read = mdio_read;
	np->mii.mdio_write = mdio_write;
	np->mii.phy_id_mask = 0x1f;
	np->mii.reg_num_mask = 0x1f;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	np->rx_ring = (struct fealnx_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_rx;
	}
	np->tx_ring = (struct fealnx_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	/* find the connected MII xcvrs */
	if (np->flags == HAS_MII_XCVR) {
		int phy, phy_idx = 0;

		for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
			int mii_status = mdio_read(dev, phy, 1);

			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
				       "MII PHY found at address %d, status "
				       "0x%4.4x.\n", phy, mii_status);
				/* get phy type */
				{
					unsigned int data;

					data = mdio_read(dev, np->phys[0], 2);
					if (data == SeeqPHYID0)
						np->PHYType = SeeqPHY;
					else if (data == AhdocPHYID0)
						np->PHYType = AhdocPHY;
					else if (data == MarvellPHYID0)
						np->PHYType = MarvellPHY;
					else if (data == MysonPHYID0)
						np->PHYType = Myson981;
					else if (data == LevelOnePHYID0)
						np->PHYType = LevelOnePHY;
					else
						np->PHYType = OtherPHY;
				}
			}
		}

		np->mii_cnt = phy_idx;
		if (phy_idx == 0)
			dev_warn(&pdev->dev,
				"MII PHY not found -- this device may "
				"not operate correctly.\n");
	} else {
		np->phys[0] = 32;
/* 89/6/23 add, (begin) */
		/* get phy type */
		if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
			np->PHYType = MysonPHY;
		else
			np->PHYType = OtherPHY;
	}
	np->mii.phy_id = np->phys[0];

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
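	/* Illustrative example: loading with options=0x204 for the first card
	   would select media type 4 (0x204 & 15) and force full duplex via
	   the 0x200 bit tested below; options[] and full_duplex[] are indexed
	   by card_idx, one entry per adapter. */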
	if (option > 0) {
		if (option & 0x200)
			np->mii.full_duplex = 1;
		np->default_port = option & 15;
	}

	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->mii.full_duplex = full_duplex[card_idx];

	if (np->mii.full_duplex) {
		dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
/* 89/6/13 add, (begin) */
//	if (np->PHYType==MarvellPHY)
		if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], 9);
			data = (data & 0xfcff) | 0x0200;
			mdio_write(dev, np->phys[0], 9, data);
		}
/* 89/6/13 add, (end) */
		if (np->flags == HAS_MII_XCVR)
			mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
		else
			iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
		np->mii.force_media = 1;
	}

	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto err_out_free_tx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
	       dev->dev_addr, irq);

	return 0;

err_out_free_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_free_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
	return err;
}


static void __devexit fealnx_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		unregister_netdev(dev);
		pci_iounmap(pdev, np->mem);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		printk(KERN_ERR "fealnx: remove for unknown device\n");
}


static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
{
	ulong miir;
	int i;
	unsigned int mask, data;

	/* enable MII output */
	miir = (ulong) ioread32(miiport);
	miir &= 0xfffffff0;

	miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;

	/* send 32 1's preamble */
	for (i = 0; i < 32; i++) {
		/* low MDC; MDO is already high (miir) */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
	}

	/* calculate ST+OP+PHYAD+REGAD+TA */
	data = opcode | (phyad << 7) | (regad << 2);

	/* sent out */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;

		iowrite32(miir, miiport);
		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
		udelay(30);

		/* next */
		mask >>= 1;
		if (mask == 0x2 && opcode == OP_READ)
			miir &= ~MASK_MIIR_MII_WRITE;
	}
	return miir;
}


static int mdio_read(struct net_device *dev, int phyad, int regad)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;
	ulong miir;
	unsigned int mask, data;

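	/* The MII registers are bit-banged through the MANAGEMENT register:
	   m80x_send_cmd_to_phy() clocks out the 32-bit preamble and the
	   ST/OP/PHYAD/REGAD/TA header (OP_READ here), then the loop below
	   samples MDI on each MDC cycle to collect the 16 data bits, MSB
	   first. */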
	miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);

	/* read data */
	mask = 0x8000;
	data = 0;
	while (mask) {
		/* low MDC */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* read MDI */
		miir = ioread32(miiport);
		if (miir & MASK_MIIR_MII_MDI)
			data |= mask;

		/* high MDC, and wait */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
		udelay(30);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);

	return data & 0xffff;
}


static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;
	ulong miir;
	unsigned int mask;

	miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);

	/* write data */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);
}


static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int i;

	iowrite32(0x00000001, ioaddr + BCR);	/* Reset */

	if (request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev))
		return -EAGAIN;

	for (i = 0; i < 3; i++)
		iowrite16(((unsigned short*)dev->dev_addr)[i],
				ioaddr + PAR0 + i*2);

	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
	iowrite32(np->tx_ring_dma, ioaddr + TXLBA);

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword burst.
	   586: no burst limit.
	   Burst length	5:3
	   0 0 0   1
	   0 0 1   4
	   0 1 0   8
	   0 1 1   16
	   1 0 0   32
	   1 0 1   64
	   1 1 0   128
	   1 1 1   256
	   Wait the specified 50 PCI cycles after a reset by initializing
	   Tx and Rx queues and the address filter list.
	   FIXME (Ueimor): optimistic for alpha + posted writes ? */

	np->bcrvalue = 0x10;	/* little-endian, 8 burst length */
#ifdef __BIG_ENDIAN
	np->bcrvalue |= 0x04;	/* big-endian */
#endif

#if defined(__i386__) && !defined(MODULE)
	if (boot_cpu_data.x86 <= 4)
		np->crvalue = 0xa00;
	else
#endif
		np->crvalue = 0xe00;	/* rx 128 burst length */


// 89/12/29 add,
// 90/1/16 modify,
//   np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
	np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
	if (np->pci_dev->device == 0x891) {
		np->bcrvalue |= 0x200;	/* set PROG bit */
		np->crvalue |= CR_W_ENH;	/* set enhanced bit */
		np->imrvalue |= ETI;
	}
	iowrite32(np->bcrvalue, ioaddr + BCR);

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	iowrite32(0, ioaddr + RXPDR);
// 89/9/1 modify,
//   np->crvalue = 0x00e40001;    /* tx store and forward, tx/rx enable */
	np->crvalue |= 0x00e40001;	/* tx store and forward, tx/rx enable */
	np->mii.full_duplex = np->mii.force_media;
	getlinkstatus(dev);
	if (np->linkok)
		getlinktype(dev);
	__set_rx_mode(dev);

	netif_start_queue(dev);

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	if (debug)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = RUN_AT(3 * HZ);
	np->timer.data = (unsigned long) dev;
	np->timer.function = &netdev_timer;

	/* timer handler */
	add_timer(&np->timer);

	init_timer(&np->reset_timer);
	np->reset_timer.data = (unsigned long) dev;
	np->reset_timer.function = &reset_timer;
	np->reset_timer_armed = 0;

	return 0;
}


static void getlinkstatus(struct net_device *dev)
/* function: Routine will read MII Status Register to get link status.       */
/* input   : dev... pointer to the adapter block.                             */
/* output  : none.                                                            */
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int i, DelayTime = 0x1000;

	np->linkok = 0;

	if (np->PHYType == MysonPHY) {
		for (i = 0; i < DelayTime; ++i) {
			if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	} else {
		for (i = 0; i < DelayTime; ++i) {
			if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	}
}


static void getlinktype(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	if (np->PHYType == MysonPHY) {	/* 3-in-1 case */
		if (ioread32(np->mem + TCRRCR) & CR_R_FD)
			np->duplexmode = 2;	/* full duplex */
		else
			np->duplexmode = 1;	/* half duplex */
		if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
			np->line_speed = 1;	/* 10M */
		else
			np->line_speed = 2;	/* 100M */
	} else {
		if (np->PHYType == SeeqPHY) {	/* this PHY is SEEQ 80225 */
			unsigned int data;

			data = mdio_read(dev, np->phys[0], MIIRegister18);
			if (data & SPD_DET_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_DET_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		} else if (np->PHYType == AhdocPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], DiagnosticReg);
			if (data & Speed_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		}
/* 89/6/13 add, (begin) */
		else if (np->PHYType == MarvellPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & Full_Duplex)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == Speed_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == Speed_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
/* 89/6/13 add, (end) */
/* 89/7/27 add, (begin) */
		else if (np->PHYType == Myson981) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], StatusRegister);

			if (data & SPEED100)
				np->line_speed = 2;
			else
				np->line_speed = 1;

			if (data & FULLMODE)
				np->duplexmode = 2;
			else
				np->duplexmode = 1;
		}
/* 89/7/27 add, (end) */
/* 89/12/29 add */
		else if (np->PHYType == LevelOnePHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & LXT1000_Full)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == LXT1000_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == LXT1000_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
		np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
		if (np->line_speed == 1)
			np->crvalue |= CR_W_PS10;
		else if (np->line_speed == 3)
			np->crvalue |= CR_W_PS1000;
		if (np->duplexmode == 2)
			np->crvalue |= CR_W_FD;
	}
}


/* Take lock before calling this */
static void allocate_rx_buffers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* allocate skb for rx buffers */
	while (np->really_rx_count != RX_RING_SIZE) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(np->rx_buf_sz);
		if (skb == NULL)
			break;	/* Better luck next round. */

		while (np->lack_rxbuf->skbuff)
			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;

		skb->dev = dev;	/* Mark as being used by this device. */
		np->lack_rxbuf->skbuff = skb;
		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		np->lack_rxbuf->status = RXOWN;
		++np->really_rx_count;
	}
}


static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int old_crvalue = np->crvalue;
	unsigned int old_linkok = np->linkok;
	unsigned long flags;

	if (debug)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
		       "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
		       ioread32(ioaddr + TCRRCR));

	spin_lock_irqsave(&np->lock, flags);

	if (np->flags == HAS_MII_XCVR) {
		getlinkstatus(dev);
		if ((old_linkok == 0) && (np->linkok == 1)) {	/* we need to detect the media type again */
			getlinktype(dev);
			if (np->crvalue != old_crvalue) {
				stop_nic_rxtx(ioaddr, np->crvalue);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}
	}

	allocate_rx_buffers(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	np->timer.expires = RUN_AT(10 * HZ);
	add_timer(&np->timer);
}


/* Take lock before calling */
/* Reset chip and disable rx, tx and interrupts */
static void reset_and_disable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int delay = 51;

	/* Reset the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0, ioaddr + IMR);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	/* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
	   We surely wait too long (address+data phase). Who cares? */
	while (--delay) {
		ioread32(ioaddr + BCR);
		rmb();
	}
}


/* Take lock before calling */
/* Restore chip after reset */
static void enable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	reset_rx_descriptors(dev);

	iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
		ioaddr + TXLBA);
	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
		ioaddr + RXLBA);

	iowrite32(np->bcrvalue, ioaddr + BCR);

	iowrite32(0, ioaddr + RXPDR);
	__set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	iowrite32(0, ioaddr + TXPDR);
}


static void reset_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);

	spin_lock_irqsave(&np->lock, flags);
	np->crvalue = np->crvalue_sv;
	np->imrvalue = np->imrvalue_sv;

	reset_and_disable_rxtx(dev);
	/* works for me without this:
	reset_tx_descriptors(dev); */
	enable_rxtx(dev);
	netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */

	np->reset_timer_armed = 0;

	spin_unlock_irqrestore(&np->lock, flags);
}


static void fealnx_tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
	       " resetting...\n", dev->name, ioread32(ioaddr + ISR));

	{
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int) np->rx_ring[i].status);
		printk("\n" KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %4.4x", np->tx_ring[i].status);
		printk("\n");
	}

	spin_lock_irqsave(&np->lock, flags);

	reset_and_disable_rxtx(dev);
	reset_tx_descriptors(dev);
	enable_rxtx(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	netif_wake_queue(dev); /* or .._start_.. ?? */
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* initialize rx variables */
	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	np->cur_rx = &np->rx_ring[0];
	np->lack_rxbuf = np->rx_ring;
	np->really_rx_count = 0;

	/* initial rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
		np->rx_ring[i].next_desc = np->rx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
		np->rx_ring[i].skbuff = NULL;
	}

	/* for the last rx descriptor */
	np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
	np->rx_ring[i - 1].next_desc_logical = np->rx_ring;

	/* allocate skb for rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);

		if (skb == NULL) {
			np->lack_rxbuf = &np->rx_ring[i];
			break;
		}

		++np->really_rx_count;
		np->rx_ring[i].skbuff = skb;
		skb->dev = dev;	/* Mark as being used by this device. */
		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		np->rx_ring[i].status = RXOWN;
		np->rx_ring[i].control |= RXIC;
	}

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].status = 0;
		/* do we need np->tx_ring[i].control = XXX; ?? */
		np->tx_ring[i].next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
		np->tx_ring[i].skbuff = NULL;
	}

	/* for the last tx descriptor */
	np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
}


static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);

	np->cur_tx_copy->skbuff = skb;

#define one_buffer
#define BPT 1022
#if defined(one_buffer)
	np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
		skb->len, PCI_DMA_TODEVICE);
	np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
	np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
	np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
// 89/12/29 add,
	if (np->pci_dev->device == 0x891)
		np->cur_tx_copy->control |= ETIControl | RetryTxLC;
	np->cur_tx_copy->status = TXOWN;
	np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
	--np->free_tx_count;
#elif defined(two_buffer)
	if (skb->len > BPT) {
		struct fealnx_desc *next;

		/* for the first descriptor */
		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
			BPT, PCI_DMA_TODEVICE);
		np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (BPT << TBSShift);	/* buffer size */

		/* for the last descriptor */
		next = np->cur_tx_copy->next_desc_logical;
		next->skbuff = skb;
		next->control = TXIC | TXLD | CRCEnable | PADEnable;
		next->control |= (skb->len << PKTSShift);	/* pkt size */
		next->control |= ((skb->len - BPT) << TBSShift);	/* buf size */
// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
			skb->len - BPT, PCI_DMA_TODEVICE);

		next->status = TXOWN;
		np->cur_tx_copy->status = TXOWN;

		np->cur_tx_copy = next->next_desc_logical;
		np->free_tx_count -= 2;
	} else {
		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
			skb->len, PCI_DMA_TODEVICE);
		np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		np->cur_tx_copy->status = TXOWN;
		np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
		--np->free_tx_count;
	}
#endif

	if (np->free_tx_count < 2)
		netif_stop_queue(dev);
	++np->really_tx_count;
	iowrite32(0, np->mem + TXPDR);
	dev->trans_start = jiffies;

	spin_unlock_irqrestore(&np->lock, flags);
	return 0;
}


/* Take lock before calling */
/* Chip probably hosed tx ring. Clean up. */
static void reset_tx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur;
	int i;

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		cur = &np->tx_ring[i];
		if (cur->skbuff) {
			pci_unmap_single(np->pci_dev, cur->buffer,
				cur->skbuff->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cur->skbuff);
			cur->skbuff = NULL;
		}
		cur->status = 0;
		cur->control = 0;	/* needed? */
		/* probably not needed. We do it for purely paranoid reasons */
		cur->next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		cur->next_desc_logical = &np->tx_ring[i + 1];
	}
	/* for the last tx descriptor */
	np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
}


/* Take lock and stop rx before calling this */
static void reset_rx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur = np->cur_rx;
	int i;

	allocate_rx_buffers(dev);

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (cur->skbuff)
			cur->status = RXOWN;
		cur = cur->next_desc_logical;
	}

	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
		np->mem + RXLBA);
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	long boguscnt = max_interrupt_work;
	unsigned int num_tx = 0;
	int handled = 0;

	spin_lock(&np->lock);

	iowrite32(0, ioaddr + IMR);

	do {
		u32 intr_status = ioread32(ioaddr + ISR);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status, ioaddr + ISR);

		if (debug)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
			       intr_status);

		if (!(intr_status & np->imrvalue))
			break;

		handled = 1;

// 90/1/16 delete,
//
//      if (intr_status & FBE)
//      {   /* fatal error */
//              stop_nic_tx(ioaddr, 0);
//              stop_nic_rx(ioaddr, 0);
//              break;
//      };

		if (intr_status & TUNF)
			iowrite32(0, ioaddr + TXPDR);

		if (intr_status & CNTOVF) {
			/* missed pkts */
			np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

			/* crc error */
			np->stats.rx_crc_errors +=
			    (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
		}

		if (intr_status & (RI | RBU)) {
			if (intr_status & RI)
				netdev_rx(dev);
			else {
				stop_nic_rx(ioaddr, np->crvalue);
				reset_rx_descriptors(dev);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}

		while (np->really_tx_count) {
			long tx_status = np->cur_tx->status;
			long tx_control = np->cur_tx->control;

			if (!(tx_control & TXLD)) {	/* this pkt is split across two tx descriptors */
				struct fealnx_desc *next;

				next = np->cur_tx->next_desc_logical;
				tx_status = next->status;
				tx_control = next->control;
			}

			if (tx_status & TXOWN)
				break;

			if (!(np->crvalue & CR_W_ENH)) {
				if (tx_status & (CSL | LC | EC | UDF | HF)) {
					np->stats.tx_errors++;
					if (tx_status & EC)
						np->stats.tx_aborted_errors++;
					if (tx_status & CSL)
						np->stats.tx_carrier_errors++;
					if (tx_status & LC)
						np->stats.tx_window_errors++;
					if (tx_status & UDF)
						np->stats.tx_fifo_errors++;
					if ((tx_status & HF) && np->mii.full_duplex == 0)
						np->stats.tx_heartbeat_errors++;

				} else {
					np->stats.tx_bytes +=
					    ((tx_control & PKTSMask) >> PKTSShift);

					np->stats.collisions +=
					    ((tx_status & NCRMask) >> NCRShift);
					np->stats.tx_packets++;
				}
			} else {
				np->stats.tx_bytes +=
				    ((tx_control & PKTSMask) >> PKTSShift);
				np->stats.tx_packets++;
			}

			/* Free the original skb. */
			pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
				np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(np->cur_tx->skbuff);
			np->cur_tx->skbuff = NULL;
			--np->really_tx_count;
			if (np->cur_tx->control & TXLD) {
				np->cur_tx = np->cur_tx->next_desc_logical;
				++np->free_tx_count;
			} else {
				np->cur_tx = np->cur_tx->next_desc_logical;
				np->cur_tx = np->cur_tx->next_desc_logical;
				np->free_tx_count += 2;
			}
			num_tx++;
		}		/* end of while loop */

		if (num_tx && np->free_tx_count >= 2)
			netif_wake_queue(dev);

		/* read transmit status for enhanced mode only */
		if (np->crvalue & CR_W_ENH) {
			long data;

			data = ioread32(ioaddr + TSR);
			np->stats.tx_errors += (data & 0xff000000) >> 24;
			np->stats.tx_aborted_errors += (data & 0xff000000) >> 24;
			np->stats.tx_window_errors += (data & 0x00ff0000) >> 16;
			np->stats.collisions += (data & 0x0000ffff);
		}

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=0x%4.4x.\n", dev->name, intr_status);
			if (!np->reset_timer_armed) {
				np->reset_timer_armed = 1;
				np->reset_timer.expires = RUN_AT(HZ/2);
				add_timer(&np->reset_timer);
				stop_nic_rxtx(ioaddr, 0);
				netif_stop_queue(dev);
				/* or netif_tx_disable(dev); ?? */
				/* Prevent other paths from enabling tx,rx,intrs */
				np->crvalue_sv = np->crvalue;
				np->imrvalue_sv = np->imrvalue;
				np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
				np->imrvalue = 0;
			}

			break;
		}
	} while (1);

	/* read the tally counters */
	/* missed pkts */
	np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

	/* crc error */
	np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;

	if (debug)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + ISR));

	iowrite32(np->imrvalue, ioaddr + IMR);

	spin_unlock(&np->lock);

	return IRQ_RETVAL(handled);
}


/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
		s32 rx_status = np->cur_rx->status;

		if (np->really_rx_count == 0)
			break;

		if (debug)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", rx_status);

		if ((!((rx_status & RXFSD) && (rx_status & RXLSD)))
		    || (rx_status & ErrorSummary)) {
			if (rx_status & ErrorSummary) {	/* there was a fatal error */
				if (debug)
					printk(KERN_DEBUG
					       "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, rx_status);

				np->stats.rx_errors++;	/* end of a packet. */
				if (rx_status & (LONG | RUNT))
					np->stats.rx_length_errors++;
				if (rx_status & RXER)
					np->stats.rx_frame_errors++;
				if (rx_status & CRC)
					np->stats.rx_crc_errors++;
			} else {
				int need_to_reset = 0;
				int desno = 0;

				if (rx_status & RXFSD) {	/* this pkt is too long, over one rx buffer */
					struct fealnx_desc *cur;

					/* check whether this packet was received completely */
					cur = np->cur_rx;
					while (desno <= np->really_rx_count) {
						++desno;
						if ((!(cur->status & RXOWN))
						    && (cur->status & RXLSD))
							break;
						/* goto next rx descriptor */
						cur = cur->next_desc_logical;
					}
					if (desno > np->really_rx_count)
						need_to_reset = 1;
				} else	/* RXLSD not found, something is wrong */
					need_to_reset = 1;

				if (need_to_reset == 0) {
					int i;

					np->stats.rx_length_errors++;

					/* free all rx descriptors related to this long pkt */
					for (i = 0; i < desno; ++i) {
						if (!np->cur_rx->skbuff) {
							printk(KERN_DEBUG
								"%s: I'm scared\n", dev->name);
							break;
						}
						np->cur_rx->status = RXOWN;
						np->cur_rx = np->cur_rx->next_desc_logical;
					}
					continue;
				} else {	/* rx error, need to reset this chip */
					stop_nic_rx(ioaddr, np->crvalue);
					reset_rx_descriptors(dev);
					iowrite32(np->crvalue, ioaddr + TCRRCR);
				}
				break;	/* exit the while loop */
			}
		} else {	/* this received pkt is ok */

			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;

#ifndef final_version
			if (debug)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
				       " status %x.\n", pkt_len, rx_status);
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,
							    np->cur_rx->buffer,
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				/* Call copy + cksum if available. */

#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb,
					np->cur_rx->skbuff->data, pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
					np->cur_rx->skbuff->data, pkt_len);
#endif
				pci_dma_sync_single_for_device(np->pci_dev,
							       np->cur_rx->buffer,
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev,
						 np->cur_rx->buffer,
						 np->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->cur_rx->skbuff, pkt_len);
				np->cur_rx->skbuff = NULL;
				--np->really_rx_count;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}

		np->cur_rx = np->cur_rx->next_desc_logical;
	}			/* end of while loop */

	/* allocate skb for rx buffers */
	allocate_rx_buffers(dev);

	return 0;
}


static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* The chip only needs to report frames it silently dropped. */
	if (netif_running(dev)) {
		np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
		np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
	}

	return &np->stats;
}


/* for dev->set_multicast_list */
static void set_rx_mode(struct net_device *dev)
{
	spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
	unsigned long flags;
	spin_lock_irqsave(lp, flags);
	__set_rx_mode(dev);
	spin_unlock_irqrestore(lp, flags);
}


/* Take lock before calling */
static void __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	u32 mc_filter[2];	/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_AB | CR_W_AM;
	} else {
		struct dev_mc_list *mclist;
		int i;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			unsigned int bit;
			bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
			mc_filter[bit >> 5] |= (1 << bit);
		}
		rx_mode = CR_W_AB | CR_W_AM;
	}

	stop_nic_rxtx(ioaddr, np->crvalue);

	iowrite32(mc_filter[0], ioaddr + MAR0);
	iowrite32(mc_filter[1], ioaddr + MAR1);
	np->crvalue &= ~CR_W_RXMODEMASK;
	np->crvalue |= rx_mode;
	iowrite32(np->crvalue, ioaddr + TCRRCR);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};

static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}


static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int i;

	netif_stop_queue(dev);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0x0000, ioaddr + IMR);

	/* Stop the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	del_timer_sync(&np->timer);
	del_timer_sync(&np->reset_timer);

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = np->rx_ring[i].skbuff;

		np->rx_ring[i].status = 0;
		if (skb) {
			pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_ring[i].skbuff = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_ring[i].skbuff;

		if (skb) {
			pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_ring[i].skbuff = NULL;
		}
	}

	return 0;
}

static struct pci_device_id fealnx_pci_tbl[] = {
	{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{} /* terminate list */
};
MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);


static struct pci_driver fealnx_driver = {
	.name		= "fealnx",
	.id_table	= fealnx_pci_tbl,
	.probe		= fealnx_init_one,
	.remove		= __devexit_p(fealnx_remove_one),
};

static int __init fealnx_init(void)
{
	/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif

	return pci_register_driver(&fealnx_driver);
}

static void __exit fealnx_exit(void)
{
	pci_unregister_driver(&fealnx_driver);
}

module_init(fealnx_init);
module_exit(fealnx_exit);
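
/*
 * Illustrative usage (assuming the module parameters declared above):
 *
 *   modprobe fealnx debug=1 options=0x204 full_duplex=1
 *
 * enables debug messages and, for the first adapter, requests media type 4
 * with full duplex forced, subject to the parsing in fealnx_init_one().
 */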